Mock Version: 5.0 ENTER ['do_with_status'](['bash', '--login', '-c', '/usr/bin/rpmbuild -bs --noclean --target noarch --nodeps /builddir/build/SPECS/python-mutagen.spec'], chrootPath='/var/lib/mock/f40-build-2486729-61650/root'env={'TERM': 'vt100', 'SHELL': '/bin/bash', 'HOME': '/builddir', 'HOSTNAME': 'mock', 'PATH': '/usr/bin:/bin:/usr/sbin:/sbin', 'PROMPT_COMMAND': 'printf "\\033]0;\\007"', 'PS1': ' \\s-\\v\\$ ', 'LANG': 'C.UTF-8'}shell=Falselogger=timeout=864000uid=996gid=135user='mockbuild'nspawn_args=[]unshare_net=TrueprintOutput=False) Executing command: ['bash', '--login', '-c', '/usr/bin/rpmbuild -bs --noclean --target noarch --nodeps /builddir/build/SPECS/python-mutagen.spec'] with env {'TERM': 'vt100', 'SHELL': '/bin/bash', 'HOME': '/builddir', 'HOSTNAME': 'mock', 'PATH': '/usr/bin:/bin:/usr/sbin:/sbin', 'PROMPT_COMMAND': 'printf "\\033]0;\\007"', 'PS1': ' \\s-\\v\\$ ', 'LANG': 'C.UTF-8'} and shell False Building target platforms: noarch Building for target noarch setting SOURCE_DATE_EPOCH=1706227200 Wrote: /builddir/build/SRPMS/python-mutagen-1.47.0-3.fc40.src.rpm Child return code was: 0 ENTER ['do_with_status'](['bash', '--login', '-c', '/usr/bin/rpmbuild -bb --noclean --target noarch --nodeps /builddir/build/SPECS/python-mutagen.spec'], chrootPath='/var/lib/mock/f40-build-2486729-61650/root'env={'TERM': 'vt100', 'SHELL': '/bin/bash', 'HOME': '/builddir', 'HOSTNAME': 'mock', 'PATH': '/usr/bin:/bin:/usr/sbin:/sbin', 'PROMPT_COMMAND': 'printf "\\033]0;\\007"', 'PS1': ' \\s-\\v\\$ ', 'LANG': 'C.UTF-8'}shell=Falselogger=timeout=864000uid=996gid=135user='mockbuild'nspawn_args=[]unshare_net=TrueprintOutput=False) Executing command: ['bash', '--login', '-c', '/usr/bin/rpmbuild -bb --noclean --target noarch --nodeps /builddir/build/SPECS/python-mutagen.spec'] with env {'TERM': 'vt100', 'SHELL': '/bin/bash', 'HOME': '/builddir', 'HOSTNAME': 'mock', 'PATH': '/usr/bin:/bin:/usr/sbin:/sbin', 'PROMPT_COMMAND': 'printf "\\033]0;\\007"', 'PS1': ' \\s-\\v\\$ ', 'LANG': 'C.UTF-8'} and shell False Building target platforms: noarch Building for target noarch setting SOURCE_DATE_EPOCH=1706227200 Executing(%prep): /bin/sh -e /var/tmp/rpm-tmp.dCWn99 + umask 022 + cd /builddir/build/BUILD + cd /builddir/build/BUILD + rm -rf mutagen-1.47.0 + /usr/lib/rpm/rpmuncompress -x /builddir/build/SOURCES/mutagen-1.47.0.tar.gz + STATUS=0 + '[' 0 -ne 0 ']' + cd mutagen-1.47.0 + rm -rf /builddir/build/BUILD/mutagen-1.47.0-SPECPARTS + /usr/bin/mkdir -p /builddir/build/BUILD/mutagen-1.47.0-SPECPARTS + /usr/bin/chmod -Rf a+rX,u+w,g-w,o-w .
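Note: unshare_net=True in the do_with_status call above means mock runs the build stages in an isolated network namespace, which is why the Sphinx intersphinx fetch later in this log fails with a name-resolution error. A minimal sketch of the mock configuration knob that controls this; the file path is illustrative, and whether your site config sets it explicitly may vary:

    # Illustrative fragment for /etc/mock/site-defaults.cfg or a per-chroot .cfg.
    # False keeps rpmbuild offline (seen as unshare_net=True in the log);
    # True would allow network access during the rpmbuild stages.
    config_opts['rpmbuild_networking'] = False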
+ RPM_EC=0 ++ jobs -p + exit 0 Executing(%build): /bin/sh -e /var/tmp/rpm-tmp.CZvjlI + umask 022 + cd /builddir/build/BUILD + CFLAGS='-O2 -fexceptions -g -grecord-gcc-switches -pipe -Wall -Werror=format-security -Werror=implicit-function-declaration -Werror=implicit-int -Wp,-U_FORTIFY_SOURCE,-D_FORTIFY_SOURCE=3 -Wp,-D_GLIBCXX_ASSERTIONS -specs=/usr/lib/rpm/redhat/redhat-hardened-cc1 -fstack-protector-strong -specs=/usr/lib/rpm/redhat/redhat-annobin-cc1 -fasynchronous-unwind-tables -fno-omit-frame-pointer ' + export CFLAGS + CXXFLAGS='-O2 -fexceptions -g -grecord-gcc-switches -pipe -Wall -Werror=format-security -Wp,-U_FORTIFY_SOURCE,-D_FORTIFY_SOURCE=3 -Wp,-D_GLIBCXX_ASSERTIONS -specs=/usr/lib/rpm/redhat/redhat-hardened-cc1 -fstack-protector-strong -specs=/usr/lib/rpm/redhat/redhat-annobin-cc1 -fasynchronous-unwind-tables -fno-omit-frame-pointer ' + export CXXFLAGS + FFLAGS='-O2 -fexceptions -g -grecord-gcc-switches -pipe -Wall -Wp,-U_FORTIFY_SOURCE,-D_FORTIFY_SOURCE=3 -Wp,-D_GLIBCXX_ASSERTIONS -specs=/usr/lib/rpm/redhat/redhat-hardened-cc1 -fstack-protector-strong -specs=/usr/lib/rpm/redhat/redhat-annobin-cc1 -fasynchronous-unwind-tables -fno-omit-frame-pointer -I/usr/lib/gfortran/modules ' + export FFLAGS + FCFLAGS='-O2 -fexceptions -g -grecord-gcc-switches -pipe -Wall -Wp,-U_FORTIFY_SOURCE,-D_FORTIFY_SOURCE=3 -Wp,-D_GLIBCXX_ASSERTIONS -specs=/usr/lib/rpm/redhat/redhat-hardened-cc1 -fstack-protector-strong -specs=/usr/lib/rpm/redhat/redhat-annobin-cc1 -fasynchronous-unwind-tables -fno-omit-frame-pointer -I/usr/lib/gfortran/modules ' + export FCFLAGS + VALAFLAGS=-g + export VALAFLAGS + RUSTFLAGS='-Copt-level=3 -Cdebuginfo=2 -Ccodegen-units=1 -Cstrip=none -Cforce-frame-pointers=yes -Clink-arg=-Wl,-z,relro -Clink-arg=-Wl,-z,now --cap-lints=warn' + export RUSTFLAGS + LDFLAGS='-Wl,-z,relro -Wl,--as-needed -Wl,-z,now -specs=/usr/lib/rpm/redhat/redhat-hardened-ld -specs=/usr/lib/rpm/redhat/redhat-annobin-cc1 -Wl,--build-id=sha1 ' + export LDFLAGS + LT_SYS_LIBRARY_PATH=/usr/lib: + export LT_SYS_LIBRARY_PATH + CC=gcc + export CC + CXX=g++ + export CXX + cd mutagen-1.47.0 + CFLAGS='-O2 -fexceptions -g -grecord-gcc-switches -pipe -Wall -Werror=format-security -Werror=implicit-function-declaration -Werror=implicit-int -Wp,-U_FORTIFY_SOURCE,-D_FORTIFY_SOURCE=3 -Wp,-D_GLIBCXX_ASSERTIONS -specs=/usr/lib/rpm/redhat/redhat-hardened-cc1 -fstack-protector-strong -specs=/usr/lib/rpm/redhat/redhat-annobin-cc1 -fasynchronous-unwind-tables -fno-omit-frame-pointer ' + LDFLAGS='-Wl,-z,relro -Wl,--as-needed -Wl,-z,now -specs=/usr/lib/rpm/redhat/redhat-hardened-ld -specs=/usr/lib/rpm/redhat/redhat-annobin-cc1 -Wl,--build-id=sha1 ' + /usr/bin/python3 setup.py build '--executable=/usr/bin/python3 -sP' running build running build_py creating build creating build/lib creating build/lib/mutagen copying mutagen/easymp4.py -> build/lib/mutagen copying mutagen/monkeysaudio.py -> build/lib/mutagen copying mutagen/m4a.py -> build/lib/mutagen copying mutagen/oggopus.py -> build/lib/mutagen copying mutagen/wave.py -> build/lib/mutagen copying mutagen/aiff.py -> build/lib/mutagen copying mutagen/easyid3.py -> build/lib/mutagen copying mutagen/_constants.py -> build/lib/mutagen copying mutagen/trueaudio.py -> build/lib/mutagen copying mutagen/_vorbis.py -> build/lib/mutagen copying mutagen/oggvorbis.py -> build/lib/mutagen copying mutagen/_iff.py -> build/lib/mutagen copying mutagen/tak.py -> build/lib/mutagen copying mutagen/flac.py -> build/lib/mutagen copying mutagen/_riff.py -> build/lib/mutagen copying mutagen/dsf.py -> 
build/lib/mutagen copying mutagen/wavpack.py -> build/lib/mutagen copying mutagen/optimfrog.py -> build/lib/mutagen copying mutagen/oggspeex.py -> build/lib/mutagen copying mutagen/ogg.py -> build/lib/mutagen copying mutagen/apev2.py -> build/lib/mutagen copying mutagen/aac.py -> build/lib/mutagen copying mutagen/dsdiff.py -> build/lib/mutagen copying mutagen/smf.py -> build/lib/mutagen copying mutagen/__init__.py -> build/lib/mutagen copying mutagen/musepack.py -> build/lib/mutagen copying mutagen/oggflac.py -> build/lib/mutagen copying mutagen/ac3.py -> build/lib/mutagen copying mutagen/_tags.py -> build/lib/mutagen copying mutagen/oggtheora.py -> build/lib/mutagen copying mutagen/_util.py -> build/lib/mutagen copying mutagen/_file.py -> build/lib/mutagen creating build/lib/mutagen/id3 copying mutagen/id3/_id3v1.py -> build/lib/mutagen/id3 copying mutagen/id3/_frames.py -> build/lib/mutagen/id3 copying mutagen/id3/__init__.py -> build/lib/mutagen/id3 copying mutagen/id3/_specs.py -> build/lib/mutagen/id3 copying mutagen/id3/_tags.py -> build/lib/mutagen/id3 copying mutagen/id3/_util.py -> build/lib/mutagen/id3 copying mutagen/id3/_file.py -> build/lib/mutagen/id3 creating build/lib/mutagen/mp4 copying mutagen/mp4/__init__.py -> build/lib/mutagen/mp4 copying mutagen/mp4/_atom.py -> build/lib/mutagen/mp4 copying mutagen/mp4/_as_entry.py -> build/lib/mutagen/mp4 copying mutagen/mp4/_util.py -> build/lib/mutagen/mp4 creating build/lib/mutagen/asf copying mutagen/asf/_attrs.py -> build/lib/mutagen/asf copying mutagen/asf/_objects.py -> build/lib/mutagen/asf copying mutagen/asf/__init__.py -> build/lib/mutagen/asf copying mutagen/asf/_util.py -> build/lib/mutagen/asf creating build/lib/mutagen/mp3 copying mutagen/mp3/__init__.py -> build/lib/mutagen/mp3 copying mutagen/mp3/_util.py -> build/lib/mutagen/mp3 creating build/lib/mutagen/_tools copying mutagen/_tools/moggsplit.py -> build/lib/mutagen/_tools copying mutagen/_tools/mid3cp.py -> build/lib/mutagen/_tools copying mutagen/_tools/mid3iconv.py -> build/lib/mutagen/_tools copying mutagen/_tools/mid3v2.py -> build/lib/mutagen/_tools copying mutagen/_tools/mutagen_pony.py -> build/lib/mutagen/_tools copying mutagen/_tools/__init__.py -> build/lib/mutagen/_tools copying mutagen/_tools/mutagen_inspect.py -> build/lib/mutagen/_tools copying mutagen/_tools/_util.py -> build/lib/mutagen/_tools copying mutagen/py.typed -> build/lib/mutagen + sphinx-build -b html -n docs docs/_build Running Sphinx v7.2.6 making output directory... done loading intersphinx inventory from https://docs.python.org/3/objects.inv... WARNING: failed to reach any of the inventories with the following issues: intersphinx inventory 'https://docs.python.org/3/objects.inv' not fetchable due to : HTTPSConnectionPool(host='docs.python.org', port=443): Max retries exceeded with url: /3/objects.inv (Caused by NewConnectionError(': Failed to establish a new connection: [Errno -3] Temporary failure in name resolution')) building [mo]: targets for 0 po files that are out of date writing output... building [html]: targets for 48 source files that are out of date updating environment: [new config] 48 added, 0 changed, 0 removed reading sources... [ 2%] api/aac reading sources... [ 4%] api/ac3 reading sources... [ 6%] api/aiff reading sources... [ 8%] api/ape reading sources... [ 10%] api/asf reading sources... [ 12%] api/base reading sources... [ 15%] api/dsdiff reading sources... [ 17%] api/dsf reading sources... [ 19%] api/flac reading sources... [ 21%] api/id3 reading sources... 
[ 23%] api/id3_frames reading sources... [ 25%] api/index reading sources... [ 27%] api/monkeysaudio reading sources... [ 29%] api/mp3 reading sources... [ 31%] api/mp4 reading sources... [ 33%] api/musepack reading sources... [ 35%] api/ogg reading sources... [ 38%] api/oggflac reading sources... [ 40%] api/oggopus reading sources... [ 42%] api/oggspeex reading sources... [ 44%] api/oggtheora reading sources... [ 46%] api/oggvorbis reading sources... [ 48%] api/optimfrog reading sources... [ 50%] api/smf reading sources... [ 52%] api/tak reading sources... [ 54%] api/trueaudio reading sources... [ 56%] api/vcomment reading sources... [ 58%] api/wave reading sources... [ 60%] api/wavpack reading sources... [ 62%] changelog reading sources... [ 65%] contact reading sources... [ 67%] index reading sources... [ 69%] man/index reading sources... [ 71%] man/mid3cp reading sources... [ 73%] man/mid3iconv reading sources... [ 75%] man/mid3v2 reading sources... [ 77%] man/moggsplit reading sources... [ 79%] man/mutagen-inspect reading sources... [ 81%] man/mutagen-pony reading sources... [ 83%] user/apev2 reading sources... [ 85%] user/classes reading sources... [ 88%] user/filelike reading sources... [ 90%] user/gettingstarted reading sources... [ 92%] user/id3 reading sources... [ 94%] user/index reading sources... [ 96%] user/mp4 reading sources... [ 98%] user/padding reading sources... [100%] user/vcomment looking for now-outdated files... none found pickling environment... done checking consistency... done preparing documents... done copying assets... copying static files... done copying extra files... done done writing output... [ 2%] api/aac writing output... [ 4%] api/ac3 writing output... [ 6%] api/aiff writing output... [ 8%] api/ape writing output... [ 10%] api/asf writing output... [ 12%] api/base writing output... [ 15%] api/dsdiff writing output... [ 17%] api/dsf writing output... [ 19%] api/flac writing output... [ 21%] api/id3 writing output... [ 23%] api/id3_frames writing output... [ 25%] api/index writing output... [ 27%] api/monkeysaudio writing output... [ 29%] api/mp3 writing output... [ 31%] api/mp4 writing output... [ 33%] api/musepack writing output... [ 35%] api/ogg writing output... [ 38%] api/oggflac writing output... [ 40%] api/oggopus writing output... [ 42%] api/oggspeex writing output... [ 44%] api/oggtheora writing output... [ 46%] api/oggvorbis writing output... [ 48%] api/optimfrog writing output... [ 50%] api/smf writing output... [ 52%] api/tak writing output... [ 54%] api/trueaudio writing output... [ 56%] api/vcomment writing output... [ 58%] api/wave writing output... [ 60%] api/wavpack writing output... [ 62%] changelog writing output... [ 65%] contact writing output... [ 67%] index writing output... [ 69%] man/index writing output... [ 71%] man/mid3cp writing output... [ 73%] man/mid3iconv writing output... [ 75%] man/mid3v2 writing output... [ 77%] man/moggsplit writing output... [ 79%] man/mutagen-inspect writing output... [ 81%] man/mutagen-pony writing output... [ 83%] user/apev2 writing output... [ 85%] user/classes writing output... [ 88%] user/filelike writing output... [ 90%] user/gettingstarted writing output... [ 92%] user/id3 writing output... [ 94%] user/index writing output... [ 96%] user/mp4 writing output... [ 98%] user/padding writing output... 
[100%] user/vcomment /builddir/build/BUILD/mutagen-1.47.0/docs/api/base.rst:54: WARNING: py:obj reference target not found: python:str /builddir/build/BUILD/mutagen-1.47.0/docs/api/base.rst:60: WARNING: py:obj reference target not found: python:bytes /builddir/build/BUILD/mutagen-1.47.0/docs/api/base.rst:66: WARNING: py:obj reference target not found: python:str /builddir/build/BUILD/mutagen-1.47.0/docs/api/base.rst:66: WARNING: py:obj reference target not found: python:bytes /builddir/build/BUILD/mutagen-1.47.0/mutagen/mp4/__init__.py:docstring of mutagen.mp4.MP4Chapters:1: WARNING: py:class reference target not found: collections.abc.Sequence /builddir/build/BUILD/mutagen-1.47.0/docs/user/filelike.rst:7: WARNING: py:obj reference target not found: io.BytesIO generating indices... genindex py-modindex done writing additional pages... search done copying images... [100%] images/logo.svg dumping search index in English (code: en)... done dumping object inventory... done build succeeded, 7 warnings. The HTML pages are in docs/_build. + RPM_EC=0 ++ jobs -p + exit 0 Executing(%install): /bin/sh -e /var/tmp/rpm-tmp.s2GgTK + umask 022 + cd /builddir/build/BUILD + '[' /builddir/build/BUILDROOT/python-mutagen-1.47.0-3.fc40.noarch '!=' / ']' + rm -rf /builddir/build/BUILDROOT/python-mutagen-1.47.0-3.fc40.noarch ++ dirname /builddir/build/BUILDROOT/python-mutagen-1.47.0-3.fc40.noarch + mkdir -p /builddir/build/BUILDROOT + mkdir /builddir/build/BUILDROOT/python-mutagen-1.47.0-3.fc40.noarch + CFLAGS='-O2 -fexceptions -g -grecord-gcc-switches -pipe -Wall -Werror=format-security -Werror=implicit-function-declaration -Werror=implicit-int -Wp,-U_FORTIFY_SOURCE,-D_FORTIFY_SOURCE=3 -Wp,-D_GLIBCXX_ASSERTIONS -specs=/usr/lib/rpm/redhat/redhat-hardened-cc1 -fstack-protector-strong -specs=/usr/lib/rpm/redhat/redhat-annobin-cc1 -fasynchronous-unwind-tables -fno-omit-frame-pointer ' + export CFLAGS + CXXFLAGS='-O2 -fexceptions -g -grecord-gcc-switches -pipe -Wall -Werror=format-security -Wp,-U_FORTIFY_SOURCE,-D_FORTIFY_SOURCE=3 -Wp,-D_GLIBCXX_ASSERTIONS -specs=/usr/lib/rpm/redhat/redhat-hardened-cc1 -fstack-protector-strong -specs=/usr/lib/rpm/redhat/redhat-annobin-cc1 -fasynchronous-unwind-tables -fno-omit-frame-pointer ' + export CXXFLAGS + FFLAGS='-O2 -fexceptions -g -grecord-gcc-switches -pipe -Wall -Wp,-U_FORTIFY_SOURCE,-D_FORTIFY_SOURCE=3 -Wp,-D_GLIBCXX_ASSERTIONS -specs=/usr/lib/rpm/redhat/redhat-hardened-cc1 -fstack-protector-strong -specs=/usr/lib/rpm/redhat/redhat-annobin-cc1 -fasynchronous-unwind-tables -fno-omit-frame-pointer -I/usr/lib/gfortran/modules ' + export FFLAGS + FCFLAGS='-O2 -fexceptions -g -grecord-gcc-switches -pipe -Wall -Wp,-U_FORTIFY_SOURCE,-D_FORTIFY_SOURCE=3 -Wp,-D_GLIBCXX_ASSERTIONS -specs=/usr/lib/rpm/redhat/redhat-hardened-cc1 -fstack-protector-strong -specs=/usr/lib/rpm/redhat/redhat-annobin-cc1 -fasynchronous-unwind-tables -fno-omit-frame-pointer -I/usr/lib/gfortran/modules ' + export FCFLAGS + VALAFLAGS=-g + export VALAFLAGS + RUSTFLAGS='-Copt-level=3 -Cdebuginfo=2 -Ccodegen-units=1 -Cstrip=none -Cforce-frame-pointers=yes -Clink-arg=-Wl,-z,relro -Clink-arg=-Wl,-z,now --cap-lints=warn' + export RUSTFLAGS + LDFLAGS='-Wl,-z,relro -Wl,--as-needed -Wl,-z,now -specs=/usr/lib/rpm/redhat/redhat-hardened-ld -specs=/usr/lib/rpm/redhat/redhat-annobin-cc1 -Wl,--build-id=sha1 ' + export LDFLAGS + LT_SYS_LIBRARY_PATH=/usr/lib: + export LT_SYS_LIBRARY_PATH + CC=gcc + export CC + CXX=g++ + export CXX + cd mutagen-1.47.0 + CFLAGS='-O2 -fexceptions -g -grecord-gcc-switches -pipe -Wall 
-Werror=format-security -Werror=implicit-function-declaration -Werror=implicit-int -Wp,-U_FORTIFY_SOURCE,-D_FORTIFY_SOURCE=3 -Wp,-D_GLIBCXX_ASSERTIONS -specs=/usr/lib/rpm/redhat/redhat-hardened-cc1 -fstack-protector-strong -specs=/usr/lib/rpm/redhat/redhat-annobin-cc1 -fasynchronous-unwind-tables -fno-omit-frame-pointer ' + LDFLAGS='-Wl,-z,relro -Wl,--as-needed -Wl,-z,now -specs=/usr/lib/rpm/redhat/redhat-hardened-ld -specs=/usr/lib/rpm/redhat/redhat-annobin-cc1 -Wl,--build-id=sha1 ' + /usr/bin/python3 setup.py install -O1 --skip-build --root /builddir/build/BUILDROOT/python-mutagen-1.47.0-3.fc40.noarch --prefix /usr running install /usr/lib/python3.12/site-packages/setuptools/_distutils/cmd.py:66: SetuptoolsDeprecationWarning: setup.py install is deprecated. !! ******************************************************************************** Please avoid running ``setup.py`` directly. Instead, use pypa/build, pypa/installer or other standards-based tools. Follow the current Python packaging guidelines when building Python RPM packages. See https://blog.ganssle.io/articles/2021/10/setup-py-deprecated.html and https://docs.fedoraproject.org/en-US/packaging-guidelines/Python/ for details. ******************************************************************************** !! self.initialize_options() running install_lib creating /builddir/build/BUILDROOT/python-mutagen-1.47.0-3.fc40.noarch/usr creating /builddir/build/BUILDROOT/python-mutagen-1.47.0-3.fc40.noarch/usr/lib creating /builddir/build/BUILDROOT/python-mutagen-1.47.0-3.fc40.noarch/usr/lib/python3.12 creating /builddir/build/BUILDROOT/python-mutagen-1.47.0-3.fc40.noarch/usr/lib/python3.12/site-packages creating /builddir/build/BUILDROOT/python-mutagen-1.47.0-3.fc40.noarch/usr/lib/python3.12/site-packages/mutagen copying build/lib/mutagen/easymp4.py -> /builddir/build/BUILDROOT/python-mutagen-1.47.0-3.fc40.noarch/usr/lib/python3.12/site-packages/mutagen copying build/lib/mutagen/monkeysaudio.py -> /builddir/build/BUILDROOT/python-mutagen-1.47.0-3.fc40.noarch/usr/lib/python3.12/site-packages/mutagen copying build/lib/mutagen/m4a.py -> /builddir/build/BUILDROOT/python-mutagen-1.47.0-3.fc40.noarch/usr/lib/python3.12/site-packages/mutagen copying build/lib/mutagen/oggopus.py -> /builddir/build/BUILDROOT/python-mutagen-1.47.0-3.fc40.noarch/usr/lib/python3.12/site-packages/mutagen creating /builddir/build/BUILDROOT/python-mutagen-1.47.0-3.fc40.noarch/usr/lib/python3.12/site-packages/mutagen/_tools copying build/lib/mutagen/_tools/moggsplit.py -> /builddir/build/BUILDROOT/python-mutagen-1.47.0-3.fc40.noarch/usr/lib/python3.12/site-packages/mutagen/_tools copying build/lib/mutagen/_tools/mid3cp.py -> /builddir/build/BUILDROOT/python-mutagen-1.47.0-3.fc40.noarch/usr/lib/python3.12/site-packages/mutagen/_tools copying build/lib/mutagen/_tools/mid3iconv.py -> /builddir/build/BUILDROOT/python-mutagen-1.47.0-3.fc40.noarch/usr/lib/python3.12/site-packages/mutagen/_tools copying build/lib/mutagen/_tools/mid3v2.py -> /builddir/build/BUILDROOT/python-mutagen-1.47.0-3.fc40.noarch/usr/lib/python3.12/site-packages/mutagen/_tools copying build/lib/mutagen/_tools/mutagen_pony.py -> /builddir/build/BUILDROOT/python-mutagen-1.47.0-3.fc40.noarch/usr/lib/python3.12/site-packages/mutagen/_tools copying build/lib/mutagen/_tools/__init__.py -> /builddir/build/BUILDROOT/python-mutagen-1.47.0-3.fc40.noarch/usr/lib/python3.12/site-packages/mutagen/_tools copying build/lib/mutagen/_tools/mutagen_inspect.py -> 
/builddir/build/BUILDROOT/python-mutagen-1.47.0-3.fc40.noarch/usr/lib/python3.12/site-packages/mutagen/_tools copying build/lib/mutagen/_tools/_util.py -> /builddir/build/BUILDROOT/python-mutagen-1.47.0-3.fc40.noarch/usr/lib/python3.12/site-packages/mutagen/_tools copying build/lib/mutagen/wave.py -> /builddir/build/BUILDROOT/python-mutagen-1.47.0-3.fc40.noarch/usr/lib/python3.12/site-packages/mutagen copying build/lib/mutagen/aiff.py -> /builddir/build/BUILDROOT/python-mutagen-1.47.0-3.fc40.noarch/usr/lib/python3.12/site-packages/mutagen copying build/lib/mutagen/easyid3.py -> /builddir/build/BUILDROOT/python-mutagen-1.47.0-3.fc40.noarch/usr/lib/python3.12/site-packages/mutagen copying build/lib/mutagen/_constants.py -> /builddir/build/BUILDROOT/python-mutagen-1.47.0-3.fc40.noarch/usr/lib/python3.12/site-packages/mutagen copying build/lib/mutagen/trueaudio.py -> /builddir/build/BUILDROOT/python-mutagen-1.47.0-3.fc40.noarch/usr/lib/python3.12/site-packages/mutagen copying build/lib/mutagen/_vorbis.py -> /builddir/build/BUILDROOT/python-mutagen-1.47.0-3.fc40.noarch/usr/lib/python3.12/site-packages/mutagen copying build/lib/mutagen/oggvorbis.py -> /builddir/build/BUILDROOT/python-mutagen-1.47.0-3.fc40.noarch/usr/lib/python3.12/site-packages/mutagen creating /builddir/build/BUILDROOT/python-mutagen-1.47.0-3.fc40.noarch/usr/lib/python3.12/site-packages/mutagen/mp3 copying build/lib/mutagen/mp3/__init__.py -> /builddir/build/BUILDROOT/python-mutagen-1.47.0-3.fc40.noarch/usr/lib/python3.12/site-packages/mutagen/mp3 copying build/lib/mutagen/mp3/_util.py -> /builddir/build/BUILDROOT/python-mutagen-1.47.0-3.fc40.noarch/usr/lib/python3.12/site-packages/mutagen/mp3 copying build/lib/mutagen/_iff.py -> /builddir/build/BUILDROOT/python-mutagen-1.47.0-3.fc40.noarch/usr/lib/python3.12/site-packages/mutagen copying build/lib/mutagen/tak.py -> /builddir/build/BUILDROOT/python-mutagen-1.47.0-3.fc40.noarch/usr/lib/python3.12/site-packages/mutagen copying build/lib/mutagen/flac.py -> /builddir/build/BUILDROOT/python-mutagen-1.47.0-3.fc40.noarch/usr/lib/python3.12/site-packages/mutagen copying build/lib/mutagen/_riff.py -> /builddir/build/BUILDROOT/python-mutagen-1.47.0-3.fc40.noarch/usr/lib/python3.12/site-packages/mutagen copying build/lib/mutagen/dsf.py -> /builddir/build/BUILDROOT/python-mutagen-1.47.0-3.fc40.noarch/usr/lib/python3.12/site-packages/mutagen creating /builddir/build/BUILDROOT/python-mutagen-1.47.0-3.fc40.noarch/usr/lib/python3.12/site-packages/mutagen/mp4 copying build/lib/mutagen/mp4/__init__.py -> /builddir/build/BUILDROOT/python-mutagen-1.47.0-3.fc40.noarch/usr/lib/python3.12/site-packages/mutagen/mp4 copying build/lib/mutagen/mp4/_atom.py -> /builddir/build/BUILDROOT/python-mutagen-1.47.0-3.fc40.noarch/usr/lib/python3.12/site-packages/mutagen/mp4 copying build/lib/mutagen/mp4/_as_entry.py -> /builddir/build/BUILDROOT/python-mutagen-1.47.0-3.fc40.noarch/usr/lib/python3.12/site-packages/mutagen/mp4 copying build/lib/mutagen/mp4/_util.py -> /builddir/build/BUILDROOT/python-mutagen-1.47.0-3.fc40.noarch/usr/lib/python3.12/site-packages/mutagen/mp4 creating /builddir/build/BUILDROOT/python-mutagen-1.47.0-3.fc40.noarch/usr/lib/python3.12/site-packages/mutagen/id3 copying build/lib/mutagen/id3/_id3v1.py -> /builddir/build/BUILDROOT/python-mutagen-1.47.0-3.fc40.noarch/usr/lib/python3.12/site-packages/mutagen/id3 copying build/lib/mutagen/id3/_frames.py -> /builddir/build/BUILDROOT/python-mutagen-1.47.0-3.fc40.noarch/usr/lib/python3.12/site-packages/mutagen/id3 copying 
build/lib/mutagen/id3/__init__.py -> /builddir/build/BUILDROOT/python-mutagen-1.47.0-3.fc40.noarch/usr/lib/python3.12/site-packages/mutagen/id3 copying build/lib/mutagen/id3/_specs.py -> /builddir/build/BUILDROOT/python-mutagen-1.47.0-3.fc40.noarch/usr/lib/python3.12/site-packages/mutagen/id3 copying build/lib/mutagen/id3/_tags.py -> /builddir/build/BUILDROOT/python-mutagen-1.47.0-3.fc40.noarch/usr/lib/python3.12/site-packages/mutagen/id3 copying build/lib/mutagen/id3/_util.py -> /builddir/build/BUILDROOT/python-mutagen-1.47.0-3.fc40.noarch/usr/lib/python3.12/site-packages/mutagen/id3 copying build/lib/mutagen/id3/_file.py -> /builddir/build/BUILDROOT/python-mutagen-1.47.0-3.fc40.noarch/usr/lib/python3.12/site-packages/mutagen/id3 copying build/lib/mutagen/wavpack.py -> /builddir/build/BUILDROOT/python-mutagen-1.47.0-3.fc40.noarch/usr/lib/python3.12/site-packages/mutagen copying build/lib/mutagen/optimfrog.py -> /builddir/build/BUILDROOT/python-mutagen-1.47.0-3.fc40.noarch/usr/lib/python3.12/site-packages/mutagen copying build/lib/mutagen/py.typed -> /builddir/build/BUILDROOT/python-mutagen-1.47.0-3.fc40.noarch/usr/lib/python3.12/site-packages/mutagen copying build/lib/mutagen/oggspeex.py -> /builddir/build/BUILDROOT/python-mutagen-1.47.0-3.fc40.noarch/usr/lib/python3.12/site-packages/mutagen copying build/lib/mutagen/ogg.py -> /builddir/build/BUILDROOT/python-mutagen-1.47.0-3.fc40.noarch/usr/lib/python3.12/site-packages/mutagen copying build/lib/mutagen/apev2.py -> /builddir/build/BUILDROOT/python-mutagen-1.47.0-3.fc40.noarch/usr/lib/python3.12/site-packages/mutagen copying build/lib/mutagen/aac.py -> /builddir/build/BUILDROOT/python-mutagen-1.47.0-3.fc40.noarch/usr/lib/python3.12/site-packages/mutagen copying build/lib/mutagen/dsdiff.py -> /builddir/build/BUILDROOT/python-mutagen-1.47.0-3.fc40.noarch/usr/lib/python3.12/site-packages/mutagen copying build/lib/mutagen/smf.py -> /builddir/build/BUILDROOT/python-mutagen-1.47.0-3.fc40.noarch/usr/lib/python3.12/site-packages/mutagen copying build/lib/mutagen/__init__.py -> /builddir/build/BUILDROOT/python-mutagen-1.47.0-3.fc40.noarch/usr/lib/python3.12/site-packages/mutagen copying build/lib/mutagen/musepack.py -> /builddir/build/BUILDROOT/python-mutagen-1.47.0-3.fc40.noarch/usr/lib/python3.12/site-packages/mutagen copying build/lib/mutagen/oggflac.py -> /builddir/build/BUILDROOT/python-mutagen-1.47.0-3.fc40.noarch/usr/lib/python3.12/site-packages/mutagen copying build/lib/mutagen/ac3.py -> /builddir/build/BUILDROOT/python-mutagen-1.47.0-3.fc40.noarch/usr/lib/python3.12/site-packages/mutagen copying build/lib/mutagen/_tags.py -> /builddir/build/BUILDROOT/python-mutagen-1.47.0-3.fc40.noarch/usr/lib/python3.12/site-packages/mutagen creating /builddir/build/BUILDROOT/python-mutagen-1.47.0-3.fc40.noarch/usr/lib/python3.12/site-packages/mutagen/asf copying build/lib/mutagen/asf/_attrs.py -> /builddir/build/BUILDROOT/python-mutagen-1.47.0-3.fc40.noarch/usr/lib/python3.12/site-packages/mutagen/asf copying build/lib/mutagen/asf/_objects.py -> /builddir/build/BUILDROOT/python-mutagen-1.47.0-3.fc40.noarch/usr/lib/python3.12/site-packages/mutagen/asf copying build/lib/mutagen/asf/__init__.py -> /builddir/build/BUILDROOT/python-mutagen-1.47.0-3.fc40.noarch/usr/lib/python3.12/site-packages/mutagen/asf copying build/lib/mutagen/asf/_util.py -> /builddir/build/BUILDROOT/python-mutagen-1.47.0-3.fc40.noarch/usr/lib/python3.12/site-packages/mutagen/asf copying build/lib/mutagen/oggtheora.py -> 
/builddir/build/BUILDROOT/python-mutagen-1.47.0-3.fc40.noarch/usr/lib/python3.12/site-packages/mutagen copying build/lib/mutagen/_util.py -> /builddir/build/BUILDROOT/python-mutagen-1.47.0-3.fc40.noarch/usr/lib/python3.12/site-packages/mutagen copying build/lib/mutagen/_file.py -> /builddir/build/BUILDROOT/python-mutagen-1.47.0-3.fc40.noarch/usr/lib/python3.12/site-packages/mutagen byte-compiling /builddir/build/BUILDROOT/python-mutagen-1.47.0-3.fc40.noarch/usr/lib/python3.12/site-packages/mutagen/easymp4.py to easymp4.cpython-312.pyc byte-compiling /builddir/build/BUILDROOT/python-mutagen-1.47.0-3.fc40.noarch/usr/lib/python3.12/site-packages/mutagen/monkeysaudio.py to monkeysaudio.cpython-312.pyc byte-compiling /builddir/build/BUILDROOT/python-mutagen-1.47.0-3.fc40.noarch/usr/lib/python3.12/site-packages/mutagen/m4a.py to m4a.cpython-312.pyc byte-compiling /builddir/build/BUILDROOT/python-mutagen-1.47.0-3.fc40.noarch/usr/lib/python3.12/site-packages/mutagen/oggopus.py to oggopus.cpython-312.pyc byte-compiling /builddir/build/BUILDROOT/python-mutagen-1.47.0-3.fc40.noarch/usr/lib/python3.12/site-packages/mutagen/_tools/moggsplit.py to moggsplit.cpython-312.pyc byte-compiling /builddir/build/BUILDROOT/python-mutagen-1.47.0-3.fc40.noarch/usr/lib/python3.12/site-packages/mutagen/_tools/mid3cp.py to mid3cp.cpython-312.pyc byte-compiling /builddir/build/BUILDROOT/python-mutagen-1.47.0-3.fc40.noarch/usr/lib/python3.12/site-packages/mutagen/_tools/mid3iconv.py to mid3iconv.cpython-312.pyc byte-compiling /builddir/build/BUILDROOT/python-mutagen-1.47.0-3.fc40.noarch/usr/lib/python3.12/site-packages/mutagen/_tools/mid3v2.py to mid3v2.cpython-312.pyc byte-compiling /builddir/build/BUILDROOT/python-mutagen-1.47.0-3.fc40.noarch/usr/lib/python3.12/site-packages/mutagen/_tools/mutagen_pony.py to mutagen_pony.cpython-312.pyc byte-compiling /builddir/build/BUILDROOT/python-mutagen-1.47.0-3.fc40.noarch/usr/lib/python3.12/site-packages/mutagen/_tools/__init__.py to __init__.cpython-312.pyc byte-compiling /builddir/build/BUILDROOT/python-mutagen-1.47.0-3.fc40.noarch/usr/lib/python3.12/site-packages/mutagen/_tools/mutagen_inspect.py to mutagen_inspect.cpython-312.pyc byte-compiling /builddir/build/BUILDROOT/python-mutagen-1.47.0-3.fc40.noarch/usr/lib/python3.12/site-packages/mutagen/_tools/_util.py to _util.cpython-312.pyc byte-compiling /builddir/build/BUILDROOT/python-mutagen-1.47.0-3.fc40.noarch/usr/lib/python3.12/site-packages/mutagen/wave.py to wave.cpython-312.pyc byte-compiling /builddir/build/BUILDROOT/python-mutagen-1.47.0-3.fc40.noarch/usr/lib/python3.12/site-packages/mutagen/aiff.py to aiff.cpython-312.pyc byte-compiling /builddir/build/BUILDROOT/python-mutagen-1.47.0-3.fc40.noarch/usr/lib/python3.12/site-packages/mutagen/easyid3.py to easyid3.cpython-312.pyc byte-compiling /builddir/build/BUILDROOT/python-mutagen-1.47.0-3.fc40.noarch/usr/lib/python3.12/site-packages/mutagen/_constants.py to _constants.cpython-312.pyc byte-compiling /builddir/build/BUILDROOT/python-mutagen-1.47.0-3.fc40.noarch/usr/lib/python3.12/site-packages/mutagen/trueaudio.py to trueaudio.cpython-312.pyc byte-compiling /builddir/build/BUILDROOT/python-mutagen-1.47.0-3.fc40.noarch/usr/lib/python3.12/site-packages/mutagen/_vorbis.py to _vorbis.cpython-312.pyc byte-compiling /builddir/build/BUILDROOT/python-mutagen-1.47.0-3.fc40.noarch/usr/lib/python3.12/site-packages/mutagen/oggvorbis.py to oggvorbis.cpython-312.pyc byte-compiling 
/builddir/build/BUILDROOT/python-mutagen-1.47.0-3.fc40.noarch/usr/lib/python3.12/site-packages/mutagen/mp3/__init__.py to __init__.cpython-312.pyc byte-compiling /builddir/build/BUILDROOT/python-mutagen-1.47.0-3.fc40.noarch/usr/lib/python3.12/site-packages/mutagen/mp3/_util.py to _util.cpython-312.pyc byte-compiling /builddir/build/BUILDROOT/python-mutagen-1.47.0-3.fc40.noarch/usr/lib/python3.12/site-packages/mutagen/_iff.py to _iff.cpython-312.pyc byte-compiling /builddir/build/BUILDROOT/python-mutagen-1.47.0-3.fc40.noarch/usr/lib/python3.12/site-packages/mutagen/tak.py to tak.cpython-312.pyc byte-compiling /builddir/build/BUILDROOT/python-mutagen-1.47.0-3.fc40.noarch/usr/lib/python3.12/site-packages/mutagen/flac.py to flac.cpython-312.pyc byte-compiling /builddir/build/BUILDROOT/python-mutagen-1.47.0-3.fc40.noarch/usr/lib/python3.12/site-packages/mutagen/_riff.py to _riff.cpython-312.pyc byte-compiling /builddir/build/BUILDROOT/python-mutagen-1.47.0-3.fc40.noarch/usr/lib/python3.12/site-packages/mutagen/dsf.py to dsf.cpython-312.pyc byte-compiling /builddir/build/BUILDROOT/python-mutagen-1.47.0-3.fc40.noarch/usr/lib/python3.12/site-packages/mutagen/mp4/__init__.py to __init__.cpython-312.pyc byte-compiling /builddir/build/BUILDROOT/python-mutagen-1.47.0-3.fc40.noarch/usr/lib/python3.12/site-packages/mutagen/mp4/_atom.py to _atom.cpython-312.pyc byte-compiling /builddir/build/BUILDROOT/python-mutagen-1.47.0-3.fc40.noarch/usr/lib/python3.12/site-packages/mutagen/mp4/_as_entry.py to _as_entry.cpython-312.pyc byte-compiling /builddir/build/BUILDROOT/python-mutagen-1.47.0-3.fc40.noarch/usr/lib/python3.12/site-packages/mutagen/mp4/_util.py to _util.cpython-312.pyc byte-compiling /builddir/build/BUILDROOT/python-mutagen-1.47.0-3.fc40.noarch/usr/lib/python3.12/site-packages/mutagen/id3/_id3v1.py to _id3v1.cpython-312.pyc byte-compiling /builddir/build/BUILDROOT/python-mutagen-1.47.0-3.fc40.noarch/usr/lib/python3.12/site-packages/mutagen/id3/_frames.py to _frames.cpython-312.pyc byte-compiling /builddir/build/BUILDROOT/python-mutagen-1.47.0-3.fc40.noarch/usr/lib/python3.12/site-packages/mutagen/id3/__init__.py to __init__.cpython-312.pyc byte-compiling /builddir/build/BUILDROOT/python-mutagen-1.47.0-3.fc40.noarch/usr/lib/python3.12/site-packages/mutagen/id3/_specs.py to _specs.cpython-312.pyc byte-compiling /builddir/build/BUILDROOT/python-mutagen-1.47.0-3.fc40.noarch/usr/lib/python3.12/site-packages/mutagen/id3/_tags.py to _tags.cpython-312.pyc byte-compiling /builddir/build/BUILDROOT/python-mutagen-1.47.0-3.fc40.noarch/usr/lib/python3.12/site-packages/mutagen/id3/_util.py to _util.cpython-312.pyc byte-compiling /builddir/build/BUILDROOT/python-mutagen-1.47.0-3.fc40.noarch/usr/lib/python3.12/site-packages/mutagen/id3/_file.py to _file.cpython-312.pyc byte-compiling /builddir/build/BUILDROOT/python-mutagen-1.47.0-3.fc40.noarch/usr/lib/python3.12/site-packages/mutagen/wavpack.py to wavpack.cpython-312.pyc byte-compiling /builddir/build/BUILDROOT/python-mutagen-1.47.0-3.fc40.noarch/usr/lib/python3.12/site-packages/mutagen/optimfrog.py to optimfrog.cpython-312.pyc byte-compiling /builddir/build/BUILDROOT/python-mutagen-1.47.0-3.fc40.noarch/usr/lib/python3.12/site-packages/mutagen/oggspeex.py to oggspeex.cpython-312.pyc byte-compiling /builddir/build/BUILDROOT/python-mutagen-1.47.0-3.fc40.noarch/usr/lib/python3.12/site-packages/mutagen/ogg.py to ogg.cpython-312.pyc byte-compiling /builddir/build/BUILDROOT/python-mutagen-1.47.0-3.fc40.noarch/usr/lib/python3.12/site-packages/mutagen/apev2.py to 
apev2.cpython-312.pyc byte-compiling /builddir/build/BUILDROOT/python-mutagen-1.47.0-3.fc40.noarch/usr/lib/python3.12/site-packages/mutagen/aac.py to aac.cpython-312.pyc byte-compiling /builddir/build/BUILDROOT/python-mutagen-1.47.0-3.fc40.noarch/usr/lib/python3.12/site-packages/mutagen/dsdiff.py to dsdiff.cpython-312.pyc byte-compiling /builddir/build/BUILDROOT/python-mutagen-1.47.0-3.fc40.noarch/usr/lib/python3.12/site-packages/mutagen/smf.py to smf.cpython-312.pyc byte-compiling /builddir/build/BUILDROOT/python-mutagen-1.47.0-3.fc40.noarch/usr/lib/python3.12/site-packages/mutagen/__init__.py to __init__.cpython-312.pyc byte-compiling /builddir/build/BUILDROOT/python-mutagen-1.47.0-3.fc40.noarch/usr/lib/python3.12/site-packages/mutagen/musepack.py to musepack.cpython-312.pyc byte-compiling /builddir/build/BUILDROOT/python-mutagen-1.47.0-3.fc40.noarch/usr/lib/python3.12/site-packages/mutagen/oggflac.py to oggflac.cpython-312.pyc byte-compiling /builddir/build/BUILDROOT/python-mutagen-1.47.0-3.fc40.noarch/usr/lib/python3.12/site-packages/mutagen/ac3.py to ac3.cpython-312.pyc byte-compiling /builddir/build/BUILDROOT/python-mutagen-1.47.0-3.fc40.noarch/usr/lib/python3.12/site-packages/mutagen/_tags.py to _tags.cpython-312.pyc byte-compiling /builddir/build/BUILDROOT/python-mutagen-1.47.0-3.fc40.noarch/usr/lib/python3.12/site-packages/mutagen/asf/_attrs.py to _attrs.cpython-312.pyc byte-compiling /builddir/build/BUILDROOT/python-mutagen-1.47.0-3.fc40.noarch/usr/lib/python3.12/site-packages/mutagen/asf/_objects.py to _objects.cpython-312.pyc byte-compiling /builddir/build/BUILDROOT/python-mutagen-1.47.0-3.fc40.noarch/usr/lib/python3.12/site-packages/mutagen/asf/__init__.py to __init__.cpython-312.pyc byte-compiling /builddir/build/BUILDROOT/python-mutagen-1.47.0-3.fc40.noarch/usr/lib/python3.12/site-packages/mutagen/asf/_util.py to _util.cpython-312.pyc byte-compiling /builddir/build/BUILDROOT/python-mutagen-1.47.0-3.fc40.noarch/usr/lib/python3.12/site-packages/mutagen/oggtheora.py to oggtheora.cpython-312.pyc byte-compiling /builddir/build/BUILDROOT/python-mutagen-1.47.0-3.fc40.noarch/usr/lib/python3.12/site-packages/mutagen/_util.py to _util.cpython-312.pyc byte-compiling /builddir/build/BUILDROOT/python-mutagen-1.47.0-3.fc40.noarch/usr/lib/python3.12/site-packages/mutagen/_file.py to _file.cpython-312.pyc writing byte-compilation script '/tmp/tmp1zo_f88t.py' /usr/bin/python3 /tmp/tmp1zo_f88t.py removing /tmp/tmp1zo_f88t.py running install_data creating /builddir/build/BUILDROOT/python-mutagen-1.47.0-3.fc40.noarch/usr/share creating /builddir/build/BUILDROOT/python-mutagen-1.47.0-3.fc40.noarch/usr/share/man creating /builddir/build/BUILDROOT/python-mutagen-1.47.0-3.fc40.noarch/usr/share/man/man1 copying man/mid3cp.1 -> /builddir/build/BUILDROOT/python-mutagen-1.47.0-3.fc40.noarch/usr/share/man/man1 copying man/mid3iconv.1 -> /builddir/build/BUILDROOT/python-mutagen-1.47.0-3.fc40.noarch/usr/share/man/man1 copying man/mutagen-inspect.1 -> /builddir/build/BUILDROOT/python-mutagen-1.47.0-3.fc40.noarch/usr/share/man/man1 copying man/mutagen-pony.1 -> /builddir/build/BUILDROOT/python-mutagen-1.47.0-3.fc40.noarch/usr/share/man/man1 copying man/moggsplit.1 -> /builddir/build/BUILDROOT/python-mutagen-1.47.0-3.fc40.noarch/usr/share/man/man1 copying man/mid3v2.1 -> /builddir/build/BUILDROOT/python-mutagen-1.47.0-3.fc40.noarch/usr/share/man/man1 running install_egg_info running egg_info writing mutagen.egg-info/PKG-INFO writing dependency_links to mutagen.egg-info/dependency_links.txt writing entry 
points to mutagen.egg-info/entry_points.txt writing top-level names to mutagen.egg-info/top_level.txt reading manifest file 'mutagen.egg-info/SOURCES.txt' reading manifest template 'MANIFEST.in' warning: no files found matching '*.pyi' under directory 'mutagen' warning: no files found matching 'README.rst' under directory 'mutagen' adding license file 'COPYING' writing manifest file 'mutagen.egg-info/SOURCES.txt' Copying mutagen.egg-info to /builddir/build/BUILDROOT/python-mutagen-1.47.0-3.fc40.noarch/usr/lib/python3.12/site-packages/mutagen-1.47.0-py3.12.egg-info running install_scripts Installing mid3cp script to /builddir/build/BUILDROOT/python-mutagen-1.47.0-3.fc40.noarch/usr/bin Installing mid3iconv script to /builddir/build/BUILDROOT/python-mutagen-1.47.0-3.fc40.noarch/usr/bin Installing mid3v2 script to /builddir/build/BUILDROOT/python-mutagen-1.47.0-3.fc40.noarch/usr/bin Installing moggsplit script to /builddir/build/BUILDROOT/python-mutagen-1.47.0-3.fc40.noarch/usr/bin Installing mutagen-inspect script to /builddir/build/BUILDROOT/python-mutagen-1.47.0-3.fc40.noarch/usr/bin Installing mutagen-pony script to /builddir/build/BUILDROOT/python-mutagen-1.47.0-3.fc40.noarch/usr/bin + rm -rfv /builddir/build/BUILDROOT/python-mutagen-1.47.0-3.fc40.noarch/usr/bin/__pycache__ + install -D -p -m 0644 man/mid3cp.1 man/mid3iconv.1 man/mid3v2.1 man/moggsplit.1 man/mutagen-inspect.1 man/mutagen-pony.1 /builddir/build/BUILDROOT/python-mutagen-1.47.0-3.fc40.noarch/usr/share/man/man1 + rm -rf docs/_build/.buildinfo docs/_build/.doctrees + /usr/bin/find-debuginfo -j8 --strict-build-id -m -i --build-id-seed 1.47.0-3.fc40 --unique-debug-suffix -1.47.0-3.fc40.noarch --unique-debug-src-base python-mutagen-1.47.0-3.fc40.noarch --run-dwz --dwz-low-mem-die-limit 10000000 --dwz-max-die-limit 50000000 -S debugsourcefiles.list /builddir/build/BUILD/mutagen-1.47.0 find-debuginfo: starting Extracting debug info from 0 files Creating .debug symlinks for symlinks to ELF files find: ‘debug’: No such file or directory find-debuginfo: done + /usr/lib/rpm/check-buildroot + /usr/lib/rpm/redhat/brp-ldconfig + /usr/lib/rpm/brp-compress + /usr/lib/rpm/redhat/brp-strip-lto /usr/bin/strip + /usr/lib/rpm/brp-strip-static-archive /usr/bin/strip + /usr/lib/rpm/check-rpaths + /usr/lib/rpm/redhat/brp-mangle-shebangs + /usr/lib/rpm/brp-remove-la-files + env /usr/lib/rpm/redhat/brp-python-bytecompile '' 1 0 -j8 Bytecompiling .py files below /builddir/build/BUILDROOT/python-mutagen-1.47.0-3.fc40.noarch/usr/lib/python3.12 using python3.12 + /usr/lib/rpm/redhat/brp-python-hardlink Executing(%check): /bin/sh -e /var/tmp/rpm-tmp.vRXvF0 + umask 022 + cd /builddir/build/BUILD + CFLAGS='-O2 -fexceptions -g -grecord-gcc-switches -pipe -Wall -Werror=format-security -Werror=implicit-function-declaration -Werror=implicit-int -Wp,-U_FORTIFY_SOURCE,-D_FORTIFY_SOURCE=3 -Wp,-D_GLIBCXX_ASSERTIONS -specs=/usr/lib/rpm/redhat/redhat-hardened-cc1 -fstack-protector-strong -specs=/usr/lib/rpm/redhat/redhat-annobin-cc1 -fasynchronous-unwind-tables -fno-omit-frame-pointer ' + export CFLAGS + CXXFLAGS='-O2 -fexceptions -g -grecord-gcc-switches -pipe -Wall -Werror=format-security -Wp,-U_FORTIFY_SOURCE,-D_FORTIFY_SOURCE=3 -Wp,-D_GLIBCXX_ASSERTIONS -specs=/usr/lib/rpm/redhat/redhat-hardened-cc1 -fstack-protector-strong -specs=/usr/lib/rpm/redhat/redhat-annobin-cc1 -fasynchronous-unwind-tables -fno-omit-frame-pointer ' + export CXXFLAGS + FFLAGS='-O2 -fexceptions -g -grecord-gcc-switches -pipe -Wall -Wp,-U_FORTIFY_SOURCE,-D_FORTIFY_SOURCE=3 
-Wp,-D_GLIBCXX_ASSERTIONS -specs=/usr/lib/rpm/redhat/redhat-hardened-cc1 -fstack-protector-strong -specs=/usr/lib/rpm/redhat/redhat-annobin-cc1 -fasynchronous-unwind-tables -fno-omit-frame-pointer -I/usr/lib/gfortran/modules ' + export FFLAGS + FCFLAGS='-O2 -fexceptions -g -grecord-gcc-switches -pipe -Wall -Wp,-U_FORTIFY_SOURCE,-D_FORTIFY_SOURCE=3 -Wp,-D_GLIBCXX_ASSERTIONS -specs=/usr/lib/rpm/redhat/redhat-hardened-cc1 -fstack-protector-strong -specs=/usr/lib/rpm/redhat/redhat-annobin-cc1 -fasynchronous-unwind-tables -fno-omit-frame-pointer -I/usr/lib/gfortran/modules ' + export FCFLAGS + VALAFLAGS=-g + export VALAFLAGS + RUSTFLAGS='-Copt-level=3 -Cdebuginfo=2 -Ccodegen-units=1 -Cstrip=none -Cforce-frame-pointers=yes -Clink-arg=-Wl,-z,relro -Clink-arg=-Wl,-z,now --cap-lints=warn' + export RUSTFLAGS + LDFLAGS='-Wl,-z,relro -Wl,--as-needed -Wl,-z,now -specs=/usr/lib/rpm/redhat/redhat-hardened-ld -specs=/usr/lib/rpm/redhat/redhat-annobin-cc1 -Wl,--build-id=sha1 ' + export LDFLAGS + LT_SYS_LIBRARY_PATH=/usr/lib: + export LT_SYS_LIBRARY_PATH + CC=gcc + export CC + CXX=g++ + export CXX + cd mutagen-1.47.0 + CFLAGS='-O2 -fexceptions -g -grecord-gcc-switches -pipe -Wall -Werror=format-security -Werror=implicit-function-declaration -Werror=implicit-int -Wp,-U_FORTIFY_SOURCE,-D_FORTIFY_SOURCE=3 -Wp,-D_GLIBCXX_ASSERTIONS -specs=/usr/lib/rpm/redhat/redhat-hardened-cc1 -fstack-protector-strong -specs=/usr/lib/rpm/redhat/redhat-annobin-cc1 -fasynchronous-unwind-tables -fno-omit-frame-pointer ' + LDFLAGS='-Wl,-z,relro -Wl,--as-needed -Wl,-z,now -specs=/usr/lib/rpm/redhat/redhat-hardened-ld -specs=/usr/lib/rpm/redhat/redhat-annobin-cc1 -Wl,--build-id=sha1 ' + PATH=/builddir/build/BUILDROOT/python-mutagen-1.47.0-3.fc40.noarch/usr/bin:/usr/bin:/bin:/usr/sbin:/sbin:/usr/local/sbin + PYTHONPATH=/builddir/build/BUILDROOT/python-mutagen-1.47.0-3.fc40.noarch/usr/lib64/python3.12/site-packages:/builddir/build/BUILDROOT/python-mutagen-1.47.0-3.fc40.noarch/usr/lib/python3.12/site-packages + PYTHONDONTWRITEBYTECODE=1 + PYTEST_XDIST_AUTO_NUM_WORKERS=8 + /usr/bin/pytest ============================= test session starts ============================== platform linux -- Python 3.12.0, pytest-7.3.2, pluggy-1.3.0 rootdir: /builddir/build/BUILD/mutagen-1.47.0 plugins: hypothesis-6.82.0 collected 4033 items tests/test___init__.py .....................................FF.......... [ 1%] ..........FF......................F......................F.............. [ 3%] .......FFF.....................F......................F................. [ 4%] .....FF.....................F......................F.................... [ 6%] ..F......................FF....................FFF....................FF [ 8%] F....................FFF.....................F......................F... [ 10%] ..................FFF....................FF.....................FF...... [ 11%] ...............FFF....................FF.....................FFF........ [ 13%] ............FFF.....................F.....................FFF........... [ 15%] ..........F.....................FFF.....................F............... [ 17%] ......FFF.....................F.....................FFF................. [ 19%] ....F......................FF....................FFF.................... [ 20%] FFF....................FFF.....................F......................F. [ 22%] .....................F......................F......................F.... [ 24%] ..................F.....................FFF....................FFF...... 
[ 26%] ..............FFF....................FFF....................FFF......... [ 27%] ............F......................F.....................FF............. [ 29%] .........F.....................FFF.....................F................ [ 31%] .....FFF.....................F.....................FFF.................. [ 33%] ..FFF........F...... [ 33%] tests/test__id3frames.py ............................................... [ 35%] ........................................................................ [ 36%] ........................................................................ [ 38%] ........................................................................ [ 40%] ........................................................................ [ 42%] ........................................................................ [ 43%] ........................................................................ [ 45%] ........................................................................ [ 47%] ........................................................................ [ 49%] ........................................................................ [ 51%] ........................................................................ [ 52%] ........................................................................ [ 54%] ........................................................................ [ 56%] ........................................................................ [ 58%] ...................................................... [ 59%] tests/test__id3specs.py .......................................... [ 60%] tests/test__id3util.py ......................... [ 61%] tests/test__iff.py . [ 61%] tests/test__riff.py ......... [ 61%] tests/test__util.py .................................................... [ 62%] ............................................. [ 63%] tests/test__vorbis.py .................................................. [ 65%] ........ [ 65%] tests/test_aac.py ............ [ 65%] tests/test_ac3.py ........ [ 65%] tests/test_aiff.py .................................. [ 66%] tests/test_apev2.py .................................................... [ 67%] .................................... [ 68%] tests/test_asf.py ...................................................... [ 70%] ....................................................................... [ 71%] tests/test_dsdiff.py ........... [ 72%] tests/test_dsf.py .................. [ 72%] tests/test_easyid3.py ............................................... [ 73%] tests/test_easymp4.py .............. [ 74%] tests/test_flac.py ..................................................... [ 75%] ........................................................................ [ 77%] ............................... [ 78%] tests/test_id3.py ...................................................... [ 79%] ........................................................................ [ 81%] ... [ 81%] tests/test_m4a.py . [ 81%] tests/test_monkeysaudio.py ........ [ 81%] tests/test_mp3.py ................................................ [ 82%] tests/test_mp4.py ...................................................... [ 83%] ........................................................................ [ 85%] ........................................................................ [ 87%] ........................................................................ [ 89%] ........................................................................ [ 91%] . [ 91%] tests/test_musepack.py ............ 
[ 91%] tests/test_ogg.py ................................................. [ 92%] tests/test_oggflac.py ................................... [ 93%] tests/test_oggopus.py ............................. [ 94%] tests/test_oggspeex.py ................................ [ 95%] tests/test_oggtheora.py .............................. [ 95%] tests/test_oggvorbis.py ........................................... [ 96%] tests/test_optimfrog.py ....... [ 97%] tests/test_smf.py .... [ 97%] tests/test_tak.py ......... [ 97%] tests/test_tools_mid3cp.py .............. [ 97%] tests/test_tools_mid3iconv.py ....... [ 97%] tests/test_tools_mid3v2.py ................................ [ 98%] tests/test_tools_moggsplit.py . [ 98%] tests/test_tools_mutagen_inspect.py . [ 98%] tests/test_tools_mutagen_pony.py . [ 98%] tests/test_tools_util.py .. [ 98%] tests/test_trueaudio.py .......... [ 99%] tests/test_wave.py ................. [ 99%] tests/test_wavpack.py ...................... [100%] =================================== FAILURES =================================== _____________________ TFileTypeAAC.test_test_fileobj_load ______________________ self = def test_test_fileobj_load(self): with open(self.filename, "rb") as h: @given(generate_test_file_objects(h, self.KIND)) def run(t): try: self.KIND(t) except MutagenError: pass > run() tests/test___init__.py:351: _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ @given(generate_test_file_objects(h, self.KIND)) > def run(t): E hypothesis.errors.FailedHealthCheck: Data generation is extremely slow: Only produced 2 valid examples in 1.05 seconds (0 invalid ones and 0 exceeded maximum size). Try decreasing size of the data you're generating (with e.g. max_size or max_leaves parameters). E See https://hypothesis.readthedocs.io/en/latest/healthchecks.html for more information about this. If you want to disable just this health check, add HealthCheck.too_slow to the suppress_health_check settings for this test. tests/test___init__.py:345: FailedHealthCheck ----------------------------- Captured stdout call ----------------------------- You can add @seed(249497224059367950272299615428071896821) to this test or run pytest with --hypothesis-seed=249497224059367950272299615428071896821 to reproduce this failure. _____________________ TFileTypeAAC.test_test_fileobj_save ______________________ self = def test_test_fileobj_save(self): with open(self.filename, "rb+") as h: o = self.KIND(_TestFileObj(h)) @given(generate_test_file_objects(h, lambda t: o.save(fileobj=t))) def run(t): try: o.save(fileobj=t) except MutagenError: pass > run() tests/test___init__.py:364: _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ @given(generate_test_file_objects(h, lambda t: o.save(fileobj=t))) > def run(t): E hypothesis.errors.FailedHealthCheck: Data generation is extremely slow: Only produced 3 valid examples in 1.01 seconds (0 invalid ones and 0 exceeded maximum size). Try decreasing size of the data you're generating (with e.g. max_size or max_leaves parameters). E See https://hypothesis.readthedocs.io/en/latest/healthchecks.html for more information about this. If you want to disable just this health check, add HealthCheck.too_slow to the suppress_health_check settings for this test. 
tests/test___init__.py:358: FailedHealthCheck ----------------------------- Captured stdout call ----------------------------- You can add @seed(79866412648170853650456318355643354203) to this test or run pytest with --hypothesis-seed=79866412648170853650456318355643354203 to reproduce this failure. ___________________ TFileTypeAAC_2.test_test_fileobj_delete ____________________ self = def test_test_fileobj_delete(self): with open(self.filename, "rb+") as h: o = self.KIND(_TestFileObj(h)) @given(generate_test_file_objects( h, lambda t: o.delete(fileobj=t))) def run(t): try: o.delete(fileobj=t) except MutagenError: pass > run() tests/test___init__.py:378: _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ @given(generate_test_file_objects( > h, lambda t: o.delete(fileobj=t))) E hypothesis.errors.FailedHealthCheck: Data generation is extremely slow: Only produced 3 valid examples in 1.01 seconds (0 invalid ones and 0 exceeded maximum size). Try decreasing size of the data you're generating (with e.g. max_size or max_leaves parameters). E See https://hypothesis.readthedocs.io/en/latest/healthchecks.html for more information about this. If you want to disable just this health check, add HealthCheck.too_slow to the suppress_health_check settings for this test. tests/test___init__.py:371: FailedHealthCheck ----------------------------- Captured stdout call ----------------------------- You can add @seed(230629639112586700469023338004329692196) to this test or run pytest with --hypothesis-seed=230629639112586700469023338004329692196 to reproduce this failure. ____________________ TFileTypeAAC_2.test_test_fileobj_load _____________________ self = def test_test_fileobj_load(self): with open(self.filename, "rb") as h: @given(generate_test_file_objects(h, self.KIND)) def run(t): try: self.KIND(t) except MutagenError: pass > run() tests/test___init__.py:351: _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ @given(generate_test_file_objects(h, self.KIND)) > def run(t): E hypothesis.errors.FailedHealthCheck: Data generation is extremely slow: Only produced 3 valid examples in 1.18 seconds (0 invalid ones and 0 exceeded maximum size). Try decreasing size of the data you're generating (with e.g. max_size or max_leaves parameters). E See https://hypothesis.readthedocs.io/en/latest/healthchecks.html for more information about this. If you want to disable just this health check, add HealthCheck.too_slow to the suppress_health_check settings for this test. tests/test___init__.py:345: FailedHealthCheck ----------------------------- Captured stdout call ----------------------------- You can add @seed(316273080355531680865190910934256948494) to this test or run pytest with --hypothesis-seed=316273080355531680865190910934256948494 to reproduce this failure. _____________________ TFileTypeAC3.test_test_fileobj_load ______________________ self = def test_test_fileobj_load(self): with open(self.filename, "rb") as h: @given(generate_test_file_objects(h, self.KIND)) def run(t): try: self.KIND(t) except MutagenError: pass > run() tests/test___init__.py:351: _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ @given(generate_test_file_objects(h, self.KIND)) > def run(t): E hypothesis.errors.FailedHealthCheck: Data generation is extremely slow: Only produced 6 valid examples in 1.10 seconds (0 invalid ones and 0 exceeded maximum size). Try decreasing size of the data you're generating (with e.g. max_size or max_leaves parameters). 
E See https://hypothesis.readthedocs.io/en/latest/healthchecks.html for more information about this. If you want to disable just this health check, add HealthCheck.too_slow to the suppress_health_check settings for this test. tests/test___init__.py:345: FailedHealthCheck ----------------------------- Captured stdout call ----------------------------- You can add @seed(186355081829035280127013823879158654740) to this test or run pytest with --hypothesis-seed=186355081829035280127013823879158654740 to reproduce this failure. ____________________ TFileTypeAC3_2.test_test_fileobj_load _____________________ self = def test_test_fileobj_load(self): with open(self.filename, "rb") as h: @given(generate_test_file_objects(h, self.KIND)) def run(t): try: self.KIND(t) except MutagenError: pass > run() tests/test___init__.py:351: _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ @given(generate_test_file_objects(h, self.KIND)) > def run(t): E hypothesis.errors.FailedHealthCheck: Data generation is extremely slow: Only produced 8 valid examples in 1.47 seconds (0 invalid ones and 0 exceeded maximum size). Try decreasing size of the data you're generating (with e.g. max_size or max_leaves parameters). E See https://hypothesis.readthedocs.io/en/latest/healthchecks.html for more information about this. If you want to disable just this health check, add HealthCheck.too_slow to the suppress_health_check settings for this test. tests/test___init__.py:345: FailedHealthCheck ----------------------------- Captured stdout call ----------------------------- You can add @seed(208918262790022644750773901853190882037) to this test or run pytest with --hypothesis-seed=208918262790022644750773901853190882037 to reproduce this failure. ____________________ TFileTypeAIFF.test_test_fileobj_delete ____________________ self = def test_test_fileobj_delete(self): with open(self.filename, "rb+") as h: o = self.KIND(_TestFileObj(h)) @given(generate_test_file_objects( h, lambda t: o.delete(fileobj=t))) def run(t): try: o.delete(fileobj=t) except MutagenError: pass > run() tests/test___init__.py:378: _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ @given(generate_test_file_objects( > h, lambda t: o.delete(fileobj=t))) E hypothesis.errors.FailedHealthCheck: Data generation is extremely slow: Only produced 3 valid examples in 1.33 seconds (0 invalid ones and 0 exceeded maximum size). Try decreasing size of the data you're generating (with e.g. max_size or max_leaves parameters). E See https://hypothesis.readthedocs.io/en/latest/healthchecks.html for more information about this. If you want to disable just this health check, add HealthCheck.too_slow to the suppress_health_check settings for this test. tests/test___init__.py:371: FailedHealthCheck ----------------------------- Captured stdout call ----------------------------- You can add @seed(86053680254530700088791772710325290903) to this test or run pytest with --hypothesis-seed=86053680254530700088791772710325290903 to reproduce this failure. 
_____________________ TFileTypeAIFF.test_test_fileobj_load _____________________
E   hypothesis.errors.FailedHealthCheck: Data generation is extremely slow: Only produced 6 valid examples in 1.19 seconds (0 invalid ones and 0 exceeded maximum size).
tests/test___init__.py:345: FailedHealthCheck
You can add @seed(268414604033633665471653045878003715984) to this test or run pytest with --hypothesis-seed=268414604033633665471653045878003715984 to reproduce this failure.
_____________________ TFileTypeAIFF.test_test_fileobj_save _____________________

self =

    def test_test_fileobj_save(self):
        with open(self.filename, "rb+") as h:
            o = self.KIND(_TestFileObj(h))

            @given(generate_test_file_objects(h, lambda t: o.save(fileobj=t)))
            def run(t):
                try:
                    o.save(fileobj=t)
                except MutagenError:
                    pass

>           run()

tests/test___init__.py:364:
_ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _

    @given(generate_test_file_objects(h, lambda t: o.save(fileobj=t)))
>   def run(t):
E   hypothesis.errors.FailedHealthCheck: Data generation is extremely slow: Only produced 8 valid examples in 1.08 seconds (0 invalid ones and 0 exceeded maximum size). Try decreasing size of the data you're generating (with e.g. max_size or max_leaves parameters).
E   See https://hypothesis.readthedocs.io/en/latest/healthchecks.html for more information about this. If you want to disable just this health check, add HealthCheck.too_slow to the suppress_health_check settings for this test.

tests/test___init__.py:358: FailedHealthCheck
----------------------------- Captured stdout call -----------------------------
You can add @seed(244122575954276991755695797316996270632) to this test or run pytest with --hypothesis-seed=244122575954276991755695797316996270632 to reproduce this failure.
____________________ TFileTypeAIFF_2.test_test_fileobj_load ____________________
E   hypothesis.errors.FailedHealthCheck: Data generation is extremely slow: Only produced 5 valid examples in 1.16 seconds (0 invalid ones and 0 exceeded maximum size).
tests/test___init__.py:345: FailedHealthCheck
You can add @seed(334600291134432628611976136855075720065) to this test or run pytest with --hypothesis-seed=334600291134432628611976136855075720065 to reproduce this failure.
____________________ TFileTypeAIFF_3.test_test_fileobj_load ____________________
E   hypothesis.errors.FailedHealthCheck: Data generation is extremely slow: Only produced 6 valid examples in 1.02 seconds (0 invalid ones and 0 exceeded maximum size).
tests/test___init__.py:345: FailedHealthCheck
You can add @seed(106557745704038969148473677830048062721) to this test or run pytest with --hypothesis-seed=106557745704038969148473677830048062721 to reproduce this failure.
____________________ TFileTypeAIFF_4.test_test_fileobj_load ____________________
E   hypothesis.errors.FailedHealthCheck: Data generation is extremely slow: Only produced 7 valid examples in 1.24 seconds (0 invalid ones and 0 exceeded maximum size).
tests/test___init__.py:345: FailedHealthCheck
You can add @seed(109585232170550511416767373576560068594) to this test or run pytest with --hypothesis-seed=109585232170550511416767373576560068594 to reproduce this failure.
____________________ TFileTypeAIFF_4.test_test_fileobj_save ____________________
E   hypothesis.errors.FailedHealthCheck: Data generation is extremely slow: Only produced 1 valid examples in 2.40 seconds (0 invalid ones and 0 exceeded maximum size).
tests/test___init__.py:358: FailedHealthCheck
You can add @seed(334962747251088289547814491270778328812) to this test or run pytest with --hypothesis-seed=334962747251088289547814491270778328812 to reproduce this failure.
____________________ TFileTypeAIFF_5.test_test_fileobj_load ____________________
E   hypothesis.errors.FailedHealthCheck: Data generation is extremely slow: Only produced 5 valid examples in 1.17 seconds (0 invalid ones and 0 exceeded maximum size).
tests/test___init__.py:345: FailedHealthCheck
You can add @seed(49516294712402834429472717793162921533) to this test or run pytest with --hypothesis-seed=49516294712402834429472717793162921533 to reproduce this failure.
____________________ TFileTypeAIFF_6.test_test_fileobj_load ____________________

self =
data = ConjectureData(INTERESTING, 2 bytes, frozen)

    def _execute_once_for_engine(self, data):
        """Wrapper around ``execute_once`` that intercepts test failure
        exceptions and single-test control exceptions, and turns them into
        appropriate method calls to `data` instead."""
>           result = self.execute_once(data)

/usr/lib/python3.12/site-packages/hypothesis/core.py:850:
_ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _

    def execute_once(self, data, print_example=False, is_final=False,
                     expected_failure=None, example_kwargs=None):
>       result = self.test_runner(data, run)

/usr/lib/python3.12/site-packages/hypothesis/core.py:789:
_ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _

    def default_new_style_executor(data, function):
>       return function(data)

/usr/lib/python3.12/site-packages/hypothesis/executors.py:47:
_ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _

    def run(data):
>       return test(*args, **kwargs)

/usr/lib/python3.12/site-packages/hypothesis/core.py:785:
_ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _

t =

    @given(generate_test_file_objects(h, self.KIND))
>   def run(t):

tests/test___init__.py:345:
_ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _

args = (,), kwargs = {}, initial_draws = 1, start = 9282973.582172085
result = None, finish = 9282973.919343283, internal_draw_time = 0
runtime = datetime.timedelta(microseconds=337171)
current_deadline = datetime.timedelta(microseconds=250000)

    @proxies(self.test)
    def test(*args, **kwargs):
        if runtime >= current_deadline:
>           raise DeadlineExceeded(runtime, self.settings.deadline)
E           hypothesis.errors.DeadlineExceeded: Test took 337.17ms, which exceeds the deadline of 200.00ms

/usr/lib/python3.12/site-packages/hypothesis/core.py:737: DeadlineExceeded

The above exception was the direct cause of the following exception:

self =

    def test_test_fileobj_load(self):
        with open(self.filename, "rb") as h:

            @given(generate_test_file_objects(h, self.KIND))
            def run(t):
                try:
                    self.KIND(t)
                except MutagenError:
                    pass

>           run()

tests/test___init__.py:351:
_ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _
tests/test___init__.py:345: in run
    def run(t):
_ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _

self =
data = ConjectureData(VALID, 2 bytes, frozen), print_example = True
is_final = True
expected_failure = (DeadlineExceeded('Test took 337.17ms, which exceeds the deadline of 200.00ms'), 'Traceback (most recent call last):\n....settings.deadline)\nhypothesis.errors.DeadlineExceeded: Test took 337.17ms, which exceeds the deadline of 200.00ms\n')
example_kwargs = None

>       raise Flaky(
            f"Hypothesis {text_repr} produces unreliable results: "
            "Falsified on the first call but did not on a subsequent one"
        ) from exception
E       hypothesis.errors.Flaky: Hypothesis run(t=) produces unreliable results: Falsified on the first call but did not on a subsequent one
E       Falsifying example: run(
E           t=(lambda i: _TestFileObj(fileobj, stop_after=i))(0),
E       )
E       Unreliable test timings! On an initial run, this test took 337.17ms, which exceeded the deadline of 200.00ms, but on a subsequent run it took 1.23 ms, which did not. If you expect this sort of variability in your test timings, consider turning deadlines off for this test by setting deadline=None.

/usr/lib/python3.12/site-packages/hypothesis/core.py:814: Flaky
__________________ TFileTypeAPEv2File.test_test_fileobj_load ___________________
E   hypothesis.errors.FailedHealthCheck: Data generation is extremely slow: Only produced 4 valid examples in 1.11 seconds (0 invalid ones and 0 exceeded maximum size).
tests/test___init__.py:345: FailedHealthCheck
You can add @seed(328627275547604454005252558616289780963) to this test or run pytest with --hypothesis-seed=328627275547604454005252558616289780963 to reproduce this failure.
_________________ TFileTypeAPEv2File_2.test_test_fileobj_load __________________
E   hypothesis.errors.FailedHealthCheck: Data generation is extremely slow: Only produced 5 valid examples in 1.04 seconds (0 invalid ones and 0 exceeded maximum size).
tests/test___init__.py:345: FailedHealthCheck
You can add @seed(596485727186065432522795380622987783) to this test or run pytest with --hypothesis-seed=596485727186065432522795380622987783 to reproduce this failure.
_________________ TFileTypeAPEv2File_2.test_test_fileobj_save __________________
E   hypothesis.errors.FailedHealthCheck: Data generation is extremely slow: Only produced 3 valid examples in 1.13 seconds (0 invalid ones and 0 exceeded maximum size).
tests/test___init__.py:358: FailedHealthCheck
You can add @seed(80094608045531519516428433812116211794) to this test or run pytest with --hypothesis-seed=80094608045531519516428433812116211794 to reproduce this failure.
____________________ TFileTypeASF.test_test_fileobj_delete _____________________
E   hypothesis.errors.FailedHealthCheck: Data generation is extremely slow: Only produced 4 valid examples in 1.14 seconds (0 invalid ones and 0 exceeded maximum size).
tests/test___init__.py:371: FailedHealthCheck
You can add @seed(96932870044983039386191497044510560359) to this test or run pytest with --hypothesis-seed=96932870044983039386191497044510560359 to reproduce this failure.
_____________________ TFileTypeASF.test_test_fileobj_load ______________________
E   hypothesis.errors.FailedHealthCheck: Data generation is extremely slow: Only produced 4 valid examples in 1.18 seconds (0 invalid ones and 0 exceeded maximum size).
tests/test___init__.py:345: FailedHealthCheck
You can add @seed(195623877671555918476015775486999271595) to this test or run pytest with --hypothesis-seed=195623877671555918476015775486999271595 to reproduce this failure.
_____________________ TFileTypeASF.test_test_fileobj_save ______________________

self =
data = ConjectureData(INTERESTING, 2 bytes, frozen)

    def _execute_once_for_engine(self, data):
>           result = self.execute_once(data)

/usr/lib/python3.12/site-packages/hypothesis/core.py:850:
_ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _
/usr/lib/python3.12/site-packages/hypothesis/core.py:789: in execute_once
    result = self.test_runner(data, run)
/usr/lib/python3.12/site-packages/hypothesis/executors.py:47: in default_new_style_executor
    return function(data)
/usr/lib/python3.12/site-packages/hypothesis/core.py:785: in run
    return test(*args, **kwargs)
_ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _

t =

    @given(generate_test_file_objects(h, lambda t: o.save(fileobj=t)))
>   def run(t):

tests/test___init__.py:358:
_ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _

args = (,), kwargs = {}, initial_draws = 1, start = 9283046.978939297
result = None, finish = 9283047.314928096, internal_draw_time = 0
runtime = datetime.timedelta(microseconds=335989)
current_deadline = datetime.timedelta(microseconds=250000)

    @proxies(self.test)
    def test(*args, **kwargs):
        if runtime >= current_deadline:
>           raise DeadlineExceeded(runtime, self.settings.deadline)
E           hypothesis.errors.DeadlineExceeded: Test took 335.99ms, which exceeds the deadline of 200.00ms

/usr/lib/python3.12/site-packages/hypothesis/core.py:737: DeadlineExceeded

The above exception was the direct cause of the following exception:

self =

    def test_test_fileobj_save(self):
        with open(self.filename, "rb+") as h:
            o = self.KIND(_TestFileObj(h))

            @given(generate_test_file_objects(h, lambda t: o.save(fileobj=t)))
            def run(t):
                try:
                    o.save(fileobj=t)
                except MutagenError:
                    pass

>           run()

tests/test___init__.py:364:
_ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _
tests/test___init__.py:358: in run
    def run(t):
_ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _

self =
data = ConjectureData(VALID, 2 bytes, frozen), print_example = True
is_final = True
expected_failure = (DeadlineExceeded('Test took 335.99ms, which exceeds the deadline of 200.00ms'), 'Traceback (most recent call last):\n....settings.deadline)\nhypothesis.errors.DeadlineExceeded: Test took 335.99ms, which exceeds the deadline of 200.00ms\n')
example_kwargs = None

>       raise Flaky(
            f"Hypothesis {text_repr} produces unreliable results: "
            "Falsified on the first call but did not on a subsequent one"
        ) from exception
E       hypothesis.errors.Flaky: Hypothesis run(t=) produces unreliable results: Falsified on the first call but did not on a subsequent one
E       Falsifying example: run(
E           t=(lambda i: _TestFileObj(fileobj, stop_after=i))(11),
E       )
E       Unreliable test timings! On an initial run, this test took 335.99ms, which exceeded the deadline of 200.00ms, but on a subsequent run it took 1.28 ms, which did not. If you expect this sort of variability in your test timings, consider turning deadlines off for this test by setting deadline=None.

/usr/lib/python3.12/site-packages/hypothesis/core.py:814: Flaky
___________________ TFileTypeASF_2.test_test_fileobj_delete ____________________
E   hypothesis.errors.FailedHealthCheck: Data generation is extremely slow: Only produced 6 valid examples in 1.22 seconds (0 invalid ones and 0 exceeded maximum size).
tests/test___init__.py:371: FailedHealthCheck
You can add @seed(121860875269199971859988829055129040257) to this test or run pytest with --hypothesis-seed=121860875269199971859988829055129040257 to reproduce this failure.
____________________ TFileTypeASF_2.test_test_fileobj_load _____________________
E   hypothesis.errors.FailedHealthCheck: Data generation is extremely slow: Only produced 5 valid examples in 1.15 seconds (0 invalid ones and 0 exceeded maximum size).
tests/test___init__.py:345: FailedHealthCheck
You can add @seed(30448754033161918220992657820808744098) to this test or run pytest with --hypothesis-seed=30448754033161918220992657820808744098 to reproduce this failure.
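Both Flaky failures above (TFileTypeAIFF_6 and TFileTypeASF) are deadline artifacts rather than real bugs: the first execution of an example crossed Hypothesis's 200ms per-example deadline, the replay did not, and Hypothesis reports that mismatch as Flaky. The error message itself names the remedy. A minimal sketch of it, again with st.binary() standing in for the test suite's real strategy:

    from hypothesis import given, settings
    import hypothesis.strategies as st

    @settings(deadline=None)  # disable the per-example deadline entirely
    @given(st.binary())
    def run(data):
        pass  # a slow-but-correct body no longer raises DeadlineExceeded

    run()

On a loaded build host, per-example wall-clock timing is noisy, which is why deadline=None, rather than merely a larger deadline, is the common choice for CI runs.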
____________________ TFileTypeASF_2.test_test_fileobj_save _____________________
E   hypothesis.errors.FailedHealthCheck: Data generation is extremely slow: Only produced 5 valid examples in 1.02 seconds (0 invalid ones and 0 exceeded maximum size).
tests/test___init__.py:358: FailedHealthCheck
You can add @seed(186435074152818944938768957325952543538) to this test or run pytest with --hypothesis-seed=186435074152818944938768957325952543538 to reproduce this failure.
___________________ TFileTypeASF_3.test_test_fileobj_delete ____________________
E   hypothesis.errors.FailedHealthCheck: Data generation is extremely slow: Only produced 4 valid examples in 1.18 seconds (0 invalid ones and 0 exceeded maximum size).
tests/test___init__.py:371: FailedHealthCheck
You can add @seed(175553766173919659042229673887313036395) to this test or run pytest with --hypothesis-seed=175553766173919659042229673887313036395 to reproduce this failure.
____________________ TFileTypeASF_3.test_test_fileobj_load _____________________
E   hypothesis.errors.FailedHealthCheck: Data generation is extremely slow: Only produced 6 valid examples in 1.18 seconds (0 invalid ones and 0 exceeded maximum size).
tests/test___init__.py:345: FailedHealthCheck
You can add @seed(327142471578075386264286560226326792548) to this test or run pytest with --hypothesis-seed=327142471578075386264286560226326792548 to reproduce this failure.
____________________ TFileTypeASF_3.test_test_fileobj_save _____________________
E   hypothesis.errors.FailedHealthCheck: Data generation is extremely slow: Only produced 3 valid examples in 1.13 seconds (0 invalid ones and 0 exceeded maximum size).
tests/test___init__.py:358: FailedHealthCheck
You can add @seed(141690791993242711580821439743341174021) to this test or run pytest with --hypothesis-seed=141690791993242711580821439743341174021 to reproduce this failure.
____________________ TFileTypeDSDIFF.test_test_fileobj_load ____________________
E   hypothesis.errors.FailedHealthCheck: Data generation is extremely slow: Only produced 2 valid examples in 1.11 seconds (0 invalid ones and 0 exceeded maximum size).
tests/test___init__.py:345: FailedHealthCheck
___________________ TFileTypeDSDIFF_2.test_test_fileobj_load ___________________
E   hypothesis.errors.FailedHealthCheck: Data generation is extremely slow: Only produced 4 valid examples in 1.36 seconds (0 invalid ones and 0 exceeded maximum size).
tests/test___init__.py:345: FailedHealthCheck
You can add @seed(292921843654054143744108161829599953661) to this test or run pytest with --hypothesis-seed=292921843654054143744108161829599953661 to reproduce this failure.
__________________ TFileTypeDSDIFF_3.test_test_fileobj_delete __________________
E   hypothesis.errors.FailedHealthCheck: Data generation is extremely slow: Only produced 4 valid examples in 1.01 seconds (0 invalid ones and 0 exceeded maximum size).
tests/test___init__.py:371: FailedHealthCheck
You can add @seed(318782989867673924217686317257037865641) to this test or run pytest with --hypothesis-seed=318782989867673924217686317257037865641 to reproduce this failure.
___________________ TFileTypeDSDIFF_3.test_test_fileobj_load ___________________
E   hypothesis.errors.FailedHealthCheck: Data generation is extremely slow: Only produced 4 valid examples in 1.20 seconds (0 invalid ones and 0 exceeded maximum size).
tests/test___init__.py:345: FailedHealthCheck
You can add @seed(48429265416468776184928427390651184394) to this test or run pytest with --hypothesis-seed=48429265416468776184928427390651184394 to reproduce this failure.
___________________ TFileTypeDSDIFF_3.test_test_fileobj_save ___________________
E   hypothesis.errors.FailedHealthCheck: Data generation is extremely slow: Only produced 4 valid examples in 1.14 seconds (0 invalid ones and 0 exceeded maximum size).
tests/test___init__.py:358: FailedHealthCheck
You can add @seed(217642262227047600602381660935141123050) to this test or run pytest with --hypothesis-seed=217642262227047600602381660935141123050 to reproduce this failure.
____________________ TFileTypeDSF.test_test_fileobj_delete _____________________
E   hypothesis.errors.FailedHealthCheck: Data generation is extremely slow: Only produced 4 valid examples in 1.16 seconds (0 invalid ones and 0 exceeded maximum size).
tests/test___init__.py:371: FailedHealthCheck
You can add @seed(28904933407257950224662201186919902596) to this test or run pytest with --hypothesis-seed=28904933407257950224662201186919902596 to reproduce this failure.
_____________________ TFileTypeDSF.test_test_fileobj_load ______________________
E   hypothesis.errors.FailedHealthCheck: Data generation is extremely slow: Only produced 5 valid examples in 1.24 seconds (0 invalid ones and 0 exceeded maximum size).
tests/test___init__.py:345: FailedHealthCheck
You can add @seed(2351731395787523048901287105432533575) to this test or run pytest with --hypothesis-seed=2351731395787523048901287105432533575 to reproduce this failure.
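Each captured-stdout line gives two equivalent ways to replay a specific failure. A sketch reusing the seed from TFileTypeDSF.test_test_fileobj_load just above (the strategy is again a stand-in, not the suite's real one):

    from hypothesis import given, seed
    import hypothesis.strategies as st

    @seed(2351731395787523048901287105432533575)  # seed copied from the log above
    @given(st.binary())
    def run(data):
        pass

    run()

Or, without editing any test, via the Hypothesis pytest plugin:

    python -m pytest tests/test___init__.py -k TFileTypeDSF --hypothesis-seed=2351731395787523048901287105432533575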
___________________ TFileTypeDSF_2.test_test_fileobj_delete ____________________

        @given(generate_test_file_objects(
>           h, lambda t: o.delete(fileobj=t)))
E       hypothesis.errors.FailedHealthCheck: Data generation is extremely slow: Only produced 6 valid examples in 1.03 seconds (0 invalid ones and 0 exceeded maximum size). Try decreasing size of the data you're generating (with e.g. max_size or max_leaves parameters).
E       See https://hypothesis.readthedocs.io/en/latest/healthchecks.html for more information about this. If you want to disable just this health check, add HealthCheck.too_slow to the suppress_health_check settings for this test.

tests/test___init__.py:371: FailedHealthCheck
----------------------------- Captured stdout call -----------------------------
You can add @seed(274489338134864424880457147490240957429) to this test or run pytest with --hypothesis-seed=274489338134864424880457147490240957429 to reproduce this failure.
____________________ TFileTypeDSF_2.test_test_fileobj_load _____________________

        @given(generate_test_file_objects(h, self.KIND))
>       def run(t):
E       hypothesis.errors.FailedHealthCheck: Data generation is extremely slow: Only produced 6 valid examples in 1.19 seconds (0 invalid ones and 0 exceeded maximum size). Try decreasing size of the data you're generating (with e.g. max_size or max_leaves parameters).
E       See https://hypothesis.readthedocs.io/en/latest/healthchecks.html for more information about this. If you want to disable just this health check, add HealthCheck.too_slow to the suppress_health_check settings for this test.

tests/test___init__.py:345: FailedHealthCheck
----------------------------- Captured stdout call -----------------------------
You can add @seed(58565080924818720151677468307634644473) to this test or run pytest with --hypothesis-seed=58565080924818720151677468307634644473 to reproduce this failure.
___________________ TFileTypeDSF_3.test_test_fileobj_delete ____________________

        @given(generate_test_file_objects(
>           h, lambda t: o.delete(fileobj=t)))
E       hypothesis.errors.FailedHealthCheck: Data generation is extremely slow: Only produced 4 valid examples in 1.01 seconds (0 invalid ones and 0 exceeded maximum size). Try decreasing size of the data you're generating (with e.g. max_size or max_leaves parameters).
E       See https://hypothesis.readthedocs.io/en/latest/healthchecks.html for more information about this. If you want to disable just this health check, add HealthCheck.too_slow to the suppress_health_check settings for this test.

tests/test___init__.py:371: FailedHealthCheck
----------------------------- Captured stdout call -----------------------------
You can add @seed(162548626485546311655216148872687768403) to this test or run pytest with --hypothesis-seed=162548626485546311655216148872687768403 to reproduce this failure.
____________________ TFileTypeDSF_3.test_test_fileobj_load _____________________

        @given(generate_test_file_objects(h, self.KIND))
>       def run(t):
E       hypothesis.errors.FailedHealthCheck: Data generation is extremely slow: Only produced 4 valid examples in 1.02 seconds (0 invalid ones and 0 exceeded maximum size). Try decreasing size of the data you're generating (with e.g. max_size or max_leaves parameters).
E       See https://hypothesis.readthedocs.io/en/latest/healthchecks.html for more information about this. If you want to disable just this health check, add HealthCheck.too_slow to the suppress_health_check settings for this test.

tests/test___init__.py:345: FailedHealthCheck
----------------------------- Captured stdout call -----------------------------
You can add @seed(281934238207648921154398879877510955450) to this test or run pytest with --hypothesis-seed=281934238207648921154398879877510955450 to reproduce this failure.
____________________ TFileTypeDSF_3.test_test_fileobj_save _____________________

        @given(generate_test_file_objects(h, lambda t: o.save(fileobj=t)))
>       def run(t):
E       hypothesis.errors.FailedHealthCheck: Data generation is extremely slow: Only produced 3 valid examples in 1.32 seconds (0 invalid ones and 0 exceeded maximum size). Try decreasing size of the data you're generating (with e.g. max_size or max_leaves parameters).
E       See https://hypothesis.readthedocs.io/en/latest/healthchecks.html for more information about this. If you want to disable just this health check, add HealthCheck.too_slow to the suppress_health_check settings for this test.

tests/test___init__.py:358: FailedHealthCheck
----------------------------- Captured stdout call -----------------------------
You can add @seed(80049841018836603595861129643591725497) to this test or run pytest with --hypothesis-seed=80049841018836603595861129643591725497 to reproduce this failure.
___________________ TFileTypeDSF_4.test_test_fileobj_delete ____________________

        @given(generate_test_file_objects(
>           h, lambda t: o.delete(fileobj=t)))
E       hypothesis.errors.FailedHealthCheck: Data generation is extremely slow: Only produced 4 valid examples in 1.12 seconds (0 invalid ones and 0 exceeded maximum size). Try decreasing size of the data you're generating (with e.g. max_size or max_leaves parameters).
E       See https://hypothesis.readthedocs.io/en/latest/healthchecks.html for more information about this. If you want to disable just this health check, add HealthCheck.too_slow to the suppress_health_check settings for this test.

tests/test___init__.py:371: FailedHealthCheck
----------------------------- Captured stdout call -----------------------------
You can add @seed(30040028469773614464541662643129655628) to this test or run pytest with --hypothesis-seed=30040028469773614464541662643129655628 to reproduce this failure.
____________________ TFileTypeDSF_4.test_test_fileobj_load _____________________

        @given(generate_test_file_objects(h, self.KIND))
>       def run(t):
E       hypothesis.errors.FailedHealthCheck: Data generation is extremely slow: Only produced 6 valid examples in 1.56 seconds (0 invalid ones and 0 exceeded maximum size). Try decreasing size of the data you're generating (with e.g. max_size or max_leaves parameters).
E       See https://hypothesis.readthedocs.io/en/latest/healthchecks.html for more information about this. If you want to disable just this health check, add HealthCheck.too_slow to the suppress_health_check settings for this test.

tests/test___init__.py:345: FailedHealthCheck
----------------------------- Captured stdout call -----------------------------
You can add @seed(58856423199987879201248529483010358703) to this test or run pytest with --hypothesis-seed=58856423199987879201248529483010358703 to reproduce this failure.
____________________ TFileTypeFLAC.test_test_fileobj_delete ____________________

        @given(generate_test_file_objects(
>           h, lambda t: o.delete(fileobj=t)))
E       hypothesis.errors.FailedHealthCheck: Data generation is extremely slow: Only produced 5 valid examples in 1.07 seconds (0 invalid ones and 0 exceeded maximum size). Try decreasing size of the data you're generating (with e.g. max_size or max_leaves parameters).
E       See https://hypothesis.readthedocs.io/en/latest/healthchecks.html for more information about this. If you want to disable just this health check, add HealthCheck.too_slow to the suppress_health_check settings for this test.

tests/test___init__.py:371: FailedHealthCheck
----------------------------- Captured stdout call -----------------------------
You can add @seed(218520660415984571775796926128160826928) to this test or run pytest with --hypothesis-seed=218520660415984571775796926128160826928 to reproduce this failure.
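As the captured stdout lines note, each of these failures can be replayed deterministically. A sketch using the seed printed just above; st.binary() is again a stand-in for the suite's real strategy:

    from hypothesis import given, seed, strategies as st

    # Pin Hypothesis's data generation to the seed copied from the log.
    @seed(218520660415984571775796926128160826928)
    @given(st.binary())
    def test_replay(data):
        pass  # test body elided

Or, without editing the test file, pass the seed on the command line:

    pytest --hypothesis-seed=218520660415984571775796926128160826928 tests/test___init__.py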
_____________________ TFileTypeFLAC.test_test_fileobj_load _____________________

        @given(generate_test_file_objects(h, self.KIND))
>       def run(t):
E       hypothesis.errors.FailedHealthCheck: Data generation is extremely slow: Only produced 6 valid examples in 1.05 seconds (0 invalid ones and 0 exceeded maximum size). Try decreasing size of the data you're generating (with e.g. max_size or max_leaves parameters).
E       See https://hypothesis.readthedocs.io/en/latest/healthchecks.html for more information about this. If you want to disable just this health check, add HealthCheck.too_slow to the suppress_health_check settings for this test.

tests/test___init__.py:345: FailedHealthCheck
----------------------------- Captured stdout call -----------------------------
You can add @seed(55513496445756040351017521526768685197) to this test or run pytest with --hypothesis-seed=55513496445756040351017521526768685197 to reproduce this failure.
_____________________ TFileTypeFLAC.test_test_fileobj_save _____________________

        @given(generate_test_file_objects(h, lambda t: o.save(fileobj=t)))
>       def run(t):
E       hypothesis.errors.FailedHealthCheck: Data generation is extremely slow: Only produced 4 valid examples in 1.15 seconds (0 invalid ones and 0 exceeded maximum size). Try decreasing size of the data you're generating (with e.g. max_size or max_leaves parameters).
E       See https://hypothesis.readthedocs.io/en/latest/healthchecks.html for more information about this. If you want to disable just this health check, add HealthCheck.too_slow to the suppress_health_check settings for this test.

tests/test___init__.py:358: FailedHealthCheck
----------------------------- Captured stdout call -----------------------------
You can add @seed(60048366516614495181636930984356065902) to this test or run pytest with --hypothesis-seed=60048366516614495181636930984356065902 to reproduce this failure.
________________ TFileTypeID3FileType.test_test_fileobj_delete _________________

        @given(generate_test_file_objects(
>           h, lambda t: o.delete(fileobj=t)))
E       hypothesis.errors.FailedHealthCheck: Data generation is extremely slow: Only produced 4 valid examples in 1.04 seconds (0 invalid ones and 0 exceeded maximum size). Try decreasing size of the data you're generating (with e.g. max_size or max_leaves parameters).
E       See https://hypothesis.readthedocs.io/en/latest/healthchecks.html for more information about this. If you want to disable just this health check, add HealthCheck.too_slow to the suppress_health_check settings for this test.

tests/test___init__.py:371: FailedHealthCheck
----------------------------- Captured stdout call -----------------------------
You can add @seed(295230290590597542973399952676113707808) to this test or run pytest with --hypothesis-seed=295230290590597542973399952676113707808 to reproduce this failure.
_________________ TFileTypeID3FileType.test_test_fileobj_load __________________

        @given(generate_test_file_objects(h, self.KIND))
>       def run(t):
E       hypothesis.errors.FailedHealthCheck: Data generation is extremely slow: Only produced 7 valid examples in 1.19 seconds (0 invalid ones and 0 exceeded maximum size). Try decreasing size of the data you're generating (with e.g. max_size or max_leaves parameters).
E       See https://hypothesis.readthedocs.io/en/latest/healthchecks.html for more information about this. If you want to disable just this health check, add HealthCheck.too_slow to the suppress_health_check settings for this test.

tests/test___init__.py:345: FailedHealthCheck
----------------------------- Captured stdout call -----------------------------
You can add @seed(183413220989165240390150837647629088514) to this test or run pytest with --hypothesis-seed=183413220989165240390150837647629088514 to reproduce this failure.
_________________ TFileTypeID3FileType.test_test_fileobj_save __________________

        @given(generate_test_file_objects(h, lambda t: o.save(fileobj=t)))
>       def run(t):
E       hypothesis.errors.FailedHealthCheck: Data generation is extremely slow: Only produced 5 valid examples in 1.38 seconds (0 invalid ones and 0 exceeded maximum size). Try decreasing size of the data you're generating (with e.g. max_size or max_leaves parameters).
E       See https://hypothesis.readthedocs.io/en/latest/healthchecks.html for more information about this. If you want to disable just this health check, add HealthCheck.too_slow to the suppress_health_check settings for this test.

tests/test___init__.py:358: FailedHealthCheck
________________ TFileTypeID3FileType_2.test_test_fileobj_load _________________

self =
data = ConjectureData(INTERESTING, 2 bytes, frozen)

    def _execute_once_for_engine(self, data):
        """Wrapper around ``execute_once`` that intercepts test failure
        exceptions and single-test control exceptions, and turns them into
        appropriate method calls to `data` instead. ..."""
        ...
>           result = self.execute_once(data)

/usr/lib/python3.12/site-packages/hypothesis/core.py:850:
_ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _

self =
data = ConjectureData(INTERESTING, 2 bytes, frozen), print_example = False
is_final = False, expected_failure = None, example_kwargs = None

    def execute_once(self, data, print_example=False, is_final=False,
                     expected_failure=None, example_kwargs=None):
        """Run the test function once, using ``data`` as input. ..."""
        ...
>       result = self.test_runner(data, run)

/usr/lib/python3.12/site-packages/hypothesis/core.py:789:
_ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _

data = ConjectureData(INTERESTING, 2 bytes, frozen)
function = .run at 0xffffffb9a49f80>

    def default_new_style_executor(data, function):
>       return function(data)

/usr/lib/python3.12/site-packages/hypothesis/executors.py:47:
_ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _

    def run(data):
        # Set up dynamic context needed by a single test run.
        ...
>       return test(*args, **kwargs)

/usr/lib/python3.12/site-packages/hypothesis/core.py:785:
_ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _

t =

        @given(generate_test_file_objects(h, self.KIND))
>       def run(t):

tests/test___init__.py:345:
_ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _

args = (,), kwargs = {}, initial_draws = 1, start = 9283329.01479919, result = None
finish = 9283329.314988088, internal_draw_time = 0
runtime = datetime.timedelta(microseconds=300189)
current_deadline = datetime.timedelta(microseconds=250000)

        @proxies(self.test)
        def test(*args, **kwargs):
            ...
            if runtime >= current_deadline:
>               raise DeadlineExceeded(runtime, self.settings.deadline)
E               hypothesis.errors.DeadlineExceeded: Test took 300.19ms, which exceeds the deadline of 200.00ms

/usr/lib/python3.12/site-packages/hypothesis/core.py:737: DeadlineExceeded

The above exception was the direct cause of the following exception:

self =

    def test_test_fileobj_load(self):
        with open(self.filename, "rb") as h:

            @given(generate_test_file_objects(h, self.KIND))
            def run(t):
                try:
                    self.KIND(t)
                except MutagenError:
                    pass

>           run()

tests/test___init__.py:351:
_ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _
tests/test___init__.py:345: in run
    def run(t):
_ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _

self =
data = ConjectureData(VALID, 2 bytes, frozen), print_example = True
is_final = True
expected_failure = (DeadlineExceeded('Test took 300.19ms, which exceeds the deadline of 200.00ms'), 'Traceback (most recent call last):\n....settings.deadline)\nhypothesis.errors.DeadlineExceeded: Test took 300.19ms, which exceeds the deadline of 200.00ms\n')
example_kwargs = None

    def execute_once(self, data, print_example=False, is_final=False,
                     expected_failure=None, example_kwargs=None):
        ...
        if expected_failure is not None:
            exception, traceback = expected_failure
            ...
>           raise Flaky(
                f"Hypothesis {text_repr} produces unreliable results: "
                "Falsified on the first call but did not on a subsequent one"
            ) from exception
E           hypothesis.errors.Flaky: Hypothesis run(t=) produces unreliable results: Falsified on the first call but did not on a subsequent one
E           Falsifying example: run(
E               t=(lambda i: _TestFileObj(fileobj, fail_after=i))(3),
E           )
E           Unreliable test timings! On an initial run, this test took 300.19ms, which exceeded the deadline of 200.00ms, but on a subsequent run it took 1.51 ms, which did not. If you expect this sort of variability in your test timings, consider turning deadlines off for this test by setting deadline=None.

/usr/lib/python3.12/site-packages/hypothesis/core.py:814: Flaky
_______________ TFileTypeID3FileType_3.test_test_fileobj_delete ________________

        @given(generate_test_file_objects(
>           h, lambda t: o.delete(fileobj=t)))
E       hypothesis.errors.FailedHealthCheck: Data generation is extremely slow: Only produced 7 valid examples in 1.14 seconds (0 invalid ones and 0 exceeded maximum size). Try decreasing size of the data you're generating (with e.g. max_size or max_leaves parameters).
E       See https://hypothesis.readthedocs.io/en/latest/healthchecks.html for more information about this. If you want to disable just this health check, add HealthCheck.too_slow to the suppress_health_check settings for this test.

tests/test___init__.py:371: FailedHealthCheck
----------------------------- Captured stdout call -----------------------------
You can add @seed(189575858213923826986637810385523954585) to this test or run pytest with --hypothesis-seed=189575858213923826986637810385523954585 to reproduce this failure.
________________ TFileTypeID3FileType_3.test_test_fileobj_load _________________

        @given(generate_test_file_objects(h, self.KIND))
>       def run(t):
E       hypothesis.errors.FailedHealthCheck: Data generation is extremely slow: Only produced 6 valid examples in 1.17 seconds (0 invalid ones and 0 exceeded maximum size). Try decreasing size of the data you're generating (with e.g. max_size or max_leaves parameters).
E       See https://hypothesis.readthedocs.io/en/latest/healthchecks.html for more information about this. If you want to disable just this health check, add HealthCheck.too_slow to the suppress_health_check settings for this test.

tests/test___init__.py:345: FailedHealthCheck
----------------------------- Captured stdout call -----------------------------
You can add @seed(159556010065974924816174693809124560898) to this test or run pytest with --hypothesis-seed=159556010065974924816174693809124560898 to reproduce this failure.
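The deadline advice in the Flaky report above maps onto the same settings mechanism. A minimal sketch; st.binary() is a placeholder strategy:

    from hypothesis import given, settings, strategies as st

    # deadline=None removes the per-example time limit entirely, avoiding
    # both DeadlineExceeded and the follow-up Flaky "unreliable timings" error.
    @settings(deadline=None)
    @given(st.binary())
    def test_slow_io(data):
        pass  # test body elided

settings(deadline=...) also accepts a number of milliseconds or a datetime.timedelta, for a looser but still-enforced limit.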
________________ TFileTypeID3FileType_3.test_test_fileobj_save _________________

        @given(generate_test_file_objects(h, lambda t: o.save(fileobj=t)))
>       def run(t):
E       hypothesis.errors.FailedHealthCheck: Data generation is extremely slow: Only produced 3 valid examples in 1.17 seconds (0 invalid ones and 0 exceeded maximum size). Try decreasing size of the data you're generating (with e.g. max_size or max_leaves parameters).
E       See https://hypothesis.readthedocs.io/en/latest/healthchecks.html for more information about this. If you want to disable just this health check, add HealthCheck.too_slow to the suppress_health_check settings for this test.

tests/test___init__.py:358: FailedHealthCheck
----------------------------- Captured stdout call -----------------------------
You can add @seed(260327883569859331626800187269352446104) to this test or run pytest with --hypothesis-seed=260327883569859331626800187269352446104 to reproduce this failure.
________________ TFileTypeID3FileType_4.test_test_fileobj_load _________________

self =
data = ConjectureData(INTERESTING, 3 bytes, frozen)

    def _execute_once_for_engine(self, data):
        ...
>           result = self.execute_once(data)

/usr/lib/python3.12/site-packages/hypothesis/core.py:850:
_ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _

    def execute_once(self, data, ...):
        ...
>       result = self.test_runner(data, run)

/usr/lib/python3.12/site-packages/hypothesis/core.py:789:
_ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _

data = ConjectureData(INTERESTING, 3 bytes, frozen)
function = .run at 0xffffffb994b2e0>

    def default_new_style_executor(data, function):
>       return function(data)

/usr/lib/python3.12/site-packages/hypothesis/executors.py:47:
_ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _

    def run(data):
        ...
>       return test(*args, **kwargs)

/usr/lib/python3.12/site-packages/hypothesis/core.py:785:
_ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _

t =

        @given(generate_test_file_objects(h, self.KIND))
>       def run(t):

tests/test___init__.py:345:
_ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _

args = (,), kwargs = {}, initial_draws = 1, start = 9283384.990783272, result = None
finish = 9283385.31217067, internal_draw_time = 0
runtime = datetime.timedelta(microseconds=321387)
current_deadline = datetime.timedelta(microseconds=250000)

        @proxies(self.test)
        def test(*args, **kwargs):
            ...
            if runtime >= current_deadline:
>               raise DeadlineExceeded(runtime, self.settings.deadline)
E               hypothesis.errors.DeadlineExceeded: Test took 321.39ms, which exceeds the deadline of 200.00ms

/usr/lib/python3.12/site-packages/hypothesis/core.py:737: DeadlineExceeded

The above exception was the direct cause of the following exception:

self =

    def test_test_fileobj_load(self):
        with open(self.filename, "rb") as h:

            @given(generate_test_file_objects(h, self.KIND))
            def run(t):
                try:
                    self.KIND(t)
                except MutagenError:
                    pass

>           run()

tests/test___init__.py:351:
_ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _
tests/test___init__.py:345: in run
    def run(t):
_ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _

self =
data = ConjectureData(VALID, 3 bytes, frozen), print_example = True
is_final = True
expected_failure = (DeadlineExceeded('Test took 321.39ms, which exceeds the deadline of 200.00ms'), 'Traceback (most recent call last):\n....settings.deadline)\nhypothesis.errors.DeadlineExceeded: Test took 321.39ms, which exceeds the deadline of 200.00ms\n')
example_kwargs = None

    def execute_once(self, data, ...):
        ...
>       raise Flaky(
            f"Hypothesis {text_repr} produces unreliable results: "
            "Falsified on the first call but did not on a subsequent one"
        ) from exception
E       hypothesis.errors.Flaky: Hypothesis run(t=) produces unreliable results: Falsified on the first call but did not on a subsequent one
E       Falsifying example: run(
E           t=(lambda i: _TestFileObj(fileobj, stop_after=i))(0),
E       )
E       Unreliable test timings! On an initial run, this test took 321.39ms, which exceeded the deadline of 200.00ms, but on a subsequent run it took 2.13 ms, which did not. If you expect this sort of variability in your test timings, consider turning deadlines off for this test by setting deadline=None.

/usr/lib/python3.12/site-packages/hypothesis/core.py:814: Flaky
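The falsifying examples wrap the real file handle in the suite's _TestFileObj with fail_after/stop_after arguments, i.e. a file object that injects an I/O failure after a fixed number of operations. A rough illustration of that idea (not mutagen's actual helper):

    import io

    class FailingFileObj:
        """Proxy a file object, raising after a fixed number of operations."""

        def __init__(self, fileobj, fail_after):
            self._f = fileobj
            self._ops_left = fail_after

        def _tick(self):
            if self._ops_left <= 0:
                raise IOError("injected failure")
            self._ops_left -= 1

        def read(self, size=-1):
            self._tick()
            return self._f.read(size)

        def seek(self, offset, whence=io.SEEK_SET):
            self._tick()
            return self._f.seek(offset, whence)

        def tell(self):
            self._tick()
            return self._f.tell()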
____________________ TFileTypeMP3.test_test_fileobj_delete _____________________

        @given(generate_test_file_objects(
>           h, lambda t: o.delete(fileobj=t)))
E       hypothesis.errors.FailedHealthCheck: Data generation is extremely slow: Only produced 5 valid examples in 1.13 seconds (0 invalid ones and 0 exceeded maximum size). Try decreasing size of the data you're generating (with e.g. max_size or max_leaves parameters).
E       See https://hypothesis.readthedocs.io/en/latest/healthchecks.html for more information about this. If you want to disable just this health check, add HealthCheck.too_slow to the suppress_health_check settings for this test.

tests/test___init__.py:371: FailedHealthCheck
----------------------------- Captured stdout call -----------------------------
You can add @seed(191985617847267432837004651987238830774) to this test or run pytest with --hypothesis-seed=191985617847267432837004651987238830774 to reproduce this failure.
_____________________ TFileTypeMP3.test_test_fileobj_load ______________________

self =
data = ConjectureData(INTERESTING, 2 bytes, frozen)
function = .run at 0xffffffb9a723e0>

t =

        @given(generate_test_file_objects(h, self.KIND))
>       def run(t):

tests/test___init__.py:345:
_ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _

args = (,), kwargs = {}, initial_draws = 1, start = 9283423.18811672, result = None
finish = 9283423.54536852, internal_draw_time = 0
runtime = datetime.timedelta(microseconds=357252)
current_deadline = datetime.timedelta(microseconds=250000)

        @proxies(self.test)
        def test(*args, **kwargs):
            ...
            if runtime >= current_deadline:
>               raise DeadlineExceeded(runtime, self.settings.deadline)
E               hypothesis.errors.DeadlineExceeded: Test took 357.25ms, which exceeds the deadline of 200.00ms

/usr/lib/python3.12/site-packages/hypothesis/core.py:737: DeadlineExceeded

The above exception was the direct cause of the following exception:

self =

    def test_test_fileobj_load(self):
        with open(self.filename, "rb") as h:

            @given(generate_test_file_objects(h, self.KIND))
            def run(t):
                try:
                    self.KIND(t)
                except MutagenError:
                    pass

>           run()

tests/test___init__.py:351:
_ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _
tests/test___init__.py:345: in run
    def run(t):
_ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _

self =
data = ConjectureData(VALID, 2 bytes, frozen), print_example = True
is_final = True
expected_failure = (DeadlineExceeded('Test took 357.25ms, which exceeds the deadline of 200.00ms'), 'Traceback (most recent call last):\n....settings.deadline)\nhypothesis.errors.DeadlineExceeded: Test took 357.25ms, which exceeds the deadline of 200.00ms\n')
example_kwargs = None

>       raise Flaky(
            f"Hypothesis {text_repr} produces unreliable results: "
            "Falsified on the first call but did not on a subsequent one"
        ) from exception
E       hypothesis.errors.Flaky: Hypothesis run(t=) produces unreliable results: Falsified on the first call but did not on a subsequent one
E       Falsifying example: run(
E           t=(lambda i: _TestFileObj(fileobj, fail_after=i))(18),
E       )
E       Unreliable test timings! On an initial run, this test took 357.25ms, which exceeded the deadline of 200.00ms, but on a subsequent run it took 26.34 ms, which did not. If you expect this sort of variability in your test timings, consider turning deadlines off for this test by setting deadline=None.

/usr/lib/python3.12/site-packages/hypothesis/core.py:814: Flaky
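The API these tests exercise is mutagen's file-object support: constructing a FileType from an open handle and saving or deleting tags through fileobj=. A condensed sketch of the pattern, mirroring the test code shown in the tracebacks (the path is a placeholder):

    from mutagen import MutagenError
    from mutagen.mp3 import MP3

    with open("song.mp3", "rb+") as h:
        audio = MP3(h)                # load tags from a file object
        try:
            audio.save(fileobj=h)     # write tags back through a file object
        except MutagenError:
            pass                      # the clean failure mode the tests accept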
_____________________ TFileTypeMP3.test_test_fileobj_save ______________________

data = ConjectureData(INTERESTING, 3 bytes, frozen)

    def _execute_once_for_engine(self, data):
        """Wrapper around ``execute_once`` that intercepts test failure
        exceptions and single-test control exceptions, and turns them into
        appropriate method calls to `data` instead.
        """
        ...
>           result = self.execute_once(data)

/usr/lib/python3.12/site-packages/hypothesis/core.py:850:
_ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _

    def execute_once(self, data, print_example=False, is_final=False,
                     expected_failure=None, example_kwargs=None):
        ...
>       result = self.test_runner(data, run)

/usr/lib/python3.12/site-packages/hypothesis/core.py:789:
_ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _

data = ConjectureData(INTERESTING, 3 bytes, frozen)
function = .run at 0xffffffb98182c0>

    def default_new_style_executor(data, function):
>       return function(data)

/usr/lib/python3.12/site-packages/hypothesis/executors.py:47:
_ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _

    def run(data):
        ...
>       return test(*args, **kwargs)

/usr/lib/python3.12/site-packages/hypothesis/core.py:785:
_ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _

    @given(generate_test_file_objects(h, lambda t: o.save(fileobj=t)))
>   def run(t):

tests/test___init__.py:358:
_ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _

args = (,), kwargs = {}, initial_draws = 1
start = 9283442.618577944, finish = 9283443.147533145, result = None
internal_draw_time = 0
runtime = datetime.timedelta(microseconds=528955)
current_deadline = datetime.timedelta(microseconds=250000)

    @proxies(self.test)
    def test(*args, **kwargs):
        ...
        if runtime >= current_deadline:
>           raise DeadlineExceeded(runtime, self.settings.deadline)
E           hypothesis.errors.DeadlineExceeded: Test took 528.95ms, which exceeds the deadline of 200.00ms

/usr/lib/python3.12/site-packages/hypothesis/core.py:737: DeadlineExceeded

The above exception was the direct cause of the following exception:

    def test_test_fileobj_save(self):
        with open(self.filename, "rb+") as h:
            o = self.KIND(_TestFileObj(h))

            @given(generate_test_file_objects(h, lambda t: o.save(fileobj=t)))
            def run(t):
                try:
                    o.save(fileobj=t)
                except MutagenError:
                    pass

>           run()

tests/test___init__.py:364:
_ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _
tests/test___init__.py:358: in run
    def run(t):
_ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _

data = ConjectureData(VALID, 3 bytes, frozen), print_example = True
is_final = True
expected_failure = (DeadlineExceeded('Test took 528.95ms, which exceeds the deadline of 200.00ms'), 'Traceback (most recent call last):\n....settings.deadline)\nhypothesis.errors.DeadlineExceeded: Test took 528.95ms, which exceeds the deadline of 200.00ms\n')
example_kwargs = None

    def execute_once(self, data, print_example=False, is_final=False,
                     expected_failure=None, example_kwargs=None):
        ...
>       raise Flaky(
            f"Hypothesis {text_repr} produces unreliable results: "
            "Falsified on the first call but did not on a subsequent one"
        ) from exception
E   hypothesis.errors.Flaky: Hypothesis run(t=) produces unreliable results: Falsified on the first call but did not on a subsequent one
E   Falsifying example: run(
E       t=(lambda i: _TestFileObj(fileobj, stop_after=i))(0),
E   )
E   Unreliable test timings! On an initial run, this test took 528.95ms, which exceeded the deadline of 200.00ms, but on a subsequent run it took 8.91 ms, which did not. If you expect this sort of variability in your test timings, consider turning deadlines off for this test by setting deadline=None.

/usr/lib/python3.12/site-packages/hypothesis/core.py:814: Flaky
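One detail worth noting in these tracebacks: the configured deadline is 200 ms, yet the locals show current_deadline = datetime.timedelta(microseconds=250000). That comes from the (current_deadline // 4) * 5 line in the wrapper above, which grants 25% slack on non-final runs; only the final replay enforces the strict deadline. A quick sanity check of that arithmetic:

# Why the locals show 250000 µs for a 200 ms deadline: 25% slack during
# non-final runs, computed as (deadline // 4) * 5.
import datetime

deadline = datetime.timedelta(milliseconds=200)
slackened = (deadline // 4) * 5
assert slackened == datetime.timedelta(microseconds=250000)
print(slackened)  # 0:00:00.250000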
____________________ TFileTypeMP3_2.test_test_fileobj_load _____________________

data = ConjectureData(INTERESTING, 2 bytes, frozen)

    def _execute_once_for_engine(self, data):
        ...
>           result = self.execute_once(data)

/usr/lib/python3.12/site-packages/hypothesis/core.py:850:
_ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _

    def execute_once(self, data, print_example=False, is_final=False,
                     expected_failure=None, example_kwargs=None):
        ...
>       result = self.test_runner(data, run)

/usr/lib/python3.12/site-packages/hypothesis/core.py:789:
_ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _

data = ConjectureData(INTERESTING, 2 bytes, frozen)
function = .run at 0xffffffb9858720>

    def default_new_style_executor(data, function):
>       return function(data)

/usr/lib/python3.12/site-packages/hypothesis/executors.py:47:
_ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _

    def run(data):
        ...
>       return test(*args, **kwargs)

/usr/lib/python3.12/site-packages/hypothesis/core.py:785:
_ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _

    @given(generate_test_file_objects(h, self.KIND))
>   def run(t):

tests/test___init__.py:345:
_ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _

args = (,), kwargs = {}, initial_draws = 1
start = 9283468.038598545, finish = 9283468.329409745, result = None
internal_draw_time = 0
runtime = datetime.timedelta(microseconds=290811)
current_deadline = datetime.timedelta(microseconds=250000)

    @proxies(self.test)
    def test(*args, **kwargs):
        ...
        if runtime >= current_deadline:
>           raise DeadlineExceeded(runtime, self.settings.deadline)
E           hypothesis.errors.DeadlineExceeded: Test took 290.81ms, which exceeds the deadline of 200.00ms

/usr/lib/python3.12/site-packages/hypothesis/core.py:737: DeadlineExceeded

The above exception was the direct cause of the following exception:

    def test_test_fileobj_load(self):
        with open(self.filename, "rb") as h:

            @given(generate_test_file_objects(h, self.KIND))
            def run(t):
                try:
                    self.KIND(t)
                except MutagenError:
                    pass

>           run()

tests/test___init__.py:351:
_ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _
tests/test___init__.py:345: in run
    def run(t):
_ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _

data = ConjectureData(VALID, 2 bytes, frozen), print_example = True
is_final = True
expected_failure = (DeadlineExceeded('Test took 290.81ms, which exceeds the deadline of 200.00ms'), 'Traceback (most recent call last):\n....settings.deadline)\nhypothesis.errors.DeadlineExceeded: Test took 290.81ms, which exceeds the deadline of 200.00ms\n')
example_kwargs = None

    def execute_once(self, data, print_example=False, is_final=False,
                     expected_failure=None, example_kwargs=None):
        ...
>       raise Flaky(
            f"Hypothesis {text_repr} produces unreliable results: "
            "Falsified on the first call but did not on a subsequent one"
        ) from exception
E   hypothesis.errors.Flaky: Hypothesis run(t=) produces unreliable results: Falsified on the first call but did not on a subsequent one
E   Falsifying example: run(
E       t=(lambda i: _TestFileObj(fileobj, fail_after=i))(18),
E   )
E   Unreliable test timings! On an initial run, this test took 290.81ms, which exceeded the deadline of 200.00ms, but on a subsequent run it took 3.87 ms, which did not. If you expect this sort of variability in your test timings, consider turning deadlines off for this test by setting deadline=None.

/usr/lib/python3.12/site-packages/hypothesis/core.py:814: Flaky
___________________ TFileTypeMP3_3.test_test_fileobj_delete ____________________

    def test_test_fileobj_delete(self):
        with open(self.filename, "rb+") as h:
            o = self.KIND(_TestFileObj(h))

            @given(generate_test_file_objects(
                h, lambda t: o.delete(fileobj=t)))
            def run(t):
                try:
                    o.delete(fileobj=t)
                except MutagenError:
                    pass

>           run()

tests/test___init__.py:378:
_ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _

    @given(generate_test_file_objects(
>       h, lambda t: o.delete(fileobj=t)))
E   hypothesis.errors.FailedHealthCheck: Data generation is extremely slow: Only produced 6 valid examples in 1.05 seconds (0 invalid ones and 0 exceeded maximum size). Try decreasing size of the data you're generating (with e.g. max_size or max_leaves parameters).
E   See https://hypothesis.readthedocs.io/en/latest/healthchecks.html for more information about this. If you want to disable just this health check, add HealthCheck.too_slow to the suppress_health_check settings for this test.

tests/test___init__.py:371: FailedHealthCheck
----------------------------- Captured stdout call -----------------------------
You can add @seed(330707512299976662316669198298344706153) to this test or run pytest with --hypothesis-seed=330707512299976662316669198298344706153 to reproduce this failure.
____________________ TFileTypeMP3_3.test_test_fileobj_load _____________________

    def test_test_fileobj_load(self):
        with open(self.filename, "rb") as h:

            @given(generate_test_file_objects(h, self.KIND))
            def run(t):
                try:
                    self.KIND(t)
                except MutagenError:
                    pass

>           run()

tests/test___init__.py:351:
_ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _

    @given(generate_test_file_objects(h, self.KIND))
>   def run(t):
E   hypothesis.errors.FailedHealthCheck: Data generation is extremely slow: Only produced 3 valid examples in 1.10 seconds (0 invalid ones and 0 exceeded maximum size). Try decreasing size of the data you're generating (with e.g. max_size or max_leaves parameters).
E   See https://hypothesis.readthedocs.io/en/latest/healthchecks.html for more information about this. If you want to disable just this health check, add HealthCheck.too_slow to the suppress_health_check settings for this test.

tests/test___init__.py:345: FailedHealthCheck
----------------------------- Captured stdout call -----------------------------
You can add @seed(81965246666669596509545675437213823120) to this test or run pytest with --hypothesis-seed=81965246666669596509545675437213823120 to reproduce this failure.
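Each FailedHealthCheck above prints a seed for deterministic replay. Either form below works; the decorator sketch uses a dummy strategy and property body, since only the seed value comes from the log:

# Replaying a failure with the seed reported in the captured stdout.
from hypothesis import given, seed, strategies as st

@seed(330707512299976662316669198298344706153)  # seed from the log above
@given(st.binary())  # placeholder strategy
def run(t):
    pass

# Equivalent, without editing the test:
#   pytest --hypothesis-seed=330707512299976662316669198298344706153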
____________________ TFileTypeMP3_3.test_test_fileobj_save _____________________

    def test_test_fileobj_save(self):
        with open(self.filename, "rb+") as h:
            o = self.KIND(_TestFileObj(h))

            @given(generate_test_file_objects(h, lambda t: o.save(fileobj=t)))
            def run(t):
                try:
                    o.save(fileobj=t)
                except MutagenError:
                    pass

>           run()

tests/test___init__.py:364:
_ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _

    @given(generate_test_file_objects(h, lambda t: o.save(fileobj=t)))
>   def run(t):
E   hypothesis.errors.FailedHealthCheck: Data generation is extremely slow: Only produced 9 valid examples in 1.24 seconds (0 invalid ones and 0 exceeded maximum size). Try decreasing size of the data you're generating (with e.g. max_size or max_leaves parameters).
E   See https://hypothesis.readthedocs.io/en/latest/healthchecks.html for more information about this. If you want to disable just this health check, add HealthCheck.too_slow to the suppress_health_check settings for this test.

tests/test___init__.py:358: FailedHealthCheck
----------------------------- Captured stdout call -----------------------------
You can add @seed(94899748859404259421952452389827150038) to this test or run pytest with --hypothesis-seed=94899748859404259421952452389827150038 to reproduce this failure.
____________________ TFileTypeMP3_4.test_test_fileobj_load _____________________

    def test_test_fileobj_load(self):
        with open(self.filename, "rb") as h:

            @given(generate_test_file_objects(h, self.KIND))
            def run(t):
                try:
                    self.KIND(t)
                except MutagenError:
                    pass

>           run()

tests/test___init__.py:351:
_ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _

    @given(generate_test_file_objects(h, self.KIND))
>   def run(t):
E   hypothesis.errors.FailedHealthCheck: Data generation is extremely slow: Only produced 4 valid examples in 1.02 seconds (0 invalid ones and 0 exceeded maximum size). Try decreasing size of the data you're generating (with e.g. max_size or max_leaves parameters).
E   See https://hypothesis.readthedocs.io/en/latest/healthchecks.html for more information about this. If you want to disable just this health check, add HealthCheck.too_slow to the suppress_health_check settings for this test.

tests/test___init__.py:345: FailedHealthCheck
____________________ TFileTypeMP4.test_test_fileobj_delete _____________________

    def test_test_fileobj_delete(self):
        with open(self.filename, "rb+") as h:
            o = self.KIND(_TestFileObj(h))

            @given(generate_test_file_objects(
                h, lambda t: o.delete(fileobj=t)))
            def run(t):
                try:
                    o.delete(fileobj=t)
                except MutagenError:
                    pass

>           run()

tests/test___init__.py:378:
_ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _

    @given(generate_test_file_objects(
>       h, lambda t: o.delete(fileobj=t)))
E   hypothesis.errors.FailedHealthCheck: Data generation is extremely slow: Only produced 4 valid examples in 1.49 seconds (0 invalid ones and 0 exceeded maximum size). Try decreasing size of the data you're generating (with e.g. max_size or max_leaves parameters).
E   See https://hypothesis.readthedocs.io/en/latest/healthchecks.html for more information about this. If you want to disable just this health check, add HealthCheck.too_slow to the suppress_health_check settings for this test.

tests/test___init__.py:371: FailedHealthCheck
----------------------------- Captured stdout call -----------------------------
You can add @seed(248350788686231245588671222827931523174) to this test or run pytest with --hypothesis-seed=248350788686231245588671222827931523174 to reproduce this failure.
_____________________ TFileTypeMP4.test_test_fileobj_load ______________________

    def test_test_fileobj_load(self):
        with open(self.filename, "rb") as h:

            @given(generate_test_file_objects(h, self.KIND))
            def run(t):
                try:
                    self.KIND(t)
                except MutagenError:
                    pass

>           run()

tests/test___init__.py:351:
_ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _

    @given(generate_test_file_objects(h, self.KIND))
>   def run(t):
E   hypothesis.errors.FailedHealthCheck: Data generation is extremely slow: Only produced 8 valid examples in 1.36 seconds (0 invalid ones and 0 exceeded maximum size). Try decreasing size of the data you're generating (with e.g. max_size or max_leaves parameters).
E   See https://hypothesis.readthedocs.io/en/latest/healthchecks.html for more information about this. If you want to disable just this health check, add HealthCheck.too_slow to the suppress_health_check settings for this test.

tests/test___init__.py:345: FailedHealthCheck
----------------------------- Captured stdout call -----------------------------
You can add @seed(77260966482569404606637246193556555927) to this test or run pytest with --hypothesis-seed=77260966482569404606637246193556555927 to reproduce this failure.
_____________________ TFileTypeMP4.test_test_fileobj_save ______________________

    def test_test_fileobj_save(self):
        with open(self.filename, "rb+") as h:
            o = self.KIND(_TestFileObj(h))

            @given(generate_test_file_objects(h, lambda t: o.save(fileobj=t)))
            def run(t):
                try:
                    o.save(fileobj=t)
                except MutagenError:
                    pass

>           run()

tests/test___init__.py:364:
_ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _

    @given(generate_test_file_objects(h, lambda t: o.save(fileobj=t)))
>   def run(t):
E   hypothesis.errors.FailedHealthCheck: Data generation is extremely slow: Only produced 9 valid examples in 1.12 seconds (0 invalid ones and 0 exceeded maximum size). Try decreasing size of the data you're generating (with e.g. max_size or max_leaves parameters).
E   See https://hypothesis.readthedocs.io/en/latest/healthchecks.html for more information about this. If you want to disable just this health check, add HealthCheck.too_slow to the suppress_health_check settings for this test.

tests/test___init__.py:358: FailedHealthCheck
----------------------------- Captured stdout call -----------------------------
You can add @seed(179389185676108869903507392654564469803) to this test or run pytest with --hypothesis-seed=179389185676108869903507392654564469803 to reproduce this failure.
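The health-check message itself names the two knobs: generate less data, or suppress HealthCheck.too_slow for the affected test. A hedged sketch of both, with a placeholder strategy and property body:

# Sketch only: silence the too_slow health check and cap generated data.
from hypothesis import HealthCheck, given, settings, strategies as st

@settings(suppress_health_check=[HealthCheck.too_slow])  # this check only
@given(st.binary(max_size=256))  # smaller payloads also speed up generation
def run(t):
    pass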
""" try: trace = frozenset() if ( self.failed_normally and not self.failed_due_to_deadline and Phase.shrink in self.settings.phases and Phase.explain in self.settings.phases and sys.gettrace() is None and not PYPY ): # pragma: no cover # This is in fact covered by our *non-coverage* tests, but due to the # settrace() contention *not* by our coverage tests. Ah well. tracer = Tracer() try: sys.settrace(tracer.trace) result = self.execute_once(data) if data.status == Status.VALID: self.explain_traces[None].add(frozenset(tracer.branches)) finally: sys.settrace(None) trace = frozenset(tracer.branches) else: > result = self.execute_once(data) /usr/lib/python3.12/site-packages/hypothesis/core.py:850: _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ self = data = ConjectureData(INTERESTING, 4 bytes, frozen), print_example = False is_final = False, expected_failure = None, example_kwargs = None def execute_once( self, data, print_example=False, is_final=False, expected_failure=None, example_kwargs=None, ): """Run the test function once, using ``data`` as input. If the test raises an exception, it will propagate through to the caller of this method. Depending on its type, this could represent an ordinary test failure, or a fatal error, or a control exception. If this method returns normally, the test might have passed, or it might have placed ``data`` in an unsuccessful state and then swallowed the corresponding control exception. """ self.ever_executed = True data.is_find = self.is_find text_repr = None if self.settings.deadline is None: test = self.test else: @proxies(self.test) def test(*args, **kwargs): self.__test_runtime = None initial_draws = len(data.draw_times) start = time.perf_counter() result = self.test(*args, **kwargs) finish = time.perf_counter() internal_draw_time = sum(data.draw_times[initial_draws:]) runtime = datetime.timedelta( seconds=finish - start - internal_draw_time ) self.__test_runtime = runtime current_deadline = self.settings.deadline if not is_final: current_deadline = (current_deadline // 4) * 5 if runtime >= current_deadline: raise DeadlineExceeded(runtime, self.settings.deadline) return result def run(data): # Set up dynamic context needed by a single test run. with local_settings(self.settings): with deterministic_PRNG(): with BuildContext(data, is_final=is_final) as context: if self.stuff.selfy is not None: data.hypothesis_runner = self.stuff.selfy # Generate all arguments to the test function. args = self.stuff.args kwargs = dict(self.stuff.kwargs) if example_kwargs is None: a, kw, argslices = context.prep_args_kwargs_from_strategies( (), self.stuff.given_kwargs ) assert not a, "strategies all moved to kwargs by now" else: kw = example_kwargs argslices = {} kwargs.update(kw) if expected_failure is not None: nonlocal text_repr text_repr = repr_call(test, args, kwargs) if print_example or current_verbosity() >= Verbosity.verbose: printer = RepresentationPrinter(context=context) if print_example: printer.text("Falsifying example:") else: printer.text("Trying example:") if self.print_given_args: printer.text(" ") printer.repr_call( test.__name__, args, kwargs, force_split=True, arg_slices=argslices, leading_comment=( "# " + context.data.slice_comments[(0, 0)] if (0, 0) in context.data.slice_comments else None ), ) report(printer.getvalue()) return test(*args, **kwargs) # Run the test function once, via the executor hook. # In most cases this will delegate straight to `run(data)`. 
> result = self.test_runner(data, run) /usr/lib/python3.12/site-packages/hypothesis/core.py:789: _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ data = ConjectureData(INTERESTING, 4 bytes, frozen) function = .run at 0xffffffb98719e0> def default_new_style_executor(data, function): > return function(data) /usr/lib/python3.12/site-packages/hypothesis/executors.py:47: _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ data = ConjectureData(INTERESTING, 4 bytes, frozen) def run(data): # Set up dynamic context needed by a single test run. with local_settings(self.settings): with deterministic_PRNG(): with BuildContext(data, is_final=is_final) as context: if self.stuff.selfy is not None: data.hypothesis_runner = self.stuff.selfy # Generate all arguments to the test function. args = self.stuff.args kwargs = dict(self.stuff.kwargs) if example_kwargs is None: a, kw, argslices = context.prep_args_kwargs_from_strategies( (), self.stuff.given_kwargs ) assert not a, "strategies all moved to kwargs by now" else: kw = example_kwargs argslices = {} kwargs.update(kw) if expected_failure is not None: nonlocal text_repr text_repr = repr_call(test, args, kwargs) if print_example or current_verbosity() >= Verbosity.verbose: printer = RepresentationPrinter(context=context) if print_example: printer.text("Falsifying example:") else: printer.text("Trying example:") if self.print_given_args: printer.text(" ") printer.repr_call( test.__name__, args, kwargs, force_split=True, arg_slices=argslices, leading_comment=( "# " + context.data.slice_comments[(0, 0)] if (0, 0) in context.data.slice_comments else None ), ) report(printer.getvalue()) > return test(*args, **kwargs) /usr/lib/python3.12/site-packages/hypothesis/core.py:785: _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ t = @given(generate_test_file_objects(h, self.KIND)) > def run(t): tests/test___init__.py:345: _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ args = (,) kwargs = {}, initial_draws = 1, start = 9283562.020597477, result = None finish = 9283562.530564576, internal_draw_time = 0 runtime = datetime.timedelta(microseconds=509967) current_deadline = datetime.timedelta(microseconds=250000) @proxies(self.test) def test(*args, **kwargs): self.__test_runtime = None initial_draws = len(data.draw_times) start = time.perf_counter() result = self.test(*args, **kwargs) finish = time.perf_counter() internal_draw_time = sum(data.draw_times[initial_draws:]) runtime = datetime.timedelta( seconds=finish - start - internal_draw_time ) self.__test_runtime = runtime current_deadline = self.settings.deadline if not is_final: current_deadline = (current_deadline // 4) * 5 if runtime >= current_deadline: > raise DeadlineExceeded(runtime, self.settings.deadline) E hypothesis.errors.DeadlineExceeded: Test took 509.97ms, which exceeds the deadline of 200.00ms /usr/lib/python3.12/site-packages/hypothesis/core.py:737: DeadlineExceeded The above exception was the direct cause of the following exception: self = def test_test_fileobj_load(self): with open(self.filename, "rb") as h: @given(generate_test_file_objects(h, self.KIND)) def run(t): try: self.KIND(t) except MutagenError: pass > run() tests/test___init__.py:351: _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ tests/test___init__.py:345: in run def run(t): _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ self = data = 
ConjectureData(VALID, 4 bytes, frozen), print_example = True is_final = True expected_failure = (DeadlineExceeded('Test took 509.97ms, which exceeds the deadline of 200.00ms'), 'Traceback (most recent call last):\n....settings.deadline)\nhypothesis.errors.DeadlineExceeded: Test took 509.97ms, which exceeds the deadline of 200.00ms\n') example_kwargs = None def execute_once( self, data, print_example=False, is_final=False, expected_failure=None, example_kwargs=None, ): """Run the test function once, using ``data`` as input. If the test raises an exception, it will propagate through to the caller of this method. Depending on its type, this could represent an ordinary test failure, or a fatal error, or a control exception. If this method returns normally, the test might have passed, or it might have placed ``data`` in an unsuccessful state and then swallowed the corresponding control exception. """ self.ever_executed = True data.is_find = self.is_find text_repr = None if self.settings.deadline is None: test = self.test else: @proxies(self.test) def test(*args, **kwargs): self.__test_runtime = None initial_draws = len(data.draw_times) start = time.perf_counter() result = self.test(*args, **kwargs) finish = time.perf_counter() internal_draw_time = sum(data.draw_times[initial_draws:]) runtime = datetime.timedelta( seconds=finish - start - internal_draw_time ) self.__test_runtime = runtime current_deadline = self.settings.deadline if not is_final: current_deadline = (current_deadline // 4) * 5 if runtime >= current_deadline: raise DeadlineExceeded(runtime, self.settings.deadline) return result def run(data): # Set up dynamic context needed by a single test run. with local_settings(self.settings): with deterministic_PRNG(): with BuildContext(data, is_final=is_final) as context: if self.stuff.selfy is not None: data.hypothesis_runner = self.stuff.selfy # Generate all arguments to the test function. args = self.stuff.args kwargs = dict(self.stuff.kwargs) if example_kwargs is None: a, kw, argslices = context.prep_args_kwargs_from_strategies( (), self.stuff.given_kwargs ) assert not a, "strategies all moved to kwargs by now" else: kw = example_kwargs argslices = {} kwargs.update(kw) if expected_failure is not None: nonlocal text_repr text_repr = repr_call(test, args, kwargs) if print_example or current_verbosity() >= Verbosity.verbose: printer = RepresentationPrinter(context=context) if print_example: printer.text("Falsifying example:") else: printer.text("Trying example:") if self.print_given_args: printer.text(" ") printer.repr_call( test.__name__, args, kwargs, force_split=True, arg_slices=argslices, leading_comment=( "# " + context.data.slice_comments[(0, 0)] if (0, 0) in context.data.slice_comments else None ), ) report(printer.getvalue()) return test(*args, **kwargs) # Run the test function once, via the executor hook. # In most cases this will delegate straight to `run(data)`. result = self.test_runner(data, run) # If a failure was expected, it should have been raised already, so # instead raise an appropriate diagnostic error. if expected_failure is not None: exception, traceback = expected_failure if ( isinstance(exception, DeadlineExceeded) and self.__test_runtime is not None ): report( "Unreliable test timings! On an initial run, this " "test took %.2fms, which exceeded the deadline of " "%.2fms, but on a subsequent run it took %.2f ms, " "which did not. If you expect this sort of " "variability in your test timings, consider turning " "deadlines off for this test by setting deadline=None." 
% ( exception.runtime.total_seconds() * 1000, self.settings.deadline.total_seconds() * 1000, self.__test_runtime.total_seconds() * 1000, ) ) else: report("Failed to reproduce exception. Expected: \n" + traceback) > raise Flaky( f"Hypothesis {text_repr} produces unreliable results: " "Falsified on the first call but did not on a subsequent one" ) from exception E hypothesis.errors.Flaky: Hypothesis run(t=) produces unreliable results: Falsified on the first call but did not on a subsequent one E Falsifying example: run( E t=(lambda i: _TestFileObj(fileobj, stop_after=i))(0), E ) E Unreliable test timings! On an initial run, this test took 509.97ms, which exceeded the deadline of 200.00ms, but on a subsequent run it took 2.02 ms, which did not. If you expect this sort of variability in your test timings, consider turning deadlines off for this test by setting deadline=None. /usr/lib/python3.12/site-packages/hypothesis/core.py:814: Flaky ____________________ TFileTypeMP4_3.test_test_fileobj_load _____________________ self = def test_test_fileobj_load(self): with open(self.filename, "rb") as h: @given(generate_test_file_objects(h, self.KIND)) def run(t): try: self.KIND(t) except MutagenError: pass > run() tests/test___init__.py:351: _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ @given(generate_test_file_objects(h, self.KIND)) > def run(t): E hypothesis.errors.FailedHealthCheck: Data generation is extremely slow: Only produced 3 valid examples in 1.14 seconds (0 invalid ones and 0 exceeded maximum size). Try decreasing size of the data you're generating (with e.g. max_size or max_leaves parameters). E See https://hypothesis.readthedocs.io/en/latest/healthchecks.html for more information about this. If you want to disable just this health check, add HealthCheck.too_slow to the suppress_health_check settings for this test. tests/test___init__.py:345: FailedHealthCheck ----------------------------- Captured stdout call ----------------------------- You can add @seed(10425736511696518412891773846871957680) to this test or run pytest with --hypothesis-seed=10425736511696518412891773846871957680 to reproduce this failure. ____________________ TFileTypeMP4_3.test_test_fileobj_save _____________________ self = data = ConjectureData(INTERESTING, 2 bytes, frozen) def _execute_once_for_engine(self, data): """Wrapper around ``execute_once`` that intercepts test failure exceptions and single-test control exceptions, and turns them into appropriate method calls to `data` instead. This allows the engine to assume that any exception other than ``StopTest`` must be a fatal error, and should stop the entire engine. """ try: trace = frozenset() if ( self.failed_normally and not self.failed_due_to_deadline and Phase.shrink in self.settings.phases and Phase.explain in self.settings.phases and sys.gettrace() is None and not PYPY ): # pragma: no cover # This is in fact covered by our *non-coverage* tests, but due to the # settrace() contention *not* by our coverage tests. Ah well. 
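Because every one of these failures stems from a slow, heavily loaded build root rather than from mutagen itself, the usual remedy is environment-wide rather than per-test: register a Hypothesis settings profile in conftest.py and select it during the build. This sketch is not taken from mutagen's tree; the profile name and environment variable are illustrative:

# Hedged sketch of a conftest.py profile for slow CI/build chroots.
import os
from hypothesis import HealthCheck, settings

settings.register_profile(
    "build",  # hypothetical profile name
    deadline=None,
    suppress_health_check=[HealthCheck.too_slow],
)
settings.load_profile(os.environ.get("HYPOTHESIS_PROFILE", "default"))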
____________________ TFileTypeMP4_3.test_test_fileobj_save _____________________

data = ConjectureData(INTERESTING, 2 bytes, frozen)

    def _execute_once_for_engine(self, data):
        ...
>           result = self.execute_once(data)

/usr/lib/python3.12/site-packages/hypothesis/core.py:850:
_ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _

    def execute_once(self, data, print_example=False, is_final=False,
                     expected_failure=None, example_kwargs=None):
        ...
>       result = self.test_runner(data, run)

/usr/lib/python3.12/site-packages/hypothesis/core.py:789:
_ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _

data = ConjectureData(INTERESTING, 2 bytes, frozen)
function = .run at 0xffffffb9a70860>

    def default_new_style_executor(data, function):
>       return function(data)

/usr/lib/python3.12/site-packages/hypothesis/executors.py:47:
_ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _

    def run(data):
        ...
>       return test(*args, **kwargs)

/usr/lib/python3.12/site-packages/hypothesis/core.py:785:
_ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _

    @given(generate_test_file_objects(h, lambda t: o.save(fileobj=t)))
>   def run(t):

tests/test___init__.py:358:
_ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _

args = (,), kwargs = {}, initial_draws = 1
start = 9283594.826097447, finish = 9283595.160636948, result = None
internal_draw_time = 0
runtime = datetime.timedelta(microseconds=334540)
current_deadline = datetime.timedelta(microseconds=250000)

    @proxies(self.test)
    def test(*args, **kwargs):
        ...
        if runtime >= current_deadline:
>           raise DeadlineExceeded(runtime, self.settings.deadline)
E           hypothesis.errors.DeadlineExceeded: Test took 334.54ms, which exceeds the deadline of 200.00ms

/usr/lib/python3.12/site-packages/hypothesis/core.py:737: DeadlineExceeded

The above exception was the direct cause of the following exception:

    def test_test_fileobj_save(self):
        with open(self.filename, "rb+") as h:
            o = self.KIND(_TestFileObj(h))

            @given(generate_test_file_objects(h, lambda t: o.save(fileobj=t)))
            def run(t):
                try:
                    o.save(fileobj=t)
                except MutagenError:
                    pass

>           run()

tests/test___init__.py:364:
_ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _
tests/test___init__.py:358: in run
    def run(t):
_ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _

data = ConjectureData(VALID, 2 bytes, frozen), print_example = True
is_final = True
expected_failure = (DeadlineExceeded('Test took 334.54ms, which exceeds the deadline of 200.00ms'), 'Traceback (most recent call last):\n....settings.deadline)\nhypothesis.errors.DeadlineExceeded: Test took 334.54ms, which exceeds the deadline of 200.00ms\n')
example_kwargs = None

    def execute_once(self, data, print_example=False, is_final=False,
                     expected_failure=None, example_kwargs=None):
        ...
>       raise Flaky(
            f"Hypothesis {text_repr} produces unreliable results: "
            "Falsified on the first call but did not on a subsequent one"
        ) from exception
E   hypothesis.errors.Flaky: Hypothesis run(t=) produces unreliable results: Falsified on the first call but did not on a subsequent one
E   Falsifying example: run(
E       t=(lambda i: _TestFileObj(fileobj, fail_after=i))(0),
E   )
E   Unreliable test timings! On an initial run, this test took 334.54ms, which exceeded the deadline of 200.00ms, but on a subsequent run it took 0.46 ms, which did not. If you expect this sort of variability in your test timings, consider turning deadlines off for this test by setting deadline=None.

/usr/lib/python3.12/site-packages/hypothesis/core.py:814: Flaky
___________________ TFileTypeMP4_4.test_test_fileobj_delete ____________________

    def test_test_fileobj_delete(self):
        with open(self.filename, "rb+") as h:
            o = self.KIND(_TestFileObj(h))

            @given(generate_test_file_objects(
                h, lambda t: o.delete(fileobj=t)))
            def run(t):
                try:
                    o.delete(fileobj=t)
                except MutagenError:
                    pass

>           run()

tests/test___init__.py:378:
_ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _

    @given(generate_test_file_objects(
>       h, lambda t: o.delete(fileobj=t)))
E   hypothesis.errors.FailedHealthCheck: Data generation is extremely slow: Only produced 5 valid examples in 1.03 seconds (0 invalid ones and 0 exceeded maximum size). Try decreasing size of the data you're generating (with e.g. max_size or max_leaves parameters).
E   See https://hypothesis.readthedocs.io/en/latest/healthchecks.html for more information about this. If you want to disable just this health check, add HealthCheck.too_slow to the suppress_health_check settings for this test.

tests/test___init__.py:371: FailedHealthCheck
----------------------------- Captured stdout call -----------------------------
You can add @seed(235609998756461529249569396431825993040) to this test or run pytest with --hypothesis-seed=235609998756461529249569396431825993040 to reproduce this failure.
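The falsifying examples all wrap the real file handle in _TestFileObj(fileobj, fail_after=i) or stop_after=i. That helper lives in mutagen's test suite and is not reproduced in this log; the class below is only a guess at the general shape of such a fault-injecting wrapper, to show why the save/load/delete paths get exercised under injected I/O errors:

# Hypothetical stand-in for mutagen's _TestFileObj (shape assumed, not
# copied): raise on the (fail_after + 1)-th I/O call.
import io

class FailingFileObj:
    def __init__(self, fileobj, fail_after):
        self._fileobj = fileobj
        self._remaining = fail_after

    def _tick(self):
        # Count down the allowed I/O operations, then inject a failure.
        if self._remaining <= 0:
            raise IOError("injected failure")
        self._remaining -= 1

    def read(self, size=-1):
        self._tick()
        return self._fileobj.read(size)

    def seek(self, offset, whence=io.SEEK_SET):
        self._tick()
        return self._fileobj.seek(offset, whence)

    def tell(self):
        self._tick()
        return self._fileobj.tell()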
____________________ TFileTypeMP4_4.test_test_fileobj_save _____________________

self = 

    def test_test_fileobj_save(self):
        with open(self.filename, "rb+") as h:
            o = self.KIND(_TestFileObj(h))
            @given(generate_test_file_objects(h, lambda t: o.save(fileobj=t)))
            def run(t):
                try:
                    o.save(fileobj=t)
                except MutagenError:
                    pass
>           run()

tests/test___init__.py:364: 
_ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _

    @given(generate_test_file_objects(h, lambda t: o.save(fileobj=t)))
>   def run(t):
E   hypothesis.errors.FailedHealthCheck: Data generation is extremely slow: Only produced 5 valid examples in 1.24 seconds (0 invalid ones and 0 exceeded maximum size). Try decreasing size of the data you're generating (with e.g. max_size or max_leaves parameters).
E   See https://hypothesis.readthedocs.io/en/latest/healthchecks.html for more information about this. If you want to disable just this health check, add HealthCheck.too_slow to the suppress_health_check settings for this test.

tests/test___init__.py:358: FailedHealthCheck
----------------------------- Captured stdout call -----------------------------
You can add @seed(334056357358990264361866993100057530758) to this test or run pytest with --hypothesis-seed=334056357358990264361866993100057530758 to reproduce this failure.
___________________ TFileTypeMP4_5.test_test_fileobj_delete ____________________

self = 
data = ConjectureData(INTERESTING, 3 bytes, frozen)

    def _execute_once_for_engine(self, data):
        """Wrapper around ``execute_once`` that intercepts test failure
        exceptions and single-test control exceptions, and turns them into
        appropriate method calls to `data` instead.

        This allows the engine to assume that any exception other than
        ``StopTest`` must be a fatal error, and should stop the entire
        engine.
        """
        try:
            trace = frozenset()
            if (
                self.failed_normally
                and not self.failed_due_to_deadline
                and Phase.shrink in self.settings.phases
                and Phase.explain in self.settings.phases
                and sys.gettrace() is None
                and not PYPY
            ):  # pragma: no cover
                # This is in fact covered by our *non-coverage* tests, but due to the
                # settrace() contention *not* by our coverage tests. Ah well.
                tracer = Tracer()
                try:
                    sys.settrace(tracer.trace)
                    result = self.execute_once(data)
                    if data.status == Status.VALID:
                        self.explain_traces[None].add(frozenset(tracer.branches))
                finally:
                    sys.settrace(None)
                    trace = frozenset(tracer.branches)
            else:
>               result = self.execute_once(data)

/usr/lib/python3.12/site-packages/hypothesis/core.py:850: 
_ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _

self = 
data = ConjectureData(INTERESTING, 3 bytes, frozen), print_example = False
is_final = False, expected_failure = None, example_kwargs = None

/usr/lib/python3.12/site-packages/hypothesis/core.py:789: in execute_once
    result = self.test_runner(data, run)
/usr/lib/python3.12/site-packages/hypothesis/executors.py:47: in default_new_style_executor
    return function(data)

data = ConjectureData(INTERESTING, 3 bytes, frozen)
function = .run at 0xffffffb98193a0>

/usr/lib/python3.12/site-packages/hypothesis/core.py:785: in run
    return test(*args, **kwargs)

t = 

    @given(generate_test_file_objects(
>       h, lambda t: o.delete(fileobj=t)))

tests/test___init__.py:371: 
_ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _

args = (,), kwargs = {}, initial_draws = 1, start = 9283643.02882806, result = None
finish = 9283643.335598858, internal_draw_time = 0
runtime = datetime.timedelta(microseconds=306771)
current_deadline = datetime.timedelta(microseconds=250000)

    @proxies(self.test)
    def test(*args, **kwargs):
        self.__test_runtime = None
        initial_draws = len(data.draw_times)
        start = time.perf_counter()
        result = self.test(*args, **kwargs)
        finish = time.perf_counter()
        internal_draw_time = sum(data.draw_times[initial_draws:])
        runtime = datetime.timedelta(
            seconds=finish - start - internal_draw_time
        )
        self.__test_runtime = runtime
        current_deadline = self.settings.deadline
        if not is_final:
            current_deadline = (current_deadline // 4) * 5
        if runtime >= current_deadline:
>           raise DeadlineExceeded(runtime, self.settings.deadline)
E           hypothesis.errors.DeadlineExceeded: Test took 306.77ms, which exceeds the deadline of 200.00ms

/usr/lib/python3.12/site-packages/hypothesis/core.py:737: DeadlineExceeded

The above exception was the direct cause of the following exception:

self = 

    def test_test_fileobj_delete(self):
        with open(self.filename, "rb+") as h:
            o = self.KIND(_TestFileObj(h))
            @given(generate_test_file_objects(
                h, lambda t: o.delete(fileobj=t)))
            def run(t):
                try:
                    o.delete(fileobj=t)
                except MutagenError:
                    pass
>           run()

tests/test___init__.py:378: 
_ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _
tests/test___init__.py:371: in run
    h, lambda t: o.delete(fileobj=t)))
_ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _

self = 
data = ConjectureData(VALID, 3 bytes, frozen), print_example = True
is_final = True
expected_failure = (DeadlineExceeded('Test took 306.77ms, which exceeds the deadline of 200.00ms'), 'Traceback (most recent call last):\n....settings.deadline)\nhypothesis.errors.DeadlineExceeded: Test took 306.77ms, which exceeds the deadline of 200.00ms\n')
example_kwargs = None

>       raise Flaky(
            f"Hypothesis {text_repr} produces unreliable results: "
            "Falsified on the first call but did not on a subsequent one"
        ) from exception
E       hypothesis.errors.Flaky: Hypothesis run(t=) produces unreliable results: Falsified on the first call but did not on a subsequent one
E       Falsifying example: run(
E           t=(lambda i: _TestFileObj(fileobj, fail_after=i))(0),
E       )
E       Unreliable test timings! On an initial run, this test took 306.77ms, which exceeded the deadline of 200.00ms, but on a subsequent run it took 1.18 ms, which did not. If you expect this sort of variability in your test timings, consider turning deadlines off for this test by setting deadline=None.

/usr/lib/python3.12/site-packages/hypothesis/core.py:814: Flaky
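Every falsifying example above has the shape t=(lambda i: _TestFileObj(fileobj, fail_after=i))(n): the suite wraps a real file object so that the n-th I/O operation raises, probing mutagen's error handling at each point of a load, save, or delete. The real _TestFileObj lives in mutagen's test suite and is not shown in this log; the following is only a sketch of the fail-after idea, with assumed names and behavior:

    import io

    class FailAfterFileObj:
        """Raise after a fixed number of file operations (illustrative only)."""

        def __init__(self, fileobj, fail_after=-1):
            self._fileobj = fileobj
            self._ops_left = fail_after  # -1 means never fail

        def _tick(self):
            if self._ops_left == 0:
                raise IOError("simulated I/O failure")
            if self._ops_left > 0:
                self._ops_left -= 1

        def read(self, size=-1):
            self._tick()
            return self._fileobj.read(size)

        def seek(self, offset, whence=0):
            self._tick()
            return self._fileobj.seek(offset, whence)

        def tell(self):
            self._tick()
            return self._fileobj.tell()

    # fail_after=0 makes the very first operation raise, matching the
    # shrunken examples in the tracebacks.
    f = FailAfterFileObj(io.BytesIO(b"abc"), fail_after=0)
    try:
        f.read()
    except IOError as exc:
        print(exc)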
____________________ TFileTypeMP4_5.test_test_fileobj_load _____________________
E   hypothesis.errors.FailedHealthCheck: Data generation is extremely slow: Only produced 4 valid examples in 1.31 seconds (0 invalid ones and 0 exceeded maximum size). Try decreasing size of the data you're generating (with e.g. max_size or max_leaves parameters).
tests/test___init__.py:345: FailedHealthCheck
You can add @seed(46965081081022303965627429740450901088) to this test or run pytest with --hypothesis-seed=46965081081022303965627429740450901088 to reproduce this failure.
____________________ TFileTypeMP4_5.test_test_fileobj_save _____________________
E   hypothesis.errors.FailedHealthCheck: Data generation is extremely slow: Only produced 5 valid examples in 1.10 seconds (0 invalid ones and 0 exceeded maximum size). Try decreasing size of the data you're generating (with e.g. max_size or max_leaves parameters).
tests/test___init__.py:358: FailedHealthCheck
You can add @seed(108785984478043798518700072228222374333) to this test or run pytest with --hypothesis-seed=108785984478043798518700072228222374333 to reproduce this failure.
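Every FailedHealthCheck in this run is the same check, HealthCheck.too_slow: generating file-object wrappers from an on-disk sample is slow enough that only a handful of valid examples appear per second. As each message notes, the check can be suppressed per test; a minimal sketch, again with a placeholder body:

    from hypothesis import HealthCheck, given, settings, strategies as st

    @settings(suppress_health_check=[HealthCheck.too_slow])  # accept slow generation
    @given(st.binary(max_size=64))
    def run(data):
        pass  # placeholder for the slow, I/O-heavy property body

    run()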
___________________ TFileTypeMP4_6.test_test_fileobj_delete ____________________
E   hypothesis.errors.FailedHealthCheck: Data generation is extremely slow: Only produced 7 valid examples in 1.40 seconds (0 invalid ones and 0 exceeded maximum size). Try decreasing size of the data you're generating (with e.g. max_size or max_leaves parameters).
tests/test___init__.py:371: FailedHealthCheck
You can add @seed(281649326313352083378674735748594645610) to this test or run pytest with --hypothesis-seed=281649326313352083378674735748594645610 to reproduce this failure.
____________________ TFileTypeMP4_6.test_test_fileobj_load _____________________
E   hypothesis.errors.FailedHealthCheck: Data generation is extremely slow: Only produced 3 valid examples in 1.31 seconds (0 invalid ones and 0 exceeded maximum size). Try decreasing size of the data you're generating (with e.g. max_size or max_leaves parameters).
tests/test___init__.py:345: FailedHealthCheck
You can add @seed(240751053149166942156188220091030874864) to this test or run pytest with --hypothesis-seed=240751053149166942156188220091030874864 to reproduce this failure.
____________________ TFileTypeMP4_6.test_test_fileobj_save _____________________
E   hypothesis.errors.FailedHealthCheck: Data generation is extremely slow: Only produced 8 valid examples in 1.46 seconds (0 invalid ones and 0 exceeded maximum size). Try decreasing size of the data you're generating (with e.g. max_size or max_leaves parameters).
tests/test___init__.py:358: FailedHealthCheck
You can add @seed(186789981663640437786683527273183481535) to this test or run pytest with --hypothesis-seed=186789981663640437786683527273183481535 to reproduce this failure.
_________________ TFileTypeMonkeysAudio.test_test_fileobj_load _________________
E   hypothesis.errors.FailedHealthCheck: Data generation is extremely slow: Only produced 5 valid examples in 1.33 seconds (0 invalid ones and 0 exceeded maximum size). Try decreasing size of the data you're generating (with e.g. max_size or max_leaves parameters).
tests/test___init__.py:345: FailedHealthCheck
You can add @seed(227399281107224257953685539227209735602) to this test or run pytest with --hypothesis-seed=227399281107224257953685539227209735602 to reproduce this failure.
________________ TFileTypeMonkeysAudio_2.test_test_fileobj_load ________________
E   hypothesis.errors.FailedHealthCheck: Data generation is extremely slow: Only produced 6 valid examples in 1.04 seconds (0 invalid ones and 0 exceeded maximum size). Try decreasing size of the data you're generating (with e.g. max_size or max_leaves parameters).
tests/test___init__.py:345: FailedHealthCheck
You can add @seed(183974514669656223828504012064024188651) to this test or run pytest with --hypothesis-seed=183974514669656223828504012064024188651 to reproduce this failure.
___________________ TFileTypeMusepack.test_test_fileobj_load ___________________
E   hypothesis.errors.FailedHealthCheck: Data generation is extremely slow: Only produced 4 valid examples in 1.14 seconds (0 invalid ones and 0 exceeded maximum size). Try decreasing size of the data you're generating (with e.g. max_size or max_leaves parameters).
E   See https://hypothesis.readthedocs.io/en/latest/healthchecks.html for more information about this. If you want to disable just this health check, add HealthCheck.too_slow to the suppress_health_check settings for this test.
tests/test___init__.py:345: FailedHealthCheck
You can add @seed(183557975321236589714500526892360646818) to this test or run pytest with --hypothesis-seed=183557975321236589714500526892360646818 to reproduce this failure.
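Each failure prints a seed, and there are two equivalent ways to replay it: decorate the test, or pass the seed to pytest. Both are stock Hypothesis features; the seed below is simply copied from the Musepack failure above:

    from hypothesis import given, seed, strategies as st

    @seed(183557975321236589714500526892360646818)  # seed printed by the failing run
    @given(st.binary())
    def run(data):
        pass

    # Equivalent, without editing the test:
    #   pytest --hypothesis-seed=183557975321236589714500526892360646818 tests/test___init__.py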
__________________ TFileTypeMusepack_2.test_test_fileobj_load __________________
E   hypothesis.errors.FailedHealthCheck: Data generation is extremely slow: Only produced 5 valid examples in 1.25 seconds (0 invalid ones and 0 exceeded maximum size). Try decreasing size of the data you're generating (with e.g. max_size or max_leaves parameters).
tests/test___init__.py:345: FailedHealthCheck
You can add @seed(315020990381883125141010447242766608943) to this test or run pytest with --hypothesis-seed=315020990381883125141010447242766608943 to reproduce this failure.
__________________ TFileTypeMusepack_3.test_test_fileobj_load __________________
E   hypothesis.errors.FailedHealthCheck: Data generation is extremely slow: Only produced 7 valid examples in 1.43 seconds (0 invalid ones and 0 exceeded maximum size). Try decreasing size of the data you're generating (with e.g. max_size or max_leaves parameters).
tests/test___init__.py:345: FailedHealthCheck
You can add @seed(292715683637851787384491113123216119856) to this test or run pytest with --hypothesis-seed=292715683637851787384491113123216119856 to reproduce this failure.
__________________ TFileTypeMusepack_4.test_test_fileobj_load __________________
E   hypothesis.errors.FailedHealthCheck: Data generation is extremely slow: Only produced 6 valid examples in 1.03 seconds (0 invalid ones and 0 exceeded maximum size). Try decreasing size of the data you're generating (with e.g. max_size or max_leaves parameters).
tests/test___init__.py:345: FailedHealthCheck
You can add @seed(16037296581304318228812478280569099854) to this test or run pytest with --hypothesis-seed=16037296581304318228812478280569099854 to reproduce this failure.
__________________ TFileTypeOggFLAC.test_test_fileobj_delete ___________________
E   hypothesis.errors.FailedHealthCheck: Data generation is extremely slow: Only produced 7 valid examples in 1.40 seconds (0 invalid ones and 0 exceeded maximum size). Try decreasing size of the data you're generating (with e.g. max_size or max_leaves parameters).
tests/test___init__.py:371: FailedHealthCheck
You can add @seed(284525835219544363626256715764001449852) to this test or run pytest with --hypothesis-seed=284525835219544363626256715764001449852 to reproduce this failure.
___________________ TFileTypeOggFLAC.test_test_fileobj_load ____________________
E   hypothesis.errors.FailedHealthCheck: Data generation is extremely slow: Only produced 7 valid examples in 1.02 seconds (0 invalid ones and 0 exceeded maximum size). Try decreasing size of the data you're generating (with e.g. max_size or max_leaves parameters).
tests/test___init__.py:345: FailedHealthCheck
You can add @seed(203787327263893067324160338988484413853) to this test or run pytest with --hypothesis-seed=203787327263893067324160338988484413853 to reproduce this failure.
___________________ TFileTypeOggFLAC.test_test_fileobj_save ____________________
E   hypothesis.errors.FailedHealthCheck: Data generation is extremely slow: Only produced 7 valid examples in 1.22 seconds (0 invalid ones and 0 exceeded maximum size). Try decreasing size of the data you're generating (with e.g. max_size or max_leaves parameters).
tests/test___init__.py:358: FailedHealthCheck
You can add @seed(209939926029561647932572944534191201008) to this test or run pytest with --hypothesis-seed=209939926029561647932572944534191201008 to reproduce this failure.
__________________ TFileTypeOggOpus.test_test_fileobj_delete ___________________
E   hypothesis.errors.FailedHealthCheck: Data generation is extremely slow: Only produced 6 valid examples in 1.16 seconds (0 invalid ones and 0 exceeded maximum size). Try decreasing size of the data you're generating (with e.g. max_size or max_leaves parameters).
tests/test___init__.py:371: FailedHealthCheck
You can add @seed(246871325237935492704040474471425501722) to this test or run pytest with --hypothesis-seed=246871325237935492704040474471425501722 to reproduce this failure.
___________________ TFileTypeOggOpus.test_test_fileobj_load ____________________
E   hypothesis.errors.FailedHealthCheck: Data generation is extremely slow: Only produced 3 valid examples in 1.10 seconds (0 invalid ones and 0 exceeded maximum size). Try decreasing size of the data you're generating (with e.g. max_size or max_leaves parameters).
tests/test___init__.py:345: FailedHealthCheck
You can add @seed(84352532321736104207616988980634941034) to this test or run pytest with --hypothesis-seed=84352532321736104207616988980634941034 to reproduce this failure.
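When the same two knobs (deadline and too_slow) need turning for a whole suite on a slow builder, Hypothesis settings profiles are the usual mechanism: register a profile once and select it from the environment. A sketch; the profile name and the conftest.py placement are illustrative choices, not anything mutagen ships:

    # conftest.py (hypothetical placement)
    import os

    from hypothesis import HealthCheck, settings

    settings.register_profile(
        "slow-builder",
        deadline=None,
        suppress_health_check=[HealthCheck.too_slow],
    )
    # Select with e.g. HYPOTHESIS_PROFILE=slow-builder in the build environment.
    settings.load_profile(os.environ.get("HYPOTHESIS_PROFILE", "default"))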
___________________ TFileTypeOggOpus.test_test_fileobj_save ____________________
E   hypothesis.errors.FailedHealthCheck: Data generation is extremely slow: Only produced 4 valid examples in 1.01 seconds (0 invalid ones and 0 exceeded maximum size). Try decreasing size of the data you're generating (with e.g. max_size or max_leaves parameters).
tests/test___init__.py:358: FailedHealthCheck
You can add @seed(274296329244556855877984770935404712231) to this test or run pytest with --hypothesis-seed=274296329244556855877984770935404712231 to reproduce this failure.
__________________ TFileTypeOggSpeex.test_test_fileobj_delete __________________
E   hypothesis.errors.FailedHealthCheck: Data generation is extremely slow: Only produced 8 valid examples in 1.35 seconds (0 invalid ones and 0 exceeded maximum size). Try decreasing size of the data you're generating (with e.g. max_size or max_leaves parameters).
tests/test___init__.py:371: FailedHealthCheck
You can add @seed(116873801229259294276132913675312782202) to this test or run pytest with --hypothesis-seed=116873801229259294276132913675312782202 to reproduce this failure.
___________________ TFileTypeOggSpeex.test_test_fileobj_load ___________________
E   hypothesis.errors.FailedHealthCheck: Data generation is extremely slow: Only produced 3 valid examples in 1.00 seconds (0 invalid ones and 0 exceeded maximum size). Try decreasing size of the data you're generating (with e.g. max_size or max_leaves parameters).
tests/test___init__.py:345: FailedHealthCheck
You can add @seed(253960294584977331818441134238529157118) to this test or run pytest with --hypothesis-seed=253960294584977331818441134238529157118 to reproduce this failure.
___________________ TFileTypeOggSpeex.test_test_fileobj_save ___________________
E   hypothesis.errors.FailedHealthCheck: Data generation is extremely slow: Only produced 5 valid examples in 1.01 seconds (0 invalid ones and 0 exceeded maximum size). Try decreasing size of the data you're generating (with e.g. max_size or max_leaves parameters).
tests/test___init__.py:358: FailedHealthCheck
You can add @seed(262744129026699373582642258928883114033) to this test or run pytest with --hypothesis-seed=262744129026699373582642258928883114033 to reproduce this failure.
_________________ TFileTypeOggTheora.test_test_fileobj_delete __________________
E   hypothesis.errors.FailedHealthCheck: Data generation is extremely slow: Only produced 2 valid examples in 1.89 seconds (0 invalid ones and 0 exceeded maximum size). Try decreasing size of the data you're generating (with e.g. max_size or max_leaves parameters).
tests/test___init__.py:371: FailedHealthCheck
You can add @seed(17158655648104250034746320743035753504) to this test or run pytest with --hypothesis-seed=17158655648104250034746320743035753504 to reproduce this failure.
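A detail worth noting in the DeadlineExceeded frames: the configured deadline is 200.00 ms, yet the raising frame shows current_deadline = datetime.timedelta(microseconds=250000). That is the non-final grace factor in the wrapper, (current_deadline // 4) * 5, i.e. 1.25x the setting; only the final replay enforces the strict 200 ms. A quick check of the arithmetic:

    import datetime

    deadline = datetime.timedelta(milliseconds=200)
    # Hypothesis widens the deadline by 25% while still generating examples:
    current_deadline = (deadline // 4) * 5
    assert current_deadline == datetime.timedelta(microseconds=250000)
    print(current_deadline)  # 0:00:00.250000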
__________________ TFileTypeOggTheora.test_test_fileobj_load ___________________
E   hypothesis.errors.FailedHealthCheck: Data generation is extremely slow: Only produced 4 valid examples in 1.21 seconds (0 invalid ones and 0 exceeded maximum size). Try decreasing size of the data you're generating (with e.g. max_size or max_leaves parameters).
tests/test___init__.py:345: FailedHealthCheck
You can add @seed(7693995079921687713169840941617023138) to this test or run pytest with --hypothesis-seed=7693995079921687713169840941617023138 to reproduce this failure.
__________________ TFileTypeOggTheora.test_test_fileobj_save ___________________
E   hypothesis.errors.FailedHealthCheck: Data generation is extremely slow: Only produced 4 valid examples in 1.15 seconds (0 invalid ones and 0 exceeded maximum size). Try decreasing size of the data you're generating (with e.g. max_size or max_leaves parameters).
tests/test___init__.py:358: FailedHealthCheck
You can add @seed(20037362668638924706750696437924677791) to this test or run pytest with --hypothesis-seed=20037362668638924706750696437924677791 to reproduce this failure.
_________________ TFileTypeOggVorbis.test_test_fileobj_delete __________________
E   hypothesis.errors.FailedHealthCheck: Data generation is extremely slow: Only produced 5 valid examples in 1.13 seconds (0 invalid ones and 0 exceeded maximum size). Try decreasing size of the data you're generating (with e.g. max_size or max_leaves parameters).
tests/test___init__.py:371: FailedHealthCheck
You can add @seed(289691527887452215023587952860740275278) to this test or run pytest with --hypothesis-seed=289691527887452215023587952860740275278 to reproduce this failure.
__________________ TFileTypeOggVorbis.test_test_fileobj_load ___________________
E   hypothesis.errors.FailedHealthCheck: Data generation is extremely slow: Only produced 4 valid examples in 1.34 seconds (0 invalid ones and 0 exceeded maximum size). Try decreasing size of the data you're generating (with e.g. max_size or max_leaves parameters).
tests/test___init__.py:345: FailedHealthCheck
You can add @seed(115098294729305404706362444525462556963) to this test or run pytest with --hypothesis-seed=115098294729305404706362444525462556963 to reproduce this failure.
__________________ TFileTypeOggVorbis.test_test_fileobj_save ___________________
E   hypothesis.errors.FailedHealthCheck: Data generation is extremely slow: Only produced 4 valid examples in 1.10 seconds (0 invalid ones and 0 exceeded maximum size). Try decreasing size of the data you're generating (with e.g. max_size or max_leaves parameters).
tests/test___init__.py:358: FailedHealthCheck
You can add @seed(330925001894183562935166570956577932209) to this test or run pytest with --hypothesis-seed=330925001894183562935166570956577932209 to reproduce this failure.
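The health-check text recommends shrinking what the strategy generates (max_size, max_leaves). generate_test_file_objects is mutagen's own helper and its definition is not part of this log, so the following only illustrates the general shape of bounding a strategy's size, with assumed names throughout:

    from hypothesis import given, strategies as st

    # Illustrative only: bound both the number of simulated failure points
    # and the size of any generated payload so each example stays cheap.
    fail_points = st.integers(min_value=0, max_value=32)
    payloads = st.binary(max_size=256)  # max_size caps generated data

    @given(fail_points, payloads)
    def run(fail_after, data):
        pass  # placeholder for the file-object exercise

    run()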
__________________ TFileTypeOptimFROG.test_test_fileobj_load ___________________
E   hypothesis.errors.FailedHealthCheck: Data generation is extremely slow: Only produced 5 valid examples in 1.50 seconds (0 invalid ones and 0 exceeded maximum size). Try decreasing size of the data you're generating (with e.g. max_size or max_leaves parameters).
tests/test___init__.py:345: FailedHealthCheck
You can add @seed(114505831743951734778294178776504979342) to this test or run pytest with --hypothesis-seed=114505831743951734778294178776504979342 to reproduce this failure.
_________________ TFileTypeOptimFROG_2.test_test_fileobj_load __________________
E   hypothesis.errors.FailedHealthCheck: Data generation is extremely slow: Only produced 7 valid examples in 1.34 seconds (0 invalid ones and 0 exceeded maximum size). Try decreasing size of the data you're generating (with e.g. max_size or max_leaves parameters).
tests/test___init__.py:345: FailedHealthCheck
You can add @seed(292104831524237749681748322906370376121) to this test or run pytest with --hypothesis-seed=292104831524237749681748322906370376121 to reproduce this failure.
____________________ TFileTypeSMF.test_test_fileobj_delete _____________________
E   hypothesis.errors.FailedHealthCheck: Data generation is extremely slow: Only produced 3 valid examples in 1.15 seconds (0 invalid ones and 0 exceeded maximum size). Try decreasing size of the data you're generating (with e.g. max_size or max_leaves parameters).
tests/test___init__.py:371: FailedHealthCheck
You can add @seed(59476871605452591099104810919477991487) to this test or run pytest with --hypothesis-seed=59476871605452591099104810919477991487 to reproduce this failure.
tests/test___init__.py:371: FailedHealthCheck ----------------------------- Captured stdout call ----------------------------- You can add @seed(59476871605452591099104810919477991487) to this test or run pytest with --hypothesis-seed=59476871605452591099104810919477991487 to reproduce this failure. _____________________ TFileTypeSMF.test_test_fileobj_load ______________________ self = data = ConjectureData(INTERESTING, 2 bytes, frozen) def _execute_once_for_engine(self, data): """Wrapper around ``execute_once`` that intercepts test failure exceptions and single-test control exceptions, and turns them into appropriate method calls to `data` instead. This allows the engine to assume that any exception other than ``StopTest`` must be a fatal error, and should stop the entire engine. """ try: trace = frozenset() if ( self.failed_normally and not self.failed_due_to_deadline and Phase.shrink in self.settings.phases and Phase.explain in self.settings.phases and sys.gettrace() is None and not PYPY ): # pragma: no cover # This is in fact covered by our *non-coverage* tests, but due to the # settrace() contention *not* by our coverage tests. Ah well. tracer = Tracer() try: sys.settrace(tracer.trace) result = self.execute_once(data) if data.status == Status.VALID: self.explain_traces[None].add(frozenset(tracer.branches)) finally: sys.settrace(None) trace = frozenset(tracer.branches) else: > result = self.execute_once(data) /usr/lib/python3.12/site-packages/hypothesis/core.py:850: _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ self = data = ConjectureData(INTERESTING, 2 bytes, frozen), print_example = False is_final = False, expected_failure = None, example_kwargs = None def execute_once( self, data, print_example=False, is_final=False, expected_failure=None, example_kwargs=None, ): """Run the test function once, using ``data`` as input. If the test raises an exception, it will propagate through to the caller of this method. Depending on its type, this could represent an ordinary test failure, or a fatal error, or a control exception. If this method returns normally, the test might have passed, or it might have placed ``data`` in an unsuccessful state and then swallowed the corresponding control exception. """ self.ever_executed = True data.is_find = self.is_find text_repr = None if self.settings.deadline is None: test = self.test else: @proxies(self.test) def test(*args, **kwargs): self.__test_runtime = None initial_draws = len(data.draw_times) start = time.perf_counter() result = self.test(*args, **kwargs) finish = time.perf_counter() internal_draw_time = sum(data.draw_times[initial_draws:]) runtime = datetime.timedelta( seconds=finish - start - internal_draw_time ) self.__test_runtime = runtime current_deadline = self.settings.deadline if not is_final: current_deadline = (current_deadline // 4) * 5 if runtime >= current_deadline: raise DeadlineExceeded(runtime, self.settings.deadline) return result def run(data): # Set up dynamic context needed by a single test run. with local_settings(self.settings): with deterministic_PRNG(): with BuildContext(data, is_final=is_final) as context: if self.stuff.selfy is not None: data.hypothesis_runner = self.stuff.selfy # Generate all arguments to the test function. 
                    report(printer.getvalue())

            # Run the test function once, via the executor hook.
            # In most cases this will delegate straight to `run(data)`.
>           result = self.test_runner(data, run)

/usr/lib/python3.12/site-packages/hypothesis/core.py:789:
_ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _

data = ConjectureData(INTERESTING, 2 bytes, frozen)

    def default_new_style_executor(data, function):
>       return function(data)

/usr/lib/python3.12/site-packages/hypothesis/executors.py:47:
_ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _

>                       return test(*args, **kwargs)

/usr/lib/python3.12/site-packages/hypothesis/core.py:785:
_ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _

        @given(generate_test_file_objects(h, self.KIND))
>       def run(t):

tests/test___init__.py:345:
_ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _

runtime = datetime.timedelta(microseconds=414001)
current_deadline = datetime.timedelta(microseconds=250000)

            if runtime >= current_deadline:
>               raise DeadlineExceeded(runtime, self.settings.deadline)
E               hypothesis.errors.DeadlineExceeded: Test took 414.00ms, which exceeds the deadline of 200.00ms

/usr/lib/python3.12/site-packages/hypothesis/core.py:737: DeadlineExceeded

The above exception was the direct cause of the following exception:

    def test_test_fileobj_load(self):
        with open(self.filename, "rb") as h:
            @given(generate_test_file_objects(h, self.KIND))
            def run(t):
                try:
                    self.KIND(t)
                except MutagenError:
                    pass
>           run()

tests/test___init__.py:351:
_ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _

data = ConjectureData(VALID, 2 bytes, frozen), print_example = True
is_final = True
expected_failure = (DeadlineExceeded('Test took 414.00ms, which exceeds the deadline of 200.00ms'), 'Traceback (most recent call last):\n....settings.deadline)\nhypothesis.errors.DeadlineExceeded: Test took 414.00ms, which exceeds the deadline of 200.00ms\n')

>           raise Flaky(
                f"Hypothesis {text_repr} produces unreliable results: "
                "Falsified on the first call but did not on a subsequent one"
            ) from exception
E           hypothesis.errors.Flaky: Hypothesis run(t=) produces unreliable results: Falsified on the first call but did not on a subsequent one
E           Falsifying example: run(
E               t=(lambda i: _TestFileObj(fileobj, fail_after=i))(14),
E           )
E           Unreliable test timings! On an initial run, this test took 414.00ms, which exceeded the deadline of 200.00ms, but on a subsequent run it took 35.67 ms, which did not. If you expect this sort of variability in your test timings, consider turning deadlines off for this test by setting deadline=None.

/usr/lib/python3.12/site-packages/hypothesis/core.py:814: Flaky
_____________________ TFileTypeTAK.test_test_fileobj_load ______________________

    def test_test_fileobj_load(self):
        with open(self.filename, "rb") as h:
            @given(generate_test_file_objects(h, self.KIND))
            def run(t):
                try:
                    self.KIND(t)
                except MutagenError:
                    pass
>           run()

tests/test___init__.py:351:
_ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _

        @given(generate_test_file_objects(h, self.KIND))
>       def run(t):
E       hypothesis.errors.FailedHealthCheck: Data generation is extremely slow: Only produced 6 valid examples in 1.36 seconds (0 invalid ones and 0 exceeded maximum size). Try decreasing size of the data you're generating (with e.g. max_size or max_leaves parameters).
E       See https://hypothesis.readthedocs.io/en/latest/healthchecks.html for more information about this. If you want to disable just this health check, add HealthCheck.too_slow to the suppress_health_check settings for this test.

tests/test___init__.py:345: FailedHealthCheck
----------------------------- Captured stdout call -----------------------------
You can add @seed(274027271720847315617218830443527533902) to this test or run pytest with --hypothesis-seed=274027271720847315617218830443527533902 to reproduce this failure.
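The Flaky failures above all follow one pattern: the first run of an example blows the 200 ms deadline, the replay finishes quickly, and Hypothesis reports the timing as unreliable rather than flagging a real mutagen bug. Following the hint in the error message itself, a minimal sketch of opting a single property test out of the deadline looks like this (the strategy and body are placeholders, not the suite's generate_test_file_objects machinery):

    from hypothesis import given, settings, strategies as st

    # Sketch only: disable the per-example deadline so a slow first run
    # can no longer turn into DeadlineExceeded -> Flaky.
    @settings(deadline=None)
    @given(st.binary(max_size=64))
    def run(payload):
        pass  # variable-duration parsing work would go here

    run()  # @given-wrapped functions are plain callables, invoked like the suite's run()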
""" self.ever_executed = True data.is_find = self.is_find text_repr = None if self.settings.deadline is None: test = self.test else: @proxies(self.test) def test(*args, **kwargs): self.__test_runtime = None initial_draws = len(data.draw_times) start = time.perf_counter() result = self.test(*args, **kwargs) finish = time.perf_counter() internal_draw_time = sum(data.draw_times[initial_draws:]) runtime = datetime.timedelta( seconds=finish - start - internal_draw_time ) self.__test_runtime = runtime current_deadline = self.settings.deadline if not is_final: current_deadline = (current_deadline // 4) * 5 if runtime >= current_deadline: raise DeadlineExceeded(runtime, self.settings.deadline) return result def run(data): # Set up dynamic context needed by a single test run. with local_settings(self.settings): with deterministic_PRNG(): with BuildContext(data, is_final=is_final) as context: if self.stuff.selfy is not None: data.hypothesis_runner = self.stuff.selfy # Generate all arguments to the test function. args = self.stuff.args kwargs = dict(self.stuff.kwargs) if example_kwargs is None: a, kw, argslices = context.prep_args_kwargs_from_strategies( (), self.stuff.given_kwargs ) assert not a, "strategies all moved to kwargs by now" else: kw = example_kwargs argslices = {} kwargs.update(kw) if expected_failure is not None: nonlocal text_repr text_repr = repr_call(test, args, kwargs) if print_example or current_verbosity() >= Verbosity.verbose: printer = RepresentationPrinter(context=context) if print_example: printer.text("Falsifying example:") else: printer.text("Trying example:") if self.print_given_args: printer.text(" ") printer.repr_call( test.__name__, args, kwargs, force_split=True, arg_slices=argslices, leading_comment=( "# " + context.data.slice_comments[(0, 0)] if (0, 0) in context.data.slice_comments else None ), ) report(printer.getvalue()) return test(*args, **kwargs) # Run the test function once, via the executor hook. # In most cases this will delegate straight to `run(data)`. > result = self.test_runner(data, run) /usr/lib/python3.12/site-packages/hypothesis/core.py:789: _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ data = ConjectureData(INTERESTING, 2 bytes, frozen) function = .run at 0xffffffb985ae80> def default_new_style_executor(data, function): > return function(data) /usr/lib/python3.12/site-packages/hypothesis/executors.py:47: _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ data = ConjectureData(INTERESTING, 2 bytes, frozen) def run(data): # Set up dynamic context needed by a single test run. with local_settings(self.settings): with deterministic_PRNG(): with BuildContext(data, is_final=is_final) as context: if self.stuff.selfy is not None: data.hypothesis_runner = self.stuff.selfy # Generate all arguments to the test function. 
args = self.stuff.args kwargs = dict(self.stuff.kwargs) if example_kwargs is None: a, kw, argslices = context.prep_args_kwargs_from_strategies( (), self.stuff.given_kwargs ) assert not a, "strategies all moved to kwargs by now" else: kw = example_kwargs argslices = {} kwargs.update(kw) if expected_failure is not None: nonlocal text_repr text_repr = repr_call(test, args, kwargs) if print_example or current_verbosity() >= Verbosity.verbose: printer = RepresentationPrinter(context=context) if print_example: printer.text("Falsifying example:") else: printer.text("Trying example:") if self.print_given_args: printer.text(" ") printer.repr_call( test.__name__, args, kwargs, force_split=True, arg_slices=argslices, leading_comment=( "# " + context.data.slice_comments[(0, 0)] if (0, 0) in context.data.slice_comments else None ), ) report(printer.getvalue()) > return test(*args, **kwargs) /usr/lib/python3.12/site-packages/hypothesis/core.py:785: _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ t = @given(generate_test_file_objects( > h, lambda t: o.delete(fileobj=t))) tests/test___init__.py:371: _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ args = (,) kwargs = {}, initial_draws = 1, start = 9284012.049784312, result = None finish = 9284012.339228813, internal_draw_time = 0 runtime = datetime.timedelta(microseconds=289445) current_deadline = datetime.timedelta(microseconds=250000) @proxies(self.test) def test(*args, **kwargs): self.__test_runtime = None initial_draws = len(data.draw_times) start = time.perf_counter() result = self.test(*args, **kwargs) finish = time.perf_counter() internal_draw_time = sum(data.draw_times[initial_draws:]) runtime = datetime.timedelta( seconds=finish - start - internal_draw_time ) self.__test_runtime = runtime current_deadline = self.settings.deadline if not is_final: current_deadline = (current_deadline // 4) * 5 if runtime >= current_deadline: > raise DeadlineExceeded(runtime, self.settings.deadline) E hypothesis.errors.DeadlineExceeded: Test took 289.44ms, which exceeds the deadline of 200.00ms /usr/lib/python3.12/site-packages/hypothesis/core.py:737: DeadlineExceeded The above exception was the direct cause of the following exception: self = def test_test_fileobj_delete(self): with open(self.filename, "rb+") as h: o = self.KIND(_TestFileObj(h)) @given(generate_test_file_objects( h, lambda t: o.delete(fileobj=t))) def run(t): try: o.delete(fileobj=t) except MutagenError: pass > run() tests/test___init__.py:378: _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ tests/test___init__.py:371: in run h, lambda t: o.delete(fileobj=t))) _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ self = data = ConjectureData(VALID, 2 bytes, frozen), print_example = True is_final = True expected_failure = (DeadlineExceeded('Test took 289.44ms, which exceeds the deadline of 200.00ms'), 'Traceback (most recent call last):\n....settings.deadline)\nhypothesis.errors.DeadlineExceeded: Test took 289.44ms, which exceeds the deadline of 200.00ms\n') example_kwargs = None def execute_once( self, data, print_example=False, is_final=False, expected_failure=None, example_kwargs=None, ): """Run the test function once, using ``data`` as input. If the test raises an exception, it will propagate through to the caller of this method. Depending on its type, this could represent an ordinary test failure, or a fatal error, or a control exception. 
If this method returns normally, the test might have passed, or it might have placed ``data`` in an unsuccessful state and then swallowed the corresponding control exception. """ self.ever_executed = True data.is_find = self.is_find text_repr = None if self.settings.deadline is None: test = self.test else: @proxies(self.test) def test(*args, **kwargs): self.__test_runtime = None initial_draws = len(data.draw_times) start = time.perf_counter() result = self.test(*args, **kwargs) finish = time.perf_counter() internal_draw_time = sum(data.draw_times[initial_draws:]) runtime = datetime.timedelta( seconds=finish - start - internal_draw_time ) self.__test_runtime = runtime current_deadline = self.settings.deadline if not is_final: current_deadline = (current_deadline // 4) * 5 if runtime >= current_deadline: raise DeadlineExceeded(runtime, self.settings.deadline) return result def run(data): # Set up dynamic context needed by a single test run. with local_settings(self.settings): with deterministic_PRNG(): with BuildContext(data, is_final=is_final) as context: if self.stuff.selfy is not None: data.hypothesis_runner = self.stuff.selfy # Generate all arguments to the test function. args = self.stuff.args kwargs = dict(self.stuff.kwargs) if example_kwargs is None: a, kw, argslices = context.prep_args_kwargs_from_strategies( (), self.stuff.given_kwargs ) assert not a, "strategies all moved to kwargs by now" else: kw = example_kwargs argslices = {} kwargs.update(kw) if expected_failure is not None: nonlocal text_repr text_repr = repr_call(test, args, kwargs) if print_example or current_verbosity() >= Verbosity.verbose: printer = RepresentationPrinter(context=context) if print_example: printer.text("Falsifying example:") else: printer.text("Trying example:") if self.print_given_args: printer.text(" ") printer.repr_call( test.__name__, args, kwargs, force_split=True, arg_slices=argslices, leading_comment=( "# " + context.data.slice_comments[(0, 0)] if (0, 0) in context.data.slice_comments else None ), ) report(printer.getvalue()) return test(*args, **kwargs) # Run the test function once, via the executor hook. # In most cases this will delegate straight to `run(data)`. result = self.test_runner(data, run) # If a failure was expected, it should have been raised already, so # instead raise an appropriate diagnostic error. if expected_failure is not None: exception, traceback = expected_failure if ( isinstance(exception, DeadlineExceeded) and self.__test_runtime is not None ): report( "Unreliable test timings! On an initial run, this " "test took %.2fms, which exceeded the deadline of " "%.2fms, but on a subsequent run it took %.2f ms, " "which did not. If you expect this sort of " "variability in your test timings, consider turning " "deadlines off for this test by setting deadline=None." % ( exception.runtime.total_seconds() * 1000, self.settings.deadline.total_seconds() * 1000, self.__test_runtime.total_seconds() * 1000, ) ) else: report("Failed to reproduce exception. Expected: \n" + traceback) > raise Flaky( f"Hypothesis {text_repr} produces unreliable results: " "Falsified on the first call but did not on a subsequent one" ) from exception E hypothesis.errors.Flaky: Hypothesis run(t=) produces unreliable results: Falsified on the first call but did not on a subsequent one E Falsifying example: run( E t=(lambda i: _TestFileObj(fileobj, stop_after=i))(1), E ) E Unreliable test timings! 
On an initial run, this test took 289.44ms, which exceeded the deadline of 200.00ms, but on a subsequent run it took 2.79 ms, which did not. If you expect this sort of variability in your test timings, consider turning deadlines off for this test by setting deadline=None. /usr/lib/python3.12/site-packages/hypothesis/core.py:814: Flaky ____________________ TFileTypeTAK_2.test_test_fileobj_load _____________________ self = def test_test_fileobj_load(self): with open(self.filename, "rb") as h: @given(generate_test_file_objects(h, self.KIND)) def run(t): try: self.KIND(t) except MutagenError: pass > run() tests/test___init__.py:351: _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ @given(generate_test_file_objects(h, self.KIND)) > def run(t): E hypothesis.errors.FailedHealthCheck: Data generation is extremely slow: Only produced 6 valid examples in 1.02 seconds (0 invalid ones and 0 exceeded maximum size). Try decreasing size of the data you're generating (with e.g. max_size or max_leaves parameters). E See https://hypothesis.readthedocs.io/en/latest/healthchecks.html for more information about this. If you want to disable just this health check, add HealthCheck.too_slow to the suppress_health_check settings for this test. tests/test___init__.py:345: FailedHealthCheck ----------------------------- Captured stdout call ----------------------------- You can add @seed(290580270238147740433374201080758902532) to this test or run pytest with --hypothesis-seed=290580270238147740433374201080758902532 to reproduce this failure. ____________________ TFileTypeTAK_2.test_test_fileobj_save _____________________ self = def test_test_fileobj_save(self): with open(self.filename, "rb+") as h: o = self.KIND(_TestFileObj(h)) @given(generate_test_file_objects(h, lambda t: o.save(fileobj=t))) def run(t): try: o.save(fileobj=t) except MutagenError: pass > run() tests/test___init__.py:364: _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ @given(generate_test_file_objects(h, lambda t: o.save(fileobj=t))) > def run(t): E hypothesis.errors.FailedHealthCheck: Data generation is extremely slow: Only produced 6 valid examples in 1.35 seconds (0 invalid ones and 0 exceeded maximum size). Try decreasing size of the data you're generating (with e.g. max_size or max_leaves parameters). E See https://hypothesis.readthedocs.io/en/latest/healthchecks.html for more information about this. If you want to disable just this health check, add HealthCheck.too_slow to the suppress_health_check settings for this test. tests/test___init__.py:358: FailedHealthCheck ----------------------------- Captured stdout call ----------------------------- You can add @seed(73641701960336848694462986407466523083) to this test or run pytest with --hypothesis-seed=73641701960336848694462986407466523083 to reproduce this failure. __________________ TFileTypeTrueAudio.test_test_fileobj_load ___________________ self = def test_test_fileobj_load(self): with open(self.filename, "rb") as h: @given(generate_test_file_objects(h, self.KIND)) def run(t): try: self.KIND(t) except MutagenError: pass > run() tests/test___init__.py:351: _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ @given(generate_test_file_objects(h, self.KIND)) > def run(t): E hypothesis.errors.FailedHealthCheck: Data generation is extremely slow: Only produced 6 valid examples in 1.02 seconds (0 invalid ones and 0 exceeded maximum size). Try decreasing size of the data you're generating (with e.g. 
max_size or max_leaves parameters). E See https://hypothesis.readthedocs.io/en/latest/healthchecks.html for more information about this. If you want to disable just this health check, add HealthCheck.too_slow to the suppress_health_check settings for this test. tests/test___init__.py:345: FailedHealthCheck ----------------------------- Captured stdout call ----------------------------- You can add @seed(62603040642962552168825438960622606076) to this test or run pytest with --hypothesis-seed=62603040642962552168825438960622606076 to reproduce this failure. ____________________ TFileTypeWAVE.test_test_fileobj_delete ____________________ self = def test_test_fileobj_delete(self): with open(self.filename, "rb+") as h: o = self.KIND(_TestFileObj(h)) @given(generate_test_file_objects( h, lambda t: o.delete(fileobj=t))) def run(t): try: o.delete(fileobj=t) except MutagenError: pass > run() tests/test___init__.py:378: _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ @given(generate_test_file_objects( > h, lambda t: o.delete(fileobj=t))) E hypothesis.errors.FailedHealthCheck: Data generation is extremely slow: Only produced 4 valid examples in 1.00 seconds (0 invalid ones and 0 exceeded maximum size). Try decreasing size of the data you're generating (with e.g. max_size or max_leaves parameters). E See https://hypothesis.readthedocs.io/en/latest/healthchecks.html for more information about this. If you want to disable just this health check, add HealthCheck.too_slow to the suppress_health_check settings for this test. tests/test___init__.py:371: FailedHealthCheck ----------------------------- Captured stdout call ----------------------------- You can add @seed(317594193090185399530331309165329967276) to this test or run pytest with --hypothesis-seed=317594193090185399530331309165329967276 to reproduce this failure. _____________________ TFileTypeWAVE.test_test_fileobj_load _____________________ self = def test_test_fileobj_load(self): with open(self.filename, "rb") as h: @given(generate_test_file_objects(h, self.KIND)) def run(t): try: self.KIND(t) except MutagenError: pass > run() tests/test___init__.py:351: _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ @given(generate_test_file_objects(h, self.KIND)) > def run(t): E hypothesis.errors.FailedHealthCheck: Data generation is extremely slow: Only produced 5 valid examples in 1.40 seconds (0 invalid ones and 0 exceeded maximum size). Try decreasing size of the data you're generating (with e.g. max_size or max_leaves parameters). E See https://hypothesis.readthedocs.io/en/latest/healthchecks.html for more information about this. If you want to disable just this health check, add HealthCheck.too_slow to the suppress_health_check settings for this test. tests/test___init__.py:345: FailedHealthCheck ----------------------------- Captured stdout call ----------------------------- You can add @seed(234675452625034596419383760163412951475) to this test or run pytest with --hypothesis-seed=234675452625034596419383760163412951475 to reproduce this failure. _____________________ TFileTypeWAVE.test_test_fileobj_save _____________________ self = data = ConjectureData(INTERESTING, 2 bytes, frozen) def _execute_once_for_engine(self, data): """Wrapper around ``execute_once`` that intercepts test failure exceptions and single-test control exceptions, and turns them into appropriate method calls to `data` instead. 
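The FailedHealthCheck blocks are a different failure mode: no example ever falsified a test, but generating valid file objects was slower than Hypothesis's health-check threshold (only 4-6 valid examples per second here). The message names the exact switch to flip; a hedged sketch of suppressing just that check for one test, again with a placeholder strategy standing in for generate_test_file_objects:

    from hypothesis import HealthCheck, given, settings, strategies as st

    # Sketch only: keep all other health checks, but stop treating slow
    # data generation as an error for this one test.
    @settings(suppress_health_check=[HealthCheck.too_slow])
    @given(st.binary(max_size=64))
    def run(payload):
        pass

    run()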
_____________________ TFileTypeWAVE.test_test_fileobj_save _____________________

>           result = self.execute_once(data)

/usr/lib/python3.12/site-packages/hypothesis/core.py:850:
_ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _

>       result = self.test_runner(data, run)

/usr/lib/python3.12/site-packages/hypothesis/core.py:789:
_ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _

data = ConjectureData(INTERESTING, 2 bytes, frozen)

    def default_new_style_executor(data, function):
>       return function(data)

/usr/lib/python3.12/site-packages/hypothesis/executors.py:47:
_ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _

>                       return test(*args, **kwargs)

/usr/lib/python3.12/site-packages/hypothesis/core.py:785:
_ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _

        @given(generate_test_file_objects(h, lambda t: o.save(fileobj=t)))
>       def run(t):

tests/test___init__.py:358:
_ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _

runtime = datetime.timedelta(microseconds=334024)
current_deadline = datetime.timedelta(microseconds=250000)

            if runtime >= current_deadline:
>               raise DeadlineExceeded(runtime, self.settings.deadline)
E               hypothesis.errors.DeadlineExceeded: Test took 334.02ms, which exceeds the deadline of 200.00ms

/usr/lib/python3.12/site-packages/hypothesis/core.py:737: DeadlineExceeded

The above exception was the direct cause of the following exception:

    def test_test_fileobj_save(self):
        with open(self.filename, "rb+") as h:
            o = self.KIND(_TestFileObj(h))
            @given(generate_test_file_objects(h, lambda t: o.save(fileobj=t)))
            def run(t):
                try:
                    o.save(fileobj=t)
                except MutagenError:
                    pass
>           run()

tests/test___init__.py:364:
_ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _

data = ConjectureData(VALID, 2 bytes, frozen), print_example = True
is_final = True
expected_failure = (DeadlineExceeded('Test took 334.02ms, which exceeds the deadline of 200.00ms'), 'Traceback (most recent call last):\n....settings.deadline)\nhypothesis.errors.DeadlineExceeded: Test took 334.02ms, which exceeds the deadline of 200.00ms\n')

>           raise Flaky(
                f"Hypothesis {text_repr} produces unreliable results: "
                "Falsified on the first call but did not on a subsequent one"
            ) from exception
E           hypothesis.errors.Flaky: Hypothesis run(t=) produces unreliable results: Falsified on the first call but did not on a subsequent one
E           Falsifying example: run(
E               t=(lambda i: _TestFileObj(fileobj, fail_after=i))(3),
E           )
E           Unreliable test timings! On an initial run, this test took 334.02ms, which exceeded the deadline of 200.00ms, but on a subsequent run it took 0.95 ms, which did not. If you expect this sort of variability in your test timings, consider turning deadlines off for this test by setting deadline=None.

/usr/lib/python3.12/site-packages/hypothesis/core.py:814: Flaky
____________________ TFileTypeWAVE_2.test_test_fileobj_load ____________________

    def test_test_fileobj_load(self):
        with open(self.filename, "rb") as h:
            @given(generate_test_file_objects(h, self.KIND))
            def run(t):
                try:
                    self.KIND(t)
                except MutagenError:
                    pass
>           run()

tests/test___init__.py:351:
_ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _

        @given(generate_test_file_objects(h, self.KIND))
>       def run(t):
E       hypothesis.errors.FailedHealthCheck: Data generation is extremely slow: Only produced 4 valid examples in 1.15 seconds (0 invalid ones and 0 exceeded maximum size). Try decreasing size of the data you're generating (with e.g. max_size or max_leaves parameters).
E       See https://hypothesis.readthedocs.io/en/latest/healthchecks.html for more information about this. If you want to disable just this health check, add HealthCheck.too_slow to the suppress_health_check settings for this test.

tests/test___init__.py:345: FailedHealthCheck
----------------------------- Captured stdout call -----------------------------
You can add @seed(82879179151248581946572877975981960575) to this test or run pytest with --hypothesis-seed=82879179151248581946572877975981960575 to reproduce this failure.
___________________ TFileTypeWAVE_3.test_test_fileobj_delete ___________________

    def test_test_fileobj_delete(self):
        with open(self.filename, "rb+") as h:
            o = self.KIND(_TestFileObj(h))
            @given(generate_test_file_objects(
                h, lambda t: o.delete(fileobj=t)))
            def run(t):
                try:
                    o.delete(fileobj=t)
                except MutagenError:
                    pass
>           run()

tests/test___init__.py:378:
_ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _

        @given(generate_test_file_objects(
>           h, lambda t: o.delete(fileobj=t)))
E       hypothesis.errors.FailedHealthCheck: Data generation is extremely slow: Only produced 5 valid examples in 1.01 seconds (0 invalid ones and 0 exceeded maximum size). Try decreasing size of the data you're generating (with e.g. max_size or max_leaves parameters).
E       See https://hypothesis.readthedocs.io/en/latest/healthchecks.html for more information about this. If you want to disable just this health check, add HealthCheck.too_slow to the suppress_health_check settings for this test.

tests/test___init__.py:371: FailedHealthCheck
----------------------------- Captured stdout call -----------------------------
You can add @seed(183476937297571551363652408667516301685) to this test or run pytest with --hypothesis-seed=183476937297571551363652408667516301685 to reproduce this failure.
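The locals shown in the deadline frames explain the 250 000 µs figure that keeps appearing next to the configured 200 ms: for non-final runs the wrapper enforces (deadline // 4) * 5, i.e. 1.25x the configured deadline, and only the final replay uses the strict value. A quick check of that arithmetic:

    from datetime import timedelta

    deadline = timedelta(milliseconds=200)  # self.settings.deadline in the frames above
    grace = (deadline // 4) * 5             # the (current_deadline // 4) * 5 step
    print(grace)                            # 0:00:00.250000 -- the 250000 us in the locals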
""" try: trace = frozenset() if ( self.failed_normally and not self.failed_due_to_deadline and Phase.shrink in self.settings.phases and Phase.explain in self.settings.phases and sys.gettrace() is None and not PYPY ): # pragma: no cover # This is in fact covered by our *non-coverage* tests, but due to the # settrace() contention *not* by our coverage tests. Ah well. tracer = Tracer() try: sys.settrace(tracer.trace) result = self.execute_once(data) if data.status == Status.VALID: self.explain_traces[None].add(frozenset(tracer.branches)) finally: sys.settrace(None) trace = frozenset(tracer.branches) else: > result = self.execute_once(data) /usr/lib/python3.12/site-packages/hypothesis/core.py:850: _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ self = data = ConjectureData(INTERESTING, 4 bytes, frozen), print_example = False is_final = False, expected_failure = None, example_kwargs = None def execute_once( self, data, print_example=False, is_final=False, expected_failure=None, example_kwargs=None, ): """Run the test function once, using ``data`` as input. If the test raises an exception, it will propagate through to the caller of this method. Depending on its type, this could represent an ordinary test failure, or a fatal error, or a control exception. If this method returns normally, the test might have passed, or it might have placed ``data`` in an unsuccessful state and then swallowed the corresponding control exception. """ self.ever_executed = True data.is_find = self.is_find text_repr = None if self.settings.deadline is None: test = self.test else: @proxies(self.test) def test(*args, **kwargs): self.__test_runtime = None initial_draws = len(data.draw_times) start = time.perf_counter() result = self.test(*args, **kwargs) finish = time.perf_counter() internal_draw_time = sum(data.draw_times[initial_draws:]) runtime = datetime.timedelta( seconds=finish - start - internal_draw_time ) self.__test_runtime = runtime current_deadline = self.settings.deadline if not is_final: current_deadline = (current_deadline // 4) * 5 if runtime >= current_deadline: raise DeadlineExceeded(runtime, self.settings.deadline) return result def run(data): # Set up dynamic context needed by a single test run. with local_settings(self.settings): with deterministic_PRNG(): with BuildContext(data, is_final=is_final) as context: if self.stuff.selfy is not None: data.hypothesis_runner = self.stuff.selfy # Generate all arguments to the test function. args = self.stuff.args kwargs = dict(self.stuff.kwargs) if example_kwargs is None: a, kw, argslices = context.prep_args_kwargs_from_strategies( (), self.stuff.given_kwargs ) assert not a, "strategies all moved to kwargs by now" else: kw = example_kwargs argslices = {} kwargs.update(kw) if expected_failure is not None: nonlocal text_repr text_repr = repr_call(test, args, kwargs) if print_example or current_verbosity() >= Verbosity.verbose: printer = RepresentationPrinter(context=context) if print_example: printer.text("Falsifying example:") else: printer.text("Trying example:") if self.print_given_args: printer.text(" ") printer.repr_call( test.__name__, args, kwargs, force_split=True, arg_slices=argslices, leading_comment=( "# " + context.data.slice_comments[(0, 0)] if (0, 0) in context.data.slice_comments else None ), ) report(printer.getvalue()) return test(*args, **kwargs) # Run the test function once, via the executor hook. # In most cases this will delegate straight to `run(data)`. 
> result = self.test_runner(data, run) /usr/lib/python3.12/site-packages/hypothesis/core.py:789: _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ data = ConjectureData(INTERESTING, 4 bytes, frozen) function = .run at 0xffffffb96d74c0> def default_new_style_executor(data, function): > return function(data) /usr/lib/python3.12/site-packages/hypothesis/executors.py:47: _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ data = ConjectureData(INTERESTING, 4 bytes, frozen) def run(data): # Set up dynamic context needed by a single test run. with local_settings(self.settings): with deterministic_PRNG(): with BuildContext(data, is_final=is_final) as context: if self.stuff.selfy is not None: data.hypothesis_runner = self.stuff.selfy # Generate all arguments to the test function. args = self.stuff.args kwargs = dict(self.stuff.kwargs) if example_kwargs is None: a, kw, argslices = context.prep_args_kwargs_from_strategies( (), self.stuff.given_kwargs ) assert not a, "strategies all moved to kwargs by now" else: kw = example_kwargs argslices = {} kwargs.update(kw) if expected_failure is not None: nonlocal text_repr text_repr = repr_call(test, args, kwargs) if print_example or current_verbosity() >= Verbosity.verbose: printer = RepresentationPrinter(context=context) if print_example: printer.text("Falsifying example:") else: printer.text("Trying example:") if self.print_given_args: printer.text(" ") printer.repr_call( test.__name__, args, kwargs, force_split=True, arg_slices=argslices, leading_comment=( "# " + context.data.slice_comments[(0, 0)] if (0, 0) in context.data.slice_comments else None ), ) report(printer.getvalue()) > return test(*args, **kwargs) /usr/lib/python3.12/site-packages/hypothesis/core.py:785: _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ t = @given(generate_test_file_objects(h, self.KIND)) > def run(t): tests/test___init__.py:345: _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ args = (,) kwargs = {}, initial_draws = 1, start = 9284118.415744796, result = None finish = 9284118.739265792, internal_draw_time = 0 runtime = datetime.timedelta(microseconds=323521) current_deadline = datetime.timedelta(microseconds=250000) @proxies(self.test) def test(*args, **kwargs): self.__test_runtime = None initial_draws = len(data.draw_times) start = time.perf_counter() result = self.test(*args, **kwargs) finish = time.perf_counter() internal_draw_time = sum(data.draw_times[initial_draws:]) runtime = datetime.timedelta( seconds=finish - start - internal_draw_time ) self.__test_runtime = runtime current_deadline = self.settings.deadline if not is_final: current_deadline = (current_deadline // 4) * 5 if runtime >= current_deadline: > raise DeadlineExceeded(runtime, self.settings.deadline) E hypothesis.errors.DeadlineExceeded: Test took 323.52ms, which exceeds the deadline of 200.00ms /usr/lib/python3.12/site-packages/hypothesis/core.py:737: DeadlineExceeded The above exception was the direct cause of the following exception: self = def test_test_fileobj_load(self): with open(self.filename, "rb") as h: @given(generate_test_file_objects(h, self.KIND)) def run(t): try: self.KIND(t) except MutagenError: pass > run() tests/test___init__.py:351: _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ tests/test___init__.py:345: in run def run(t): _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ self = data = 
ConjectureData(VALID, 4 bytes, frozen), print_example = True is_final = True expected_failure = (DeadlineExceeded('Test took 323.52ms, which exceeds the deadline of 200.00ms'), 'Traceback (most recent call last):\n....settings.deadline)\nhypothesis.errors.DeadlineExceeded: Test took 323.52ms, which exceeds the deadline of 200.00ms\n') example_kwargs = None def execute_once( self, data, print_example=False, is_final=False, expected_failure=None, example_kwargs=None, ): """Run the test function once, using ``data`` as input. If the test raises an exception, it will propagate through to the caller of this method. Depending on its type, this could represent an ordinary test failure, or a fatal error, or a control exception. If this method returns normally, the test might have passed, or it might have placed ``data`` in an unsuccessful state and then swallowed the corresponding control exception. """ self.ever_executed = True data.is_find = self.is_find text_repr = None if self.settings.deadline is None: test = self.test else: @proxies(self.test) def test(*args, **kwargs): self.__test_runtime = None initial_draws = len(data.draw_times) start = time.perf_counter() result = self.test(*args, **kwargs) finish = time.perf_counter() internal_draw_time = sum(data.draw_times[initial_draws:]) runtime = datetime.timedelta( seconds=finish - start - internal_draw_time ) self.__test_runtime = runtime current_deadline = self.settings.deadline if not is_final: current_deadline = (current_deadline // 4) * 5 if runtime >= current_deadline: raise DeadlineExceeded(runtime, self.settings.deadline) return result def run(data): # Set up dynamic context needed by a single test run. with local_settings(self.settings): with deterministic_PRNG(): with BuildContext(data, is_final=is_final) as context: if self.stuff.selfy is not None: data.hypothesis_runner = self.stuff.selfy # Generate all arguments to the test function. args = self.stuff.args kwargs = dict(self.stuff.kwargs) if example_kwargs is None: a, kw, argslices = context.prep_args_kwargs_from_strategies( (), self.stuff.given_kwargs ) assert not a, "strategies all moved to kwargs by now" else: kw = example_kwargs argslices = {} kwargs.update(kw) if expected_failure is not None: nonlocal text_repr text_repr = repr_call(test, args, kwargs) if print_example or current_verbosity() >= Verbosity.verbose: printer = RepresentationPrinter(context=context) if print_example: printer.text("Falsifying example:") else: printer.text("Trying example:") if self.print_given_args: printer.text(" ") printer.repr_call( test.__name__, args, kwargs, force_split=True, arg_slices=argslices, leading_comment=( "# " + context.data.slice_comments[(0, 0)] if (0, 0) in context.data.slice_comments else None ), ) report(printer.getvalue()) return test(*args, **kwargs) # Run the test function once, via the executor hook. # In most cases this will delegate straight to `run(data)`. result = self.test_runner(data, run) # If a failure was expected, it should have been raised already, so # instead raise an appropriate diagnostic error. if expected_failure is not None: exception, traceback = expected_failure if ( isinstance(exception, DeadlineExceeded) and self.__test_runtime is not None ): report( "Unreliable test timings! On an initial run, this " "test took %.2fms, which exceeded the deadline of " "%.2fms, but on a subsequent run it took %.2f ms, " "which did not. If you expect this sort of " "variability in your test timings, consider turning " "deadlines off for this test by setting deadline=None." 
% ( exception.runtime.total_seconds() * 1000, self.settings.deadline.total_seconds() * 1000, self.__test_runtime.total_seconds() * 1000, ) ) else: report("Failed to reproduce exception. Expected: \n" + traceback) > raise Flaky( f"Hypothesis {text_repr} produces unreliable results: " "Falsified on the first call but did not on a subsequent one" ) from exception E hypothesis.errors.Flaky: Hypothesis run(t=) produces unreliable results: Falsified on the first call but did not on a subsequent one E Falsifying example: run( E t=(lambda i: _TestFileObj(fileobj, stop_after=i))(0), E ) E Unreliable test timings! On an initial run, this test took 323.52ms, which exceeded the deadline of 200.00ms, but on a subsequent run it took 1.11 ms, which did not. If you expect this sort of variability in your test timings, consider turning deadlines off for this test by setting deadline=None. /usr/lib/python3.12/site-packages/hypothesis/core.py:814: Flaky ____________________ TFileTypeWAVE_3.test_test_fileobj_save ____________________ self = def test_test_fileobj_save(self): with open(self.filename, "rb+") as h: o = self.KIND(_TestFileObj(h)) @given(generate_test_file_objects(h, lambda t: o.save(fileobj=t))) def run(t): try: o.save(fileobj=t) except MutagenError: pass > run() tests/test___init__.py:364: _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ @given(generate_test_file_objects(h, lambda t: o.save(fileobj=t))) > def run(t): E hypothesis.errors.FailedHealthCheck: Data generation is extremely slow: Only produced 5 valid examples in 1.24 seconds (0 invalid ones and 0 exceeded maximum size). Try decreasing size of the data you're generating (with e.g. max_size or max_leaves parameters). E See https://hypothesis.readthedocs.io/en/latest/healthchecks.html for more information about this. If you want to disable just this health check, add HealthCheck.too_slow to the suppress_health_check settings for this test. tests/test___init__.py:358: FailedHealthCheck ----------------------------- Captured stdout call ----------------------------- You can add @seed(151722717132164690905643475361448179082) to this test or run pytest with --hypothesis-seed=151722717132164690905643475361448179082 to reproduce this failure. __________________ TFileTypeWavPack.test_test_fileobj_delete ___________________ self = def test_test_fileobj_delete(self): with open(self.filename, "rb+") as h: o = self.KIND(_TestFileObj(h)) @given(generate_test_file_objects( h, lambda t: o.delete(fileobj=t))) def run(t): try: o.delete(fileobj=t) except MutagenError: pass > run() tests/test___init__.py:378: _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ @given(generate_test_file_objects( > h, lambda t: o.delete(fileobj=t))) E hypothesis.errors.FailedHealthCheck: Data generation is extremely slow: Only produced 7 valid examples in 1.28 seconds (0 invalid ones and 0 exceeded maximum size). Try decreasing size of the data you're generating (with e.g. max_size or max_leaves parameters). E See https://hypothesis.readthedocs.io/en/latest/healthchecks.html for more information about this. If you want to disable just this health check, add HealthCheck.too_slow to the suppress_health_check settings for this test. 
tests/test___init__.py:371: FailedHealthCheck ----------------------------- Captured stdout call ----------------------------- You can add @seed(25037334470500497389516150890803391016) to this test or run pytest with --hypothesis-seed=25037334470500497389516150890803391016 to reproduce this failure. ___________________ TFileTypeWavPack.test_test_fileobj_load ____________________ self = def test_test_fileobj_load(self): with open(self.filename, "rb") as h: @given(generate_test_file_objects(h, self.KIND)) def run(t): try: self.KIND(t) except MutagenError: pass > run() tests/test___init__.py:351: _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ @given(generate_test_file_objects(h, self.KIND)) > def run(t): E hypothesis.errors.FailedHealthCheck: Data generation is extremely slow: Only produced 5 valid examples in 1.34 seconds (0 invalid ones and 0 exceeded maximum size). Try decreasing size of the data you're generating (with e.g. max_size or max_leaves parameters). E See https://hypothesis.readthedocs.io/en/latest/healthchecks.html for more information about this. If you want to disable just this health check, add HealthCheck.too_slow to the suppress_health_check settings for this test. tests/test___init__.py:345: FailedHealthCheck ----------------------------- Captured stdout call ----------------------------- You can add @seed(178197077677935556341549468350933662199) to this test or run pytest with --hypothesis-seed=178197077677935556341549468350933662199 to reproduce this failure. ___________________ TFileTypeWavPack.test_test_fileobj_save ____________________ self = def test_test_fileobj_save(self): with open(self.filename, "rb+") as h: o = self.KIND(_TestFileObj(h)) @given(generate_test_file_objects(h, lambda t: o.save(fileobj=t))) def run(t): try: o.save(fileobj=t) except MutagenError: pass > run() tests/test___init__.py:364: _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ @given(generate_test_file_objects(h, lambda t: o.save(fileobj=t))) > def run(t): E hypothesis.errors.FailedHealthCheck: Data generation is extremely slow: Only produced 5 valid examples in 1.35 seconds (0 invalid ones and 0 exceeded maximum size). Try decreasing size of the data you're generating (with e.g. max_size or max_leaves parameters). E See https://hypothesis.readthedocs.io/en/latest/healthchecks.html for more information about this. If you want to disable just this health check, add HealthCheck.too_slow to the suppress_health_check settings for this test. tests/test___init__.py:358: FailedHealthCheck ----------------------------- Captured stdout call ----------------------------- You can add @seed(202777419869203490879825141423277277639) to this test or run pytest with --hypothesis-seed=202777419869203490879825141423277277639 to reproduce this failure. ___________________________ TFile.test_mock_fileobj ____________________________ self = def test_mock_fileobj(self): for filename in self.filenames: with open(filename, "rb") as h: @given(generate_test_file_objects(h, File)) def run(t): try: File(t) except MutagenError: pass > run() tests/test___init__.py:623: _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ @given(generate_test_file_objects(h, File)) > def run(t): E hypothesis.errors.FailedHealthCheck: Data generation is extremely slow: Only produced 2 valid examples in 1.13 seconds (0 invalid ones and 0 exceeded maximum size). Try decreasing size of the data you're generating (with e.g. 
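Each "Captured stdout call" section prints the seed needed to replay the same example sequence; either the decorator or the pytest flag works. A sketch using one of the seeds reported above (the strategy is again a placeholder, not the suite's real one):

    from hypothesis import given, seed, strategies as st

    # Sketch only: pin the seed reported for the WavPack load failure.
    @seed(178197077677935556341549468350933662199)
    @given(st.binary(max_size=64))
    def run(payload):
        pass

    # Equivalent from the command line:
    #   pytest tests/test___init__.py -k WavPack \
    #       --hypothesis-seed=178197077677935556341549468350933662199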
max_size or max_leaves parameters). E See https://hypothesis.readthedocs.io/en/latest/healthchecks.html for more information about this. If you want to disable just this health check, add HealthCheck.too_slow to the suppress_health_check settings for this test. tests/test___init__.py:617: FailedHealthCheck ----------------------------- Captured stdout call ----------------------------- You can add @seed(106769970965617324647651738863706539150) to this test or run pytest with --hypothesis-seed=106769970965617324647651738863706539150 to reproduce this failure. =========================== short test summary info ============================ FAILED tests/test___init__.py::TFileTypeAAC::test_test_fileobj_load - hypothe... FAILED tests/test___init__.py::TFileTypeAAC::test_test_fileobj_save - hypothe... FAILED tests/test___init__.py::TFileTypeAAC_2::test_test_fileobj_delete - hyp... FAILED tests/test___init__.py::TFileTypeAAC_2::test_test_fileobj_load - hypot... FAILED tests/test___init__.py::TFileTypeAC3::test_test_fileobj_load - hypothe... FAILED tests/test___init__.py::TFileTypeAC3_2::test_test_fileobj_load - hypot... FAILED tests/test___init__.py::TFileTypeAIFF::test_test_fileobj_delete - hypo... FAILED tests/test___init__.py::TFileTypeAIFF::test_test_fileobj_load - hypoth... FAILED tests/test___init__.py::TFileTypeAIFF::test_test_fileobj_save - hypoth... FAILED tests/test___init__.py::TFileTypeAIFF_2::test_test_fileobj_load - hypo... FAILED tests/test___init__.py::TFileTypeAIFF_3::test_test_fileobj_load - hypo... FAILED tests/test___init__.py::TFileTypeAIFF_4::test_test_fileobj_load - hypo... FAILED tests/test___init__.py::TFileTypeAIFF_4::test_test_fileobj_save - hypo... FAILED tests/test___init__.py::TFileTypeAIFF_5::test_test_fileobj_load - hypo... FAILED tests/test___init__.py::TFileTypeAIFF_6::test_test_fileobj_load - hypo... FAILED tests/test___init__.py::TFileTypeAPEv2File::test_test_fileobj_load - h... FAILED tests/test___init__.py::TFileTypeAPEv2File_2::test_test_fileobj_load FAILED tests/test___init__.py::TFileTypeAPEv2File_2::test_test_fileobj_save FAILED tests/test___init__.py::TFileTypeASF::test_test_fileobj_delete - hypot... FAILED tests/test___init__.py::TFileTypeASF::test_test_fileobj_load - hypothe... FAILED tests/test___init__.py::TFileTypeASF::test_test_fileobj_save - hypothe... FAILED tests/test___init__.py::TFileTypeASF_2::test_test_fileobj_delete - hyp... FAILED tests/test___init__.py::TFileTypeASF_2::test_test_fileobj_load - hypot... FAILED tests/test___init__.py::TFileTypeASF_2::test_test_fileobj_save - hypot... FAILED tests/test___init__.py::TFileTypeASF_3::test_test_fileobj_delete - hyp... FAILED tests/test___init__.py::TFileTypeASF_3::test_test_fileobj_load - hypot... FAILED tests/test___init__.py::TFileTypeASF_3::test_test_fileobj_save - hypot... FAILED tests/test___init__.py::TFileTypeDSDIFF::test_test_fileobj_load - hypo... FAILED tests/test___init__.py::TFileTypeDSDIFF_2::test_test_fileobj_load - hy... FAILED tests/test___init__.py::TFileTypeDSDIFF_3::test_test_fileobj_delete - ... FAILED tests/test___init__.py::TFileTypeDSDIFF_3::test_test_fileobj_load - hy... FAILED tests/test___init__.py::TFileTypeDSDIFF_3::test_test_fileobj_save - hy... FAILED tests/test___init__.py::TFileTypeDSF::test_test_fileobj_delete - hypot... FAILED tests/test___init__.py::TFileTypeDSF::test_test_fileobj_load - hypothe... FAILED tests/test___init__.py::TFileTypeDSF_2::test_test_fileobj_delete - hyp... FAILED tests/test___init__.py::TFileTypeDSF_2::test_test_fileobj_load - hypot... 
FAILED tests/test___init__.py::TFileTypeDSF_3::test_test_fileobj_delete - hyp...
FAILED tests/test___init__.py::TFileTypeDSF_3::test_test_fileobj_load - hypot...
FAILED tests/test___init__.py::TFileTypeDSF_3::test_test_fileobj_save - hypot...
FAILED tests/test___init__.py::TFileTypeDSF_4::test_test_fileobj_delete - hyp...
FAILED tests/test___init__.py::TFileTypeDSF_4::test_test_fileobj_load - hypot...
FAILED tests/test___init__.py::TFileTypeFLAC::test_test_fileobj_delete - hypo...
FAILED tests/test___init__.py::TFileTypeFLAC::test_test_fileobj_load - hypoth...
FAILED tests/test___init__.py::TFileTypeFLAC::test_test_fileobj_save - hypoth...
FAILED tests/test___init__.py::TFileTypeID3FileType::test_test_fileobj_delete
FAILED tests/test___init__.py::TFileTypeID3FileType::test_test_fileobj_load
FAILED tests/test___init__.py::TFileTypeID3FileType::test_test_fileobj_save
FAILED tests/test___init__.py::TFileTypeID3FileType_2::test_test_fileobj_load
FAILED tests/test___init__.py::TFileTypeID3FileType_3::test_test_fileobj_delete
FAILED tests/test___init__.py::TFileTypeID3FileType_3::test_test_fileobj_load
FAILED tests/test___init__.py::TFileTypeID3FileType_3::test_test_fileobj_save
FAILED tests/test___init__.py::TFileTypeID3FileType_4::test_test_fileobj_load
FAILED tests/test___init__.py::TFileTypeMP3::test_test_fileobj_delete - hypot...
FAILED tests/test___init__.py::TFileTypeMP3::test_test_fileobj_load - hypothe...
FAILED tests/test___init__.py::TFileTypeMP3::test_test_fileobj_save - hypothe...
FAILED tests/test___init__.py::TFileTypeMP3_2::test_test_fileobj_load - hypot...
FAILED tests/test___init__.py::TFileTypeMP3_3::test_test_fileobj_delete - hyp...
FAILED tests/test___init__.py::TFileTypeMP3_3::test_test_fileobj_load - hypot...
FAILED tests/test___init__.py::TFileTypeMP3_3::test_test_fileobj_save - hypot...
FAILED tests/test___init__.py::TFileTypeMP3_4::test_test_fileobj_load - hypot...
FAILED tests/test___init__.py::TFileTypeMP4::test_test_fileobj_delete - hypot...
FAILED tests/test___init__.py::TFileTypeMP4::test_test_fileobj_load - hypothe...
FAILED tests/test___init__.py::TFileTypeMP4::test_test_fileobj_save - hypothe...
FAILED tests/test___init__.py::TFileTypeMP4_2::test_test_fileobj_load - hypot...
FAILED tests/test___init__.py::TFileTypeMP4_3::test_test_fileobj_load - hypot...
FAILED tests/test___init__.py::TFileTypeMP4_3::test_test_fileobj_save - hypot...
FAILED tests/test___init__.py::TFileTypeMP4_4::test_test_fileobj_delete - hyp...
FAILED tests/test___init__.py::TFileTypeMP4_4::test_test_fileobj_load - hypot...
FAILED tests/test___init__.py::TFileTypeMP4_4::test_test_fileobj_save - hypot...
FAILED tests/test___init__.py::TFileTypeMP4_5::test_test_fileobj_delete - hyp...
FAILED tests/test___init__.py::TFileTypeMP4_5::test_test_fileobj_load - hypot...
FAILED tests/test___init__.py::TFileTypeMP4_5::test_test_fileobj_save - hypot...
FAILED tests/test___init__.py::TFileTypeMP4_6::test_test_fileobj_delete - hyp...
FAILED tests/test___init__.py::TFileTypeMP4_6::test_test_fileobj_load - hypot...
FAILED tests/test___init__.py::TFileTypeMP4_6::test_test_fileobj_save - hypot...
FAILED tests/test___init__.py::TFileTypeMonkeysAudio::test_test_fileobj_load
FAILED tests/test___init__.py::TFileTypeMonkeysAudio_2::test_test_fileobj_load
FAILED tests/test___init__.py::TFileTypeMusepack::test_test_fileobj_load - hy...
FAILED tests/test___init__.py::TFileTypeMusepack_2::test_test_fileobj_load - ...
FAILED tests/test___init__.py::TFileTypeMusepack_3::test_test_fileobj_load - ...
FAILED tests/test___init__.py::TFileTypeMusepack_4::test_test_fileobj_load - ...
FAILED tests/test___init__.py::TFileTypeOggFLAC::test_test_fileobj_delete - h...
FAILED tests/test___init__.py::TFileTypeOggFLAC::test_test_fileobj_load - hyp...
FAILED tests/test___init__.py::TFileTypeOggFLAC::test_test_fileobj_save - hyp...
FAILED tests/test___init__.py::TFileTypeOggOpus::test_test_fileobj_delete - h...
FAILED tests/test___init__.py::TFileTypeOggOpus::test_test_fileobj_load - hyp...
FAILED tests/test___init__.py::TFileTypeOggOpus::test_test_fileobj_save - hyp...
FAILED tests/test___init__.py::TFileTypeOggSpeex::test_test_fileobj_delete - ...
FAILED tests/test___init__.py::TFileTypeOggSpeex::test_test_fileobj_load - hy...
FAILED tests/test___init__.py::TFileTypeOggSpeex::test_test_fileobj_save - hy...
FAILED tests/test___init__.py::TFileTypeOggTheora::test_test_fileobj_delete
FAILED tests/test___init__.py::TFileTypeOggTheora::test_test_fileobj_load - h...
FAILED tests/test___init__.py::TFileTypeOggTheora::test_test_fileobj_save - h...
FAILED tests/test___init__.py::TFileTypeOggVorbis::test_test_fileobj_delete
FAILED tests/test___init__.py::TFileTypeOggVorbis::test_test_fileobj_load - h...
FAILED tests/test___init__.py::TFileTypeOggVorbis::test_test_fileobj_save - h...
FAILED tests/test___init__.py::TFileTypeOptimFROG::test_test_fileobj_load - h...
FAILED tests/test___init__.py::TFileTypeOptimFROG_2::test_test_fileobj_load
FAILED tests/test___init__.py::TFileTypeSMF::test_test_fileobj_delete - hypot...
FAILED tests/test___init__.py::TFileTypeSMF::test_test_fileobj_load - hypothe...
FAILED tests/test___init__.py::TFileTypeTAK::test_test_fileobj_load - hypothe...
FAILED tests/test___init__.py::TFileTypeTAK_2::test_test_fileobj_delete - hyp...
FAILED tests/test___init__.py::TFileTypeTAK_2::test_test_fileobj_load - hypot...
FAILED tests/test___init__.py::TFileTypeTAK_2::test_test_fileobj_save - hypot...
FAILED tests/test___init__.py::TFileTypeTrueAudio::test_test_fileobj_load - h...
FAILED tests/test___init__.py::TFileTypeWAVE::test_test_fileobj_delete - hypo...
FAILED tests/test___init__.py::TFileTypeWAVE::test_test_fileobj_load - hypoth...
FAILED tests/test___init__.py::TFileTypeWAVE::test_test_fileobj_save - hypoth...
FAILED tests/test___init__.py::TFileTypeWAVE_2::test_test_fileobj_load - hypo...
FAILED tests/test___init__.py::TFileTypeWAVE_3::test_test_fileobj_delete - hy...
FAILED tests/test___init__.py::TFileTypeWAVE_3::test_test_fileobj_load - hypo...
FAILED tests/test___init__.py::TFileTypeWAVE_3::test_test_fileobj_save - hypo...
FAILED tests/test___init__.py::TFileTypeWavPack::test_test_fileobj_delete - h...
FAILED tests/test___init__.py::TFileTypeWavPack::test_test_fileobj_load - hyp...
FAILED tests/test___init__.py::TFileTypeWavPack::test_test_fileobj_save - hyp...
FAILED tests/test___init__.py::TFile::test_mock_fileobj - hypothesis.errors.F...
================ 116 failed, 3917 passed in 4584.83s (1:16:24) =================
error: Bad exit status from /var/tmp/rpm-tmp.vRXvF0 (%check)

RPM build errors:
    Bad exit status from /var/tmp/rpm-tmp.vRXvF0 (%check)
Child return code was: 1
EXCEPTION: [Error('Command failed: \n # bash --login -c /usr/bin/rpmbuild -bb --noclean --target noarch --nodeps /builddir/build/SPECS/python-mutagen.spec\n', 1)]
Traceback (most recent call last):
  File "/usr/lib/python3.11/site-packages/mockbuild/trace_decorator.py", line 93, in trace
    result = func(*args, **kw)
             ^^^^^^^^^^^^^^^^^
  File "/usr/lib/python3.11/site-packages/mockbuild/util.py", line 597, in do_with_status
    raise exception.Error("Command failed: \n # %s\n%s" % (command, output), child.returncode)
mockbuild.exception.Error: Command failed:
 # bash --login -c /usr/bin/rpmbuild -bb --noclean --target noarch --nodeps /builddir/build/SPECS/python-mutagen.spec
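
Note: all 116 failures above are the same hypothesis.errors.FailedHealthCheck (example generation too slow for the health-check budget on a loaded build host), not functional regressions in mutagen; the other 3917 tests passed. The error text itself names the remedy. Below is a minimal sketch of a packaging-side conftest.py applying it suite-wide; settings.register_profile, settings.load_profile, HealthCheck.too_slow, and deadline are standard Hypothesis API, but the profile name "rpmbuild" and the idea of carrying such a file alongside the spec are assumptions, not anything mutagen ships.

    # conftest.py -- hypothetical packaging-side workaround, not mutagen code.
    # Register and load a Hypothesis settings profile that suppresses the
    # too_slow health check (and the per-example deadline), so slow data
    # generation on a busy builder no longer aborts the property-based tests.
    from hypothesis import HealthCheck, settings

    settings.register_profile(
        "rpmbuild",
        suppress_health_check=[HealthCheck.too_slow],
        deadline=None,
    )
    settings.load_profile("rpmbuild")

An alternative would be deselecting the property-based tests in %check (e.g. pytest -k "not fileobj"), but suppressing the health check keeps their coverage while tolerating the slow host.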
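
For debugging a single case, each "Captured stdout call" block prints a seed for deterministic replay. The sketch below is a standalone illustration of that mechanism, reusing one seed from the log; the st.binary strategy is a stand-in for mutagen's generate_test_file_objects helper, and the test name is made up.

    # replay_sketch.py -- illustrative only, not mutagen's test code.
    from hypothesis import given, seed, strategies as st

    # Seed copied from the TFileTypeWavPack.test_test_fileobj_load output above.
    @seed(178197077677935556341549468350933662199)
    @given(st.binary(max_size=64))
    def test_replay_pinned(data):
        # With @seed pinned, Hypothesis draws the same example sequence on
        # every run, making a slow or flaky case reproducible.
        assert isinstance(data, bytes)

The command-line equivalent, without editing the test, is the hint the log already prints: pytest tests/test___init__.py::TFileTypeWavPack::test_test_fileobj_load --hypothesis-seed=178197077677935556341549468350933662199.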