Mock Version: 3.0 Mock Version: 3.0 Mock Version: 3.0 ENTER ['do_with_status'](['bash', '--login', '-c', '/usr/bin/rpmbuild -bs --target noarch --nodeps /builddir/build/SPECS/python-zarr.spec'], chrootPath='/var/lib/mock/f38-build-side-42-init-devel-499506-25265/root'env={'TERM': 'vt100', 'SHELL': '/bin/bash', 'HOME': '/builddir', 'HOSTNAME': 'mock', 'PATH': '/usr/bin:/bin:/usr/sbin:/sbin', 'PROMPT_COMMAND': 'printf "\\033]0;\\007"', 'PS1': ' \\s-\\v\\$ ', 'LANG': 'C.UTF-8'}shell=Falselogger=timeout=864000uid=996gid=135user='mockbuild'nspawn_args=[]unshare_net=TrueprintOutput=False) Executing command: ['bash', '--login', '-c', '/usr/bin/rpmbuild -bs --target noarch --nodeps /builddir/build/SPECS/python-zarr.spec'] with env {'TERM': 'vt100', 'SHELL': '/bin/bash', 'HOME': '/builddir', 'HOSTNAME': 'mock', 'PATH': '/usr/bin:/bin:/usr/sbin:/sbin', 'PROMPT_COMMAND': 'printf "\\033]0;\\007"', 'PS1': ' \\s-\\v\\$ ', 'LANG': 'C.UTF-8'} and shell False Building target platforms: noarch Building for target noarch setting SOURCE_DATE_EPOCH=1682294400 Wrote: /builddir/build/SRPMS/python-zarr-2.14.2-1.fc38.src.rpm Child return code was: 0 ENTER ['do_with_status'](['bash', '--login', '-c', '/usr/bin/rpmbuild -br --target noarch --nodeps /builddir/build/SPECS/python-zarr.spec'], chrootPath='/var/lib/mock/f38-build-side-42-init-devel-499506-25265/root'env={'TERM': 'vt100', 'SHELL': '/bin/bash', 'HOME': '/builddir', 'HOSTNAME': 'mock', 'PATH': '/usr/bin:/bin:/usr/sbin:/sbin', 'PROMPT_COMMAND': 'printf "\\033]0;\\007"', 'PS1': ' \\s-\\v\\$ ', 'LANG': 'C.UTF-8'}shell=Falselogger=timeout=864000uid=996gid=135user='mockbuild'nspawn_args=[]unshare_net=TrueraiseExc=FalseprintOutput=False) Executing command: ['bash', '--login', '-c', '/usr/bin/rpmbuild -br --target noarch --nodeps /builddir/build/SPECS/python-zarr.spec'] with env {'TERM': 'vt100', 'SHELL': '/bin/bash', 'HOME': '/builddir', 'HOSTNAME': 'mock', 'PATH': '/usr/bin:/bin:/usr/sbin:/sbin', 'PROMPT_COMMAND': 'printf "\\033]0;\\007"', 'PS1': ' \\s-\\v\\$ ', 'LANG': 'C.UTF-8'} and shell False Building target platforms: noarch Building for target noarch setting SOURCE_DATE_EPOCH=1682294400 Executing(%prep): /bin/sh -e /var/tmp/rpm-tmp.chB609 + umask 022 + cd /builddir/build/BUILD + cd /builddir/build/BUILD + rm -rf zarr-2.14.2 + /usr/lib/rpm/rpmuncompress -x /builddir/build/SOURCES/zarr-2.14.2.tar.gz + STATUS=0 + '[' 0 -ne 0 ']' + cd zarr-2.14.2 + /usr/bin/chmod -Rf a+rX,u+w,g-w,o-w . + RPM_EC=0 ++ jobs -p + exit 0 Executing(%generate_buildrequires): /bin/sh -e /var/tmp/rpm-tmp.nMl31H + umask 022 + cd /builddir/build/BUILD + cd zarr-2.14.2 + CFLAGS='-O2 -flto=auto -ffat-lto-objects -fexceptions -g -grecord-gcc-switches -pipe -Wall -Werror=format-security -Wp,-U_FORTIFY_SOURCE,-D_FORTIFY_SOURCE=3 -Wp,-D_GLIBCXX_ASSERTIONS -specs=/usr/lib/rpm/redhat/redhat-hardened-cc1 -fstack-protector-strong -specs=/usr/lib/rpm/redhat/redhat-annobin-cc1 -fasynchronous-unwind-tables -fstack-clash-protection -fno-omit-frame-pointer ' + export CFLAGS + CXXFLAGS='-O2 -flto=auto -ffat-lto-objects -fexceptions -g -grecord-gcc-switches -pipe -Wall -Werror=format-security -Wp,-U_FORTIFY_SOURCE,-D_FORTIFY_SOURCE=3 -Wp,-D_GLIBCXX_ASSERTIONS -specs=/usr/lib/rpm/redhat/redhat-hardened-cc1 -fstack-protector-strong -specs=/usr/lib/rpm/redhat/redhat-annobin-cc1 -fasynchronous-unwind-tables -fstack-clash-protection -fno-omit-frame-pointer ' + export CXXFLAGS + FFLAGS='-O2 -flto=auto -ffat-lto-objects -fexceptions -g -grecord-gcc-switches -pipe -Wall -Werror=format-security -Wp,-U_FORTIFY_SOURCE,-D_FORTIFY_SOURCE=3 -Wp,-D_GLIBCXX_ASSERTIONS -specs=/usr/lib/rpm/redhat/redhat-hardened-cc1 -fstack-protector-strong -specs=/usr/lib/rpm/redhat/redhat-annobin-cc1 -fasynchronous-unwind-tables -fstack-clash-protection -fno-omit-frame-pointer -I/usr/lib/gfortran/modules ' + export FFLAGS + FCFLAGS='-O2 -flto=auto -ffat-lto-objects -fexceptions -g -grecord-gcc-switches -pipe -Wall -Werror=format-security -Wp,-U_FORTIFY_SOURCE,-D_FORTIFY_SOURCE=3 -Wp,-D_GLIBCXX_ASSERTIONS -specs=/usr/lib/rpm/redhat/redhat-hardened-cc1 -fstack-protector-strong -specs=/usr/lib/rpm/redhat/redhat-annobin-cc1 -fasynchronous-unwind-tables -fstack-clash-protection -fno-omit-frame-pointer -I/usr/lib/gfortran/modules ' + export FCFLAGS + VALAFLAGS=-g + export VALAFLAGS + LDFLAGS='-Wl,-z,relro -Wl,--as-needed -Wl,-z,now -specs=/usr/lib/rpm/redhat/redhat-hardened-ld -specs=/usr/lib/rpm/redhat/redhat-annobin-cc1 -Wl,--build-id=sha1 ' + export LDFLAGS + LT_SYS_LIBRARY_PATH=/usr/lib: + export LT_SYS_LIBRARY_PATH + CC=gcc + export CC + CXX=g++ + export CXX + echo pyproject-rpm-macros + echo python3-devel + echo 'python3dist(pip) >= 19' + echo 'python3dist(packaging)' + '[' -f pyproject.toml ']' + echo '(python3dist(toml) if python3-devel < 3.11)' + rm -rfv '*.dist-info/' + '[' -f /usr/bin/python3 ']' + mkdir -p /builddir/build/BUILD/zarr-2.14.2/.pyproject-builddir + CFLAGS='-O2 -flto=auto -ffat-lto-objects -fexceptions -g -grecord-gcc-switches -pipe -Wall -Werror=format-security -Wp,-U_FORTIFY_SOURCE,-D_FORTIFY_SOURCE=3 -Wp,-D_GLIBCXX_ASSERTIONS -specs=/usr/lib/rpm/redhat/redhat-hardened-cc1 -fstack-protector-strong -specs=/usr/lib/rpm/redhat/redhat-annobin-cc1 -fasynchronous-unwind-tables -fstack-clash-protection -fno-omit-frame-pointer ' + LDFLAGS='-Wl,-z,relro -Wl,--as-needed -Wl,-z,now -specs=/usr/lib/rpm/redhat/redhat-hardened-ld -specs=/usr/lib/rpm/redhat/redhat-annobin-cc1 -Wl,--build-id=sha1 ' + TMPDIR=/builddir/build/BUILD/zarr-2.14.2/.pyproject-builddir + RPM_TOXENV=py311 + HOSTNAME=rpmbuild + /usr/bin/python3 -Bs /usr/lib/rpm/redhat/pyproject_buildrequires.py --generate-extras --python3_pkgversion 3 --wheeldir /builddir/build/BUILD/zarr-2.14.2/pyproject-wheeldir -r Handling setuptools>=64.0.0 from build-system.requires Requirement satisfied: setuptools>=64.0.0 (installed: setuptools 65.5.1) Handling setuptools-scm from build-system.requires Requirement not satisfied: setuptools-scm Exiting dependency generation pass: build backend + rm -rfv '*.dist-info/' + RPM_EC=0 ++ jobs -p + exit 0 Wrote: /builddir/build/SRPMS/python-zarr-2.14.2-1.fc38.buildreqs.nosrc.rpm Child return code was: 11 Dynamic buildrequires detected Going to install missing buildrequires. See root.log for details. ENTER ['do_with_status'](['bash', '--login', '-c', '/usr/bin/rpmbuild -br --target noarch --nodeps /builddir/build/SPECS/python-zarr.spec'], chrootPath='/var/lib/mock/f38-build-side-42-init-devel-499506-25265/root'env={'TERM': 'vt100', 'SHELL': '/bin/bash', 'HOME': '/builddir', 'HOSTNAME': 'mock', 'PATH': '/usr/bin:/bin:/usr/sbin:/sbin', 'PROMPT_COMMAND': 'printf "\\033]0;\\007"', 'PS1': ' \\s-\\v\\$ ', 'LANG': 'C.UTF-8'}shell=Falselogger=timeout=864000uid=996gid=135user='mockbuild'nspawn_args=[]unshare_net=TrueraiseExc=FalseprintOutput=False) Executing command: ['bash', '--login', '-c', '/usr/bin/rpmbuild -br --target noarch --nodeps /builddir/build/SPECS/python-zarr.spec'] with env {'TERM': 'vt100', 'SHELL': '/bin/bash', 'HOME': '/builddir', 'HOSTNAME': 'mock', 'PATH': '/usr/bin:/bin:/usr/sbin:/sbin', 'PROMPT_COMMAND': 'printf "\\033]0;\\007"', 'PS1': ' \\s-\\v\\$ ', 'LANG': 'C.UTF-8'} and shell False Building target platforms: noarch Building for target noarch setting SOURCE_DATE_EPOCH=1682294400 Executing(%prep): /bin/sh -e /var/tmp/rpm-tmp.nplrnS + umask 022 + cd /builddir/build/BUILD + cd /builddir/build/BUILD + rm -rf zarr-2.14.2 + /usr/lib/rpm/rpmuncompress -x /builddir/build/SOURCES/zarr-2.14.2.tar.gz + STATUS=0 + '[' 0 -ne 0 ']' + cd zarr-2.14.2 + /usr/bin/chmod -Rf a+rX,u+w,g-w,o-w . + RPM_EC=0 ++ jobs -p + exit 0 Executing(%generate_buildrequires): /bin/sh -e /var/tmp/rpm-tmp.UftKx0 + umask 022 + cd /builddir/build/BUILD + cd zarr-2.14.2 + CFLAGS='-O2 -flto=auto -ffat-lto-objects -fexceptions -g -grecord-gcc-switches -pipe -Wall -Werror=format-security -Wp,-U_FORTIFY_SOURCE,-D_FORTIFY_SOURCE=3 -Wp,-D_GLIBCXX_ASSERTIONS -specs=/usr/lib/rpm/redhat/redhat-hardened-cc1 -fstack-protector-strong -specs=/usr/lib/rpm/redhat/redhat-annobin-cc1 -fasynchronous-unwind-tables -fstack-clash-protection -fno-omit-frame-pointer ' + export CFLAGS + CXXFLAGS='-O2 -flto=auto -ffat-lto-objects -fexceptions -g -grecord-gcc-switches -pipe -Wall -Werror=format-security -Wp,-U_FORTIFY_SOURCE,-D_FORTIFY_SOURCE=3 -Wp,-D_GLIBCXX_ASSERTIONS -specs=/usr/lib/rpm/redhat/redhat-hardened-cc1 -fstack-protector-strong -specs=/usr/lib/rpm/redhat/redhat-annobin-cc1 -fasynchronous-unwind-tables -fstack-clash-protection -fno-omit-frame-pointer ' + export CXXFLAGS + FFLAGS='-O2 -flto=auto -ffat-lto-objects -fexceptions -g -grecord-gcc-switches -pipe -Wall -Werror=format-security -Wp,-U_FORTIFY_SOURCE,-D_FORTIFY_SOURCE=3 -Wp,-D_GLIBCXX_ASSERTIONS -specs=/usr/lib/rpm/redhat/redhat-hardened-cc1 -fstack-protector-strong -specs=/usr/lib/rpm/redhat/redhat-annobin-cc1 -fasynchronous-unwind-tables -fstack-clash-protection -fno-omit-frame-pointer -I/usr/lib/gfortran/modules ' + export FFLAGS + FCFLAGS='-O2 -flto=auto -ffat-lto-objects -fexceptions -g -grecord-gcc-switches -pipe -Wall -Werror=format-security -Wp,-U_FORTIFY_SOURCE,-D_FORTIFY_SOURCE=3 -Wp,-D_GLIBCXX_ASSERTIONS -specs=/usr/lib/rpm/redhat/redhat-hardened-cc1 -fstack-protector-strong -specs=/usr/lib/rpm/redhat/redhat-annobin-cc1 -fasynchronous-unwind-tables -fstack-clash-protection -fno-omit-frame-pointer -I/usr/lib/gfortran/modules ' + export FCFLAGS + VALAFLAGS=-g + export VALAFLAGS + LDFLAGS='-Wl,-z,relro -Wl,--as-needed -Wl,-z,now -specs=/usr/lib/rpm/redhat/redhat-hardened-ld -specs=/usr/lib/rpm/redhat/redhat-annobin-cc1 -Wl,--build-id=sha1 ' + export LDFLAGS + LT_SYS_LIBRARY_PATH=/usr/lib: + export LT_SYS_LIBRARY_PATH + CC=gcc + export CC + CXX=g++ + export CXX + echo pyproject-rpm-macros + echo python3-devel + echo 'python3dist(pip) >= 19' + echo 'python3dist(packaging)' + '[' -f pyproject.toml ']' + echo '(python3dist(toml) if python3-devel < 3.11)' + rm -rfv '*.dist-info/' + '[' -f /usr/bin/python3 ']' + mkdir -p /builddir/build/BUILD/zarr-2.14.2/.pyproject-builddir + CFLAGS='-O2 -flto=auto -ffat-lto-objects -fexceptions -g -grecord-gcc-switches -pipe -Wall -Werror=format-security -Wp,-U_FORTIFY_SOURCE,-D_FORTIFY_SOURCE=3 -Wp,-D_GLIBCXX_ASSERTIONS -specs=/usr/lib/rpm/redhat/redhat-hardened-cc1 -fstack-protector-strong -specs=/usr/lib/rpm/redhat/redhat-annobin-cc1 -fasynchronous-unwind-tables -fstack-clash-protection -fno-omit-frame-pointer ' + LDFLAGS='-Wl,-z,relro -Wl,--as-needed -Wl,-z,now -specs=/usr/lib/rpm/redhat/redhat-hardened-ld -specs=/usr/lib/rpm/redhat/redhat-annobin-cc1 -Wl,--build-id=sha1 ' + TMPDIR=/builddir/build/BUILD/zarr-2.14.2/.pyproject-builddir + RPM_TOXENV=py311 + HOSTNAME=rpmbuild + /usr/bin/python3 -Bs /usr/lib/rpm/redhat/pyproject_buildrequires.py --generate-extras --python3_pkgversion 3 --wheeldir /builddir/build/BUILD/zarr-2.14.2/pyproject-wheeldir -r Handling setuptools>=64.0.0 from build-system.requires Requirement satisfied: setuptools>=64.0.0 (installed: setuptools 65.5.1) Handling setuptools-scm from build-system.requires Requirement satisfied: setuptools-scm (installed: setuptools-scm 7.1.0) /usr/lib/python3.11/site-packages/setuptools/config/pyprojecttoml.py:108: _BetaConfiguration: Support for `[tool.setuptools]` in `pyproject.toml` is still *beta*. warnings.warn(msg, _BetaConfiguration) HOOK STDOUT: running egg_info HOOK STDOUT: writing zarr.egg-info/PKG-INFO HOOK STDOUT: writing dependency_links to zarr.egg-info/dependency_links.txt HOOK STDOUT: writing requirements to zarr.egg-info/requires.txt HOOK STDOUT: writing top-level names to zarr.egg-info/top_level.txt HOOK STDOUT: reading manifest file 'zarr.egg-info/SOURCES.txt' HOOK STDOUT: adding license file 'LICENSE.txt' HOOK STDOUT: writing manifest file 'zarr.egg-info/SOURCES.txt' Handling wheel from get_requires_for_build_wheel Requirement not satisfied: wheel Exiting dependency generation pass: get_requires_for_build_wheel + rm -rfv '*.dist-info/' + RPM_EC=0 ++ jobs -p + exit 0 Wrote: /builddir/build/SRPMS/python-zarr-2.14.2-1.fc38.buildreqs.nosrc.rpm Child return code was: 11 Dynamic buildrequires detected Going to install missing buildrequires. See root.log for details. ENTER ['do_with_status'](['bash', '--login', '-c', '/usr/bin/rpmbuild -br --target noarch --nodeps /builddir/build/SPECS/python-zarr.spec'], chrootPath='/var/lib/mock/f38-build-side-42-init-devel-499506-25265/root'env={'TERM': 'vt100', 'SHELL': '/bin/bash', 'HOME': '/builddir', 'HOSTNAME': 'mock', 'PATH': '/usr/bin:/bin:/usr/sbin:/sbin', 'PROMPT_COMMAND': 'printf "\\033]0;\\007"', 'PS1': ' \\s-\\v\\$ ', 'LANG': 'C.UTF-8'}shell=Falselogger=timeout=864000uid=996gid=135user='mockbuild'nspawn_args=[]unshare_net=TrueraiseExc=FalseprintOutput=False) Executing command: ['bash', '--login', '-c', '/usr/bin/rpmbuild -br --target noarch --nodeps /builddir/build/SPECS/python-zarr.spec'] with env {'TERM': 'vt100', 'SHELL': '/bin/bash', 'HOME': '/builddir', 'HOSTNAME': 'mock', 'PATH': '/usr/bin:/bin:/usr/sbin:/sbin', 'PROMPT_COMMAND': 'printf "\\033]0;\\007"', 'PS1': ' \\s-\\v\\$ ', 'LANG': 'C.UTF-8'} and shell False Building target platforms: noarch Building for target noarch setting SOURCE_DATE_EPOCH=1682294400 Executing(%prep): /bin/sh -e /var/tmp/rpm-tmp.Qmyl9z + umask 022 + cd /builddir/build/BUILD + cd /builddir/build/BUILD + rm -rf zarr-2.14.2 + /usr/lib/rpm/rpmuncompress -x /builddir/build/SOURCES/zarr-2.14.2.tar.gz + STATUS=0 + '[' 0 -ne 0 ']' + cd zarr-2.14.2 + /usr/bin/chmod -Rf a+rX,u+w,g-w,o-w . + RPM_EC=0 ++ jobs -p + exit 0 Executing(%generate_buildrequires): /bin/sh -e /var/tmp/rpm-tmp.25Cls6 + umask 022 + cd /builddir/build/BUILD + cd zarr-2.14.2 + CFLAGS='-O2 -flto=auto -ffat-lto-objects -fexceptions -g -grecord-gcc-switches -pipe -Wall -Werror=format-security -Wp,-U_FORTIFY_SOURCE,-D_FORTIFY_SOURCE=3 -Wp,-D_GLIBCXX_ASSERTIONS -specs=/usr/lib/rpm/redhat/redhat-hardened-cc1 -fstack-protector-strong -specs=/usr/lib/rpm/redhat/redhat-annobin-cc1 -fasynchronous-unwind-tables -fstack-clash-protection -fno-omit-frame-pointer ' + export CFLAGS + CXXFLAGS='-O2 -flto=auto -ffat-lto-objects -fexceptions -g -grecord-gcc-switches -pipe -Wall -Werror=format-security -Wp,-U_FORTIFY_SOURCE,-D_FORTIFY_SOURCE=3 -Wp,-D_GLIBCXX_ASSERTIONS -specs=/usr/lib/rpm/redhat/redhat-hardened-cc1 -fstack-protector-strong -specs=/usr/lib/rpm/redhat/redhat-annobin-cc1 -fasynchronous-unwind-tables -fstack-clash-protection -fno-omit-frame-pointer ' + export CXXFLAGS + FFLAGS='-O2 -flto=auto -ffat-lto-objects -fexceptions -g -grecord-gcc-switches -pipe -Wall -Werror=format-security -Wp,-U_FORTIFY_SOURCE,-D_FORTIFY_SOURCE=3 -Wp,-D_GLIBCXX_ASSERTIONS -specs=/usr/lib/rpm/redhat/redhat-hardened-cc1 -fstack-protector-strong -specs=/usr/lib/rpm/redhat/redhat-annobin-cc1 -fasynchronous-unwind-tables -fstack-clash-protection -fno-omit-frame-pointer -I/usr/lib/gfortran/modules ' + export FFLAGS + FCFLAGS='-O2 -flto=auto -ffat-lto-objects -fexceptions -g -grecord-gcc-switches -pipe -Wall -Werror=format-security -Wp,-U_FORTIFY_SOURCE,-D_FORTIFY_SOURCE=3 -Wp,-D_GLIBCXX_ASSERTIONS -specs=/usr/lib/rpm/redhat/redhat-hardened-cc1 -fstack-protector-strong -specs=/usr/lib/rpm/redhat/redhat-annobin-cc1 -fasynchronous-unwind-tables -fstack-clash-protection -fno-omit-frame-pointer -I/usr/lib/gfortran/modules ' + export FCFLAGS + VALAFLAGS=-g + export VALAFLAGS + LDFLAGS='-Wl,-z,relro -Wl,--as-needed -Wl,-z,now -specs=/usr/lib/rpm/redhat/redhat-hardened-ld -specs=/usr/lib/rpm/redhat/redhat-annobin-cc1 -Wl,--build-id=sha1 ' + export LDFLAGS + LT_SYS_LIBRARY_PATH=/usr/lib: + export LT_SYS_LIBRARY_PATH + CC=gcc + export CC + CXX=g++ + export CXX + echo pyproject-rpm-macros + echo python3-devel + echo 'python3dist(pip) >= 19' + echo 'python3dist(packaging)' + '[' -f pyproject.toml ']' + echo '(python3dist(toml) if python3-devel < 3.11)' + rm -rfv '*.dist-info/' + '[' -f /usr/bin/python3 ']' + mkdir -p /builddir/build/BUILD/zarr-2.14.2/.pyproject-builddir + CFLAGS='-O2 -flto=auto -ffat-lto-objects -fexceptions -g -grecord-gcc-switches -pipe -Wall -Werror=format-security -Wp,-U_FORTIFY_SOURCE,-D_FORTIFY_SOURCE=3 -Wp,-D_GLIBCXX_ASSERTIONS -specs=/usr/lib/rpm/redhat/redhat-hardened-cc1 -fstack-protector-strong -specs=/usr/lib/rpm/redhat/redhat-annobin-cc1 -fasynchronous-unwind-tables -fstack-clash-protection -fno-omit-frame-pointer ' + LDFLAGS='-Wl,-z,relro -Wl,--as-needed -Wl,-z,now -specs=/usr/lib/rpm/redhat/redhat-hardened-ld -specs=/usr/lib/rpm/redhat/redhat-annobin-cc1 -Wl,--build-id=sha1 ' + TMPDIR=/builddir/build/BUILD/zarr-2.14.2/.pyproject-builddir + RPM_TOXENV=py311 + HOSTNAME=rpmbuild + /usr/bin/python3 -Bs /usr/lib/rpm/redhat/pyproject_buildrequires.py --generate-extras --python3_pkgversion 3 --wheeldir /builddir/build/BUILD/zarr-2.14.2/pyproject-wheeldir -r Handling setuptools>=64.0.0 from build-system.requires Requirement satisfied: setuptools>=64.0.0 (installed: setuptools 65.5.1) Handling setuptools-scm from build-system.requires Requirement satisfied: setuptools-scm (installed: setuptools-scm 7.1.0) /usr/lib/python3.11/site-packages/setuptools/config/pyprojecttoml.py:108: _BetaConfiguration: Support for `[tool.setuptools]` in `pyproject.toml` is still *beta*. warnings.warn(msg, _BetaConfiguration) HOOK STDOUT: running egg_info HOOK STDOUT: writing zarr.egg-info/PKG-INFO HOOK STDOUT: writing dependency_links to zarr.egg-info/dependency_links.txt HOOK STDOUT: writing requirements to zarr.egg-info/requires.txt HOOK STDOUT: writing top-level names to zarr.egg-info/top_level.txt HOOK STDOUT: reading manifest file 'zarr.egg-info/SOURCES.txt' HOOK STDOUT: adding license file 'LICENSE.txt' HOOK STDOUT: writing manifest file 'zarr.egg-info/SOURCES.txt' Handling wheel from get_requires_for_build_wheel Requirement satisfied: wheel (installed: wheel 0.38.4) /usr/lib/python3.11/site-packages/setuptools/config/pyprojecttoml.py:108: _BetaConfiguration: Support for `[tool.setuptools]` in `pyproject.toml` is still *beta*. warnings.warn(msg, _BetaConfiguration) HOOK STDOUT: running dist_info HOOK STDOUT: writing zarr.egg-info/PKG-INFO HOOK STDOUT: writing dependency_links to zarr.egg-info/dependency_links.txt HOOK STDOUT: writing requirements to zarr.egg-info/requires.txt HOOK STDOUT: writing top-level names to zarr.egg-info/top_level.txt HOOK STDOUT: reading manifest file 'zarr.egg-info/SOURCES.txt' HOOK STDOUT: adding license file 'LICENSE.txt' HOOK STDOUT: writing manifest file 'zarr.egg-info/SOURCES.txt' HOOK STDOUT: creating '/builddir/build/BUILD/zarr-2.14.2/zarr-2.14.2.dist-info' Handling asciitree from hook generated metadata: Requires-Dist Requirement not satisfied: asciitree Handling numpy (>=1.20) from hook generated metadata: Requires-Dist Requirement satisfied: numpy (>=1.20) (installed: numpy 1.22.0) Handling fasteners from hook generated metadata: Requires-Dist Requirement not satisfied: fasteners Handling numcodecs (>=0.10.0) from hook generated metadata: Requires-Dist Requirement not satisfied: numcodecs (>=0.10.0) Handling notebook ; extra == 'jupyter' from hook generated metadata: Requires-Dist Ignoring alien requirement: notebook ; extra == 'jupyter' Handling ipytree (>=0.2.2) ; extra == 'jupyter' from hook generated metadata: Requires-Dist Ignoring alien requirement: ipytree (>=0.2.2) ; extra == 'jupyter' Handling ipywidgets (>=8.0.0) ; extra == 'jupyter' from hook generated metadata: Requires-Dist Ignoring alien requirement: ipywidgets (>=8.0.0) ; extra == 'jupyter' + rm -rfv zarr-2.14.2.dist-info/ removed 'zarr-2.14.2.dist-info/LICENSE.txt' removed 'zarr-2.14.2.dist-info/top_level.txt' removed 'zarr-2.14.2.dist-info/METADATA' removed directory 'zarr-2.14.2.dist-info/' + RPM_EC=0 ++ jobs -p + exit 0 Wrote: /builddir/build/SRPMS/python-zarr-2.14.2-1.fc38.buildreqs.nosrc.rpm Child return code was: 11 Dynamic buildrequires detected Going to install missing buildrequires. See root.log for details. ENTER ['do_with_status'](['bash', '--login', '-c', '/usr/bin/rpmbuild -br --target noarch --nodeps /builddir/build/SPECS/python-zarr.spec'], chrootPath='/var/lib/mock/f38-build-side-42-init-devel-499506-25265/root'env={'TERM': 'vt100', 'SHELL': '/bin/bash', 'HOME': '/builddir', 'HOSTNAME': 'mock', 'PATH': '/usr/bin:/bin:/usr/sbin:/sbin', 'PROMPT_COMMAND': 'printf "\\033]0;\\007"', 'PS1': ' \\s-\\v\\$ ', 'LANG': 'C.UTF-8'}shell=Falselogger=timeout=864000uid=996gid=135user='mockbuild'nspawn_args=[]unshare_net=TrueraiseExc=FalseprintOutput=False) Executing command: ['bash', '--login', '-c', '/usr/bin/rpmbuild -br --target noarch --nodeps /builddir/build/SPECS/python-zarr.spec'] with env {'TERM': 'vt100', 'SHELL': '/bin/bash', 'HOME': '/builddir', 'HOSTNAME': 'mock', 'PATH': '/usr/bin:/bin:/usr/sbin:/sbin', 'PROMPT_COMMAND': 'printf "\\033]0;\\007"', 'PS1': ' \\s-\\v\\$ ', 'LANG': 'C.UTF-8'} and shell False Building target platforms: noarch Building for target noarch setting SOURCE_DATE_EPOCH=1682294400 Executing(%prep): /bin/sh -e /var/tmp/rpm-tmp.z2J0h7 + umask 022 + cd /builddir/build/BUILD + cd /builddir/build/BUILD + rm -rf zarr-2.14.2 + /usr/lib/rpm/rpmuncompress -x /builddir/build/SOURCES/zarr-2.14.2.tar.gz + STATUS=0 + '[' 0 -ne 0 ']' + cd zarr-2.14.2 + /usr/bin/chmod -Rf a+rX,u+w,g-w,o-w . + RPM_EC=0 ++ jobs -p + exit 0 Executing(%generate_buildrequires): /bin/sh -e /var/tmp/rpm-tmp.CH9b9z + umask 022 + cd /builddir/build/BUILD + cd zarr-2.14.2 + CFLAGS='-O2 -flto=auto -ffat-lto-objects -fexceptions -g -grecord-gcc-switches -pipe -Wall -Werror=format-security -Wp,-U_FORTIFY_SOURCE,-D_FORTIFY_SOURCE=3 -Wp,-D_GLIBCXX_ASSERTIONS -specs=/usr/lib/rpm/redhat/redhat-hardened-cc1 -fstack-protector-strong -specs=/usr/lib/rpm/redhat/redhat-annobin-cc1 -fasynchronous-unwind-tables -fstack-clash-protection -fno-omit-frame-pointer ' + export CFLAGS + CXXFLAGS='-O2 -flto=auto -ffat-lto-objects -fexceptions -g -grecord-gcc-switches -pipe -Wall -Werror=format-security -Wp,-U_FORTIFY_SOURCE,-D_FORTIFY_SOURCE=3 -Wp,-D_GLIBCXX_ASSERTIONS -specs=/usr/lib/rpm/redhat/redhat-hardened-cc1 -fstack-protector-strong -specs=/usr/lib/rpm/redhat/redhat-annobin-cc1 -fasynchronous-unwind-tables -fstack-clash-protection -fno-omit-frame-pointer ' + export CXXFLAGS + FFLAGS='-O2 -flto=auto -ffat-lto-objects -fexceptions -g -grecord-gcc-switches -pipe -Wall -Werror=format-security -Wp,-U_FORTIFY_SOURCE,-D_FORTIFY_SOURCE=3 -Wp,-D_GLIBCXX_ASSERTIONS -specs=/usr/lib/rpm/redhat/redhat-hardened-cc1 -fstack-protector-strong -specs=/usr/lib/rpm/redhat/redhat-annobin-cc1 -fasynchronous-unwind-tables -fstack-clash-protection -fno-omit-frame-pointer -I/usr/lib/gfortran/modules ' + export FFLAGS + FCFLAGS='-O2 -flto=auto -ffat-lto-objects -fexceptions -g -grecord-gcc-switches -pipe -Wall -Werror=format-security -Wp,-U_FORTIFY_SOURCE,-D_FORTIFY_SOURCE=3 -Wp,-D_GLIBCXX_ASSERTIONS -specs=/usr/lib/rpm/redhat/redhat-hardened-cc1 -fstack-protector-strong -specs=/usr/lib/rpm/redhat/redhat-annobin-cc1 -fasynchronous-unwind-tables -fstack-clash-protection -fno-omit-frame-pointer -I/usr/lib/gfortran/modules ' + export FCFLAGS + VALAFLAGS=-g + export VALAFLAGS + LDFLAGS='-Wl,-z,relro -Wl,--as-needed -Wl,-z,now -specs=/usr/lib/rpm/redhat/redhat-hardened-ld -specs=/usr/lib/rpm/redhat/redhat-annobin-cc1 -Wl,--build-id=sha1 ' + export LDFLAGS + LT_SYS_LIBRARY_PATH=/usr/lib: + export LT_SYS_LIBRARY_PATH + CC=gcc + export CC + CXX=g++ + export CXX + echo pyproject-rpm-macros + echo python3-devel + echo 'python3dist(pip) >= 19' + echo 'python3dist(packaging)' + '[' -f pyproject.toml ']' + echo '(python3dist(toml) if python3-devel < 3.11)' + rm -rfv '*.dist-info/' + '[' -f /usr/bin/python3 ']' + mkdir -p /builddir/build/BUILD/zarr-2.14.2/.pyproject-builddir + CFLAGS='-O2 -flto=auto -ffat-lto-objects -fexceptions -g -grecord-gcc-switches -pipe -Wall -Werror=format-security -Wp,-U_FORTIFY_SOURCE,-D_FORTIFY_SOURCE=3 -Wp,-D_GLIBCXX_ASSERTIONS -specs=/usr/lib/rpm/redhat/redhat-hardened-cc1 -fstack-protector-strong -specs=/usr/lib/rpm/redhat/redhat-annobin-cc1 -fasynchronous-unwind-tables -fstack-clash-protection -fno-omit-frame-pointer ' + LDFLAGS='-Wl,-z,relro -Wl,--as-needed -Wl,-z,now -specs=/usr/lib/rpm/redhat/redhat-hardened-ld -specs=/usr/lib/rpm/redhat/redhat-annobin-cc1 -Wl,--build-id=sha1 ' + TMPDIR=/builddir/build/BUILD/zarr-2.14.2/.pyproject-builddir + RPM_TOXENV=py311 + HOSTNAME=rpmbuild + /usr/bin/python3 -Bs /usr/lib/rpm/redhat/pyproject_buildrequires.py --generate-extras --python3_pkgversion 3 --wheeldir /builddir/build/BUILD/zarr-2.14.2/pyproject-wheeldir -r Handling setuptools>=64.0.0 from build-system.requires Requirement satisfied: setuptools>=64.0.0 (installed: setuptools 65.5.1) Handling setuptools-scm from build-system.requires Requirement satisfied: setuptools-scm (installed: setuptools-scm 7.1.0) /usr/lib/python3.11/site-packages/setuptools/config/pyprojecttoml.py:108: _BetaConfiguration: Support for `[tool.setuptools]` in `pyproject.toml` is still *beta*. warnings.warn(msg, _BetaConfiguration) HOOK STDOUT: running egg_info HOOK STDOUT: writing zarr.egg-info/PKG-INFO HOOK STDOUT: writing dependency_links to zarr.egg-info/dependency_links.txt HOOK STDOUT: writing requirements to zarr.egg-info/requires.txt HOOK STDOUT: writing top-level names to zarr.egg-info/top_level.txt HOOK STDOUT: reading manifest file 'zarr.egg-info/SOURCES.txt' HOOK STDOUT: adding license file 'LICENSE.txt' HOOK STDOUT: writing manifest file 'zarr.egg-info/SOURCES.txt' Handling wheel from get_requires_for_build_wheel Requirement satisfied: wheel (installed: wheel 0.38.4) /usr/lib/python3.11/site-packages/setuptools/config/pyprojecttoml.py:108: _BetaConfiguration: Support for `[tool.setuptools]` in `pyproject.toml` is still *beta*. warnings.warn(msg, _BetaConfiguration) HOOK STDOUT: running dist_info HOOK STDOUT: writing zarr.egg-info/PKG-INFO HOOK STDOUT: writing dependency_links to zarr.egg-info/dependency_links.txt HOOK STDOUT: writing requirements to zarr.egg-info/requires.txt HOOK STDOUT: writing top-level names to zarr.egg-info/top_level.txt HOOK STDOUT: reading manifest file 'zarr.egg-info/SOURCES.txt' HOOK STDOUT: adding license file 'LICENSE.txt' HOOK STDOUT: writing manifest file 'zarr.egg-info/SOURCES.txt' HOOK STDOUT: creating '/builddir/build/BUILD/zarr-2.14.2/zarr-2.14.2.dist-info' Handling asciitree from hook generated metadata: Requires-Dist Requirement satisfied: asciitree (installed: asciitree 0.3.3) Handling numpy (>=1.20) from hook generated metadata: Requires-Dist Requirement satisfied: numpy (>=1.20) (installed: numpy 1.22.0) Handling fasteners from hook generated metadata: Requires-Dist Requirement satisfied: fasteners (installed: fasteners 0.18) Handling numcodecs (>=0.10.0) from hook generated metadata: Requires-Dist Requirement satisfied: numcodecs (>=0.10.0) (installed: numcodecs 0.11.0) Handling notebook ; extra == 'jupyter' from hook generated metadata: Requires-Dist Ignoring alien requirement: notebook ; extra == 'jupyter' Handling ipytree (>=0.2.2) ; extra == 'jupyter' from hook generated metadata: Requires-Dist Ignoring alien requirement: ipytree (>=0.2.2) ; extra == 'jupyter' Handling ipywidgets (>=8.0.0) ; extra == 'jupyter' from hook generated metadata: Requires-Dist Ignoring alien requirement: ipywidgets (>=8.0.0) ; extra == 'jupyter' + rm -rfv zarr-2.14.2.dist-info/ removed 'zarr-2.14.2.dist-info/LICENSE.txt' removed 'zarr-2.14.2.dist-info/top_level.txt' removed 'zarr-2.14.2.dist-info/METADATA' removed directory 'zarr-2.14.2.dist-info/' + RPM_EC=0 ++ jobs -p + exit 0 Wrote: /builddir/build/SRPMS/python-zarr-2.14.2-1.fc38.buildreqs.nosrc.rpm Child return code was: 11 Dynamic buildrequires detected Going to install missing buildrequires. See root.log for details. ENTER ['do_with_status'](['bash', '--login', '-c', '/usr/bin/rpmbuild -ba --noprep --target noarch --nodeps /builddir/build/SPECS/python-zarr.spec'], chrootPath='/var/lib/mock/f38-build-side-42-init-devel-499506-25265/root'env={'TERM': 'vt100', 'SHELL': '/bin/bash', 'HOME': '/builddir', 'HOSTNAME': 'mock', 'PATH': '/usr/bin:/bin:/usr/sbin:/sbin', 'PROMPT_COMMAND': 'printf "\\033]0;\\007"', 'PS1': ' \\s-\\v\\$ ', 'LANG': 'C.UTF-8'}shell=Falselogger=timeout=864000uid=996gid=135user='mockbuild'nspawn_args=[]unshare_net=TrueprintOutput=False) Executing command: ['bash', '--login', '-c', '/usr/bin/rpmbuild -ba --noprep --target noarch --nodeps /builddir/build/SPECS/python-zarr.spec'] with env {'TERM': 'vt100', 'SHELL': '/bin/bash', 'HOME': '/builddir', 'HOSTNAME': 'mock', 'PATH': '/usr/bin:/bin:/usr/sbin:/sbin', 'PROMPT_COMMAND': 'printf "\\033]0;\\007"', 'PS1': ' \\s-\\v\\$ ', 'LANG': 'C.UTF-8'} and shell False Building target platforms: noarch Building for target noarch setting SOURCE_DATE_EPOCH=1682294400 Executing(%generate_buildrequires): /bin/sh -e /var/tmp/rpm-tmp.ncTLAg + umask 022 + cd /builddir/build/BUILD + cd zarr-2.14.2 + CFLAGS='-O2 -flto=auto -ffat-lto-objects -fexceptions -g -grecord-gcc-switches -pipe -Wall -Werror=format-security -Wp,-U_FORTIFY_SOURCE,-D_FORTIFY_SOURCE=3 -Wp,-D_GLIBCXX_ASSERTIONS -specs=/usr/lib/rpm/redhat/redhat-hardened-cc1 -fstack-protector-strong -specs=/usr/lib/rpm/redhat/redhat-annobin-cc1 -fasynchronous-unwind-tables -fstack-clash-protection -fno-omit-frame-pointer ' + export CFLAGS + CXXFLAGS='-O2 -flto=auto -ffat-lto-objects -fexceptions -g -grecord-gcc-switches -pipe -Wall -Werror=format-security -Wp,-U_FORTIFY_SOURCE,-D_FORTIFY_SOURCE=3 -Wp,-D_GLIBCXX_ASSERTIONS -specs=/usr/lib/rpm/redhat/redhat-hardened-cc1 -fstack-protector-strong -specs=/usr/lib/rpm/redhat/redhat-annobin-cc1 -fasynchronous-unwind-tables -fstack-clash-protection -fno-omit-frame-pointer ' + export CXXFLAGS + FFLAGS='-O2 -flto=auto -ffat-lto-objects -fexceptions -g -grecord-gcc-switches -pipe -Wall -Werror=format-security -Wp,-U_FORTIFY_SOURCE,-D_FORTIFY_SOURCE=3 -Wp,-D_GLIBCXX_ASSERTIONS -specs=/usr/lib/rpm/redhat/redhat-hardened-cc1 -fstack-protector-strong -specs=/usr/lib/rpm/redhat/redhat-annobin-cc1 -fasynchronous-unwind-tables -fstack-clash-protection -fno-omit-frame-pointer -I/usr/lib/gfortran/modules ' + export FFLAGS + FCFLAGS='-O2 -flto=auto -ffat-lto-objects -fexceptions -g -grecord-gcc-switches -pipe -Wall -Werror=format-security -Wp,-U_FORTIFY_SOURCE,-D_FORTIFY_SOURCE=3 -Wp,-D_GLIBCXX_ASSERTIONS -specs=/usr/lib/rpm/redhat/redhat-hardened-cc1 -fstack-protector-strong -specs=/usr/lib/rpm/redhat/redhat-annobin-cc1 -fasynchronous-unwind-tables -fstack-clash-protection -fno-omit-frame-pointer -I/usr/lib/gfortran/modules ' + export FCFLAGS + VALAFLAGS=-g + export VALAFLAGS + LDFLAGS='-Wl,-z,relro -Wl,--as-needed -Wl,-z,now -specs=/usr/lib/rpm/redhat/redhat-hardened-ld -specs=/usr/lib/rpm/redhat/redhat-annobin-cc1 -Wl,--build-id=sha1 ' + export LDFLAGS + LT_SYS_LIBRARY_PATH=/usr/lib: + export LT_SYS_LIBRARY_PATH + CC=gcc + export CC + CXX=g++ + export CXX + echo pyproject-rpm-macros + echo python3-devel + echo 'python3dist(pip) >= 19' + echo 'python3dist(packaging)' + '[' -f pyproject.toml ']' + echo '(python3dist(toml) if python3-devel < 3.11)' + rm -rfv '*.dist-info/' + '[' -f /usr/bin/python3 ']' + mkdir -p /builddir/build/BUILD/zarr-2.14.2/.pyproject-builddir + CFLAGS='-O2 -flto=auto -ffat-lto-objects -fexceptions -g -grecord-gcc-switches -pipe -Wall -Werror=format-security -Wp,-U_FORTIFY_SOURCE,-D_FORTIFY_SOURCE=3 -Wp,-D_GLIBCXX_ASSERTIONS -specs=/usr/lib/rpm/redhat/redhat-hardened-cc1 -fstack-protector-strong -specs=/usr/lib/rpm/redhat/redhat-annobin-cc1 -fasynchronous-unwind-tables -fstack-clash-protection -fno-omit-frame-pointer ' + LDFLAGS='-Wl,-z,relro -Wl,--as-needed -Wl,-z,now -specs=/usr/lib/rpm/redhat/redhat-hardened-ld -specs=/usr/lib/rpm/redhat/redhat-annobin-cc1 -Wl,--build-id=sha1 ' + TMPDIR=/builddir/build/BUILD/zarr-2.14.2/.pyproject-builddir + RPM_TOXENV=py311 + HOSTNAME=rpmbuild + /usr/bin/python3 -Bs /usr/lib/rpm/redhat/pyproject_buildrequires.py --generate-extras --python3_pkgversion 3 --wheeldir /builddir/build/BUILD/zarr-2.14.2/pyproject-wheeldir -r Handling setuptools>=64.0.0 from build-system.requires Requirement satisfied: setuptools>=64.0.0 (installed: setuptools 65.5.1) Handling setuptools-scm from build-system.requires Requirement satisfied: setuptools-scm (installed: setuptools-scm 7.1.0) /usr/lib/python3.11/site-packages/setuptools/config/pyprojecttoml.py:108: _BetaConfiguration: Support for `[tool.setuptools]` in `pyproject.toml` is still *beta*. warnings.warn(msg, _BetaConfiguration) HOOK STDOUT: running egg_info HOOK STDOUT: writing zarr.egg-info/PKG-INFO HOOK STDOUT: writing dependency_links to zarr.egg-info/dependency_links.txt HOOK STDOUT: writing requirements to zarr.egg-info/requires.txt HOOK STDOUT: writing top-level names to zarr.egg-info/top_level.txt HOOK STDOUT: reading manifest file 'zarr.egg-info/SOURCES.txt' HOOK STDOUT: adding license file 'LICENSE.txt' HOOK STDOUT: writing manifest file 'zarr.egg-info/SOURCES.txt' Handling wheel from get_requires_for_build_wheel Requirement satisfied: wheel (installed: wheel 0.38.4) /usr/lib/python3.11/site-packages/setuptools/config/pyprojecttoml.py:108: _BetaConfiguration: Support for `[tool.setuptools]` in `pyproject.toml` is still *beta*. warnings.warn(msg, _BetaConfiguration) HOOK STDOUT: running dist_info HOOK STDOUT: writing zarr.egg-info/PKG-INFO HOOK STDOUT: writing dependency_links to zarr.egg-info/dependency_links.txt HOOK STDOUT: writing requirements to zarr.egg-info/requires.txt HOOK STDOUT: writing top-level names to zarr.egg-info/top_level.txt HOOK STDOUT: reading manifest file 'zarr.egg-info/SOURCES.txt' HOOK STDOUT: adding license file 'LICENSE.txt' HOOK STDOUT: writing manifest file 'zarr.egg-info/SOURCES.txt' HOOK STDOUT: creating '/builddir/build/BUILD/zarr-2.14.2/zarr-2.14.2.dist-info' Handling asciitree from hook generated metadata: Requires-Dist Requirement satisfied: asciitree (installed: asciitree 0.3.3) Handling numpy (>=1.20) from hook generated metadata: Requires-Dist Requirement satisfied: numpy (>=1.20) (installed: numpy 1.22.0) Handling fasteners from hook generated metadata: Requires-Dist Requirement satisfied: fasteners (installed: fasteners 0.18) Handling numcodecs (>=0.10.0) from hook generated metadata: Requires-Dist Requirement satisfied: numcodecs (>=0.10.0) (installed: numcodecs 0.11.0) Handling notebook ; extra == 'jupyter' from hook generated metadata: Requires-Dist Ignoring alien requirement: notebook ; extra == 'jupyter' Handling ipytree (>=0.2.2) ; extra == 'jupyter' from hook generated metadata: Requires-Dist Ignoring alien requirement: ipytree (>=0.2.2) ; extra == 'jupyter' Handling ipywidgets (>=8.0.0) ; extra == 'jupyter' from hook generated metadata: Requires-Dist Ignoring alien requirement: ipywidgets (>=8.0.0) ; extra == 'jupyter' + rm -rfv zarr-2.14.2.dist-info/ removed 'zarr-2.14.2.dist-info/LICENSE.txt' removed 'zarr-2.14.2.dist-info/top_level.txt' removed 'zarr-2.14.2.dist-info/METADATA' removed directory 'zarr-2.14.2.dist-info/' + RPM_EC=0 ++ jobs -p + exit 0 Executing(%build): /bin/sh -e /var/tmp/rpm-tmp.x30uKH + umask 022 + cd /builddir/build/BUILD + CFLAGS='-O2 -flto=auto -ffat-lto-objects -fexceptions -g -grecord-gcc-switches -pipe -Wall -Werror=format-security -Wp,-U_FORTIFY_SOURCE,-D_FORTIFY_SOURCE=3 -Wp,-D_GLIBCXX_ASSERTIONS -specs=/usr/lib/rpm/redhat/redhat-hardened-cc1 -fstack-protector-strong -specs=/usr/lib/rpm/redhat/redhat-annobin-cc1 -fasynchronous-unwind-tables -fstack-clash-protection -fno-omit-frame-pointer ' + export CFLAGS + CXXFLAGS='-O2 -flto=auto -ffat-lto-objects -fexceptions -g -grecord-gcc-switches -pipe -Wall -Werror=format-security -Wp,-U_FORTIFY_SOURCE,-D_FORTIFY_SOURCE=3 -Wp,-D_GLIBCXX_ASSERTIONS -specs=/usr/lib/rpm/redhat/redhat-hardened-cc1 -fstack-protector-strong -specs=/usr/lib/rpm/redhat/redhat-annobin-cc1 -fasynchronous-unwind-tables -fstack-clash-protection -fno-omit-frame-pointer ' + export CXXFLAGS + FFLAGS='-O2 -flto=auto -ffat-lto-objects -fexceptions -g -grecord-gcc-switches -pipe -Wall -Werror=format-security -Wp,-U_FORTIFY_SOURCE,-D_FORTIFY_SOURCE=3 -Wp,-D_GLIBCXX_ASSERTIONS -specs=/usr/lib/rpm/redhat/redhat-hardened-cc1 -fstack-protector-strong -specs=/usr/lib/rpm/redhat/redhat-annobin-cc1 -fasynchronous-unwind-tables -fstack-clash-protection -fno-omit-frame-pointer -I/usr/lib/gfortran/modules ' + export FFLAGS + FCFLAGS='-O2 -flto=auto -ffat-lto-objects -fexceptions -g -grecord-gcc-switches -pipe -Wall -Werror=format-security -Wp,-U_FORTIFY_SOURCE,-D_FORTIFY_SOURCE=3 -Wp,-D_GLIBCXX_ASSERTIONS -specs=/usr/lib/rpm/redhat/redhat-hardened-cc1 -fstack-protector-strong -specs=/usr/lib/rpm/redhat/redhat-annobin-cc1 -fasynchronous-unwind-tables -fstack-clash-protection -fno-omit-frame-pointer -I/usr/lib/gfortran/modules ' + export FCFLAGS + VALAFLAGS=-g + export VALAFLAGS + LDFLAGS='-Wl,-z,relro -Wl,--as-needed -Wl,-z,now -specs=/usr/lib/rpm/redhat/redhat-hardened-ld -specs=/usr/lib/rpm/redhat/redhat-annobin-cc1 -Wl,--build-id=sha1 ' + export LDFLAGS + LT_SYS_LIBRARY_PATH=/usr/lib: + export LT_SYS_LIBRARY_PATH + CC=gcc + export CC + CXX=g++ + export CXX + cd zarr-2.14.2 + mkdir -p /builddir/build/BUILD/zarr-2.14.2/.pyproject-builddir + CFLAGS='-O2 -flto=auto -ffat-lto-objects -fexceptions -g -grecord-gcc-switches -pipe -Wall -Werror=format-security -Wp,-U_FORTIFY_SOURCE,-D_FORTIFY_SOURCE=3 -Wp,-D_GLIBCXX_ASSERTIONS -specs=/usr/lib/rpm/redhat/redhat-hardened-cc1 -fstack-protector-strong -specs=/usr/lib/rpm/redhat/redhat-annobin-cc1 -fasynchronous-unwind-tables -fstack-clash-protection -fno-omit-frame-pointer ' + LDFLAGS='-Wl,-z,relro -Wl,--as-needed -Wl,-z,now -specs=/usr/lib/rpm/redhat/redhat-hardened-ld -specs=/usr/lib/rpm/redhat/redhat-annobin-cc1 -Wl,--build-id=sha1 ' + TMPDIR=/builddir/build/BUILD/zarr-2.14.2/.pyproject-builddir + /usr/bin/python3 -Bs /usr/lib/rpm/redhat/pyproject_wheel.py /builddir/build/BUILD/zarr-2.14.2/pyproject-wheeldir Processing /builddir/build/BUILD/zarr-2.14.2 Preparing metadata (pyproject.toml): started Running command Preparing metadata (pyproject.toml) /usr/lib/python3.11/site-packages/setuptools/config/pyprojecttoml.py:108: _BetaConfiguration: Support for `[tool.setuptools]` in `pyproject.toml` is still *beta*. warnings.warn(msg, _BetaConfiguration) running dist_info creating /builddir/build/BUILD/zarr-2.14.2/.pyproject-builddir/pip-modern-metadata-_12p9c3e/zarr.egg-info writing /builddir/build/BUILD/zarr-2.14.2/.pyproject-builddir/pip-modern-metadata-_12p9c3e/zarr.egg-info/PKG-INFO writing dependency_links to /builddir/build/BUILD/zarr-2.14.2/.pyproject-builddir/pip-modern-metadata-_12p9c3e/zarr.egg-info/dependency_links.txt writing requirements to /builddir/build/BUILD/zarr-2.14.2/.pyproject-builddir/pip-modern-metadata-_12p9c3e/zarr.egg-info/requires.txt writing top-level names to /builddir/build/BUILD/zarr-2.14.2/.pyproject-builddir/pip-modern-metadata-_12p9c3e/zarr.egg-info/top_level.txt writing manifest file '/builddir/build/BUILD/zarr-2.14.2/.pyproject-builddir/pip-modern-metadata-_12p9c3e/zarr.egg-info/SOURCES.txt' reading manifest file '/builddir/build/BUILD/zarr-2.14.2/.pyproject-builddir/pip-modern-metadata-_12p9c3e/zarr.egg-info/SOURCES.txt' adding license file 'LICENSE.txt' writing manifest file '/builddir/build/BUILD/zarr-2.14.2/.pyproject-builddir/pip-modern-metadata-_12p9c3e/zarr.egg-info/SOURCES.txt' creating '/builddir/build/BUILD/zarr-2.14.2/.pyproject-builddir/pip-modern-metadata-_12p9c3e/zarr-2.14.2.dist-info' Preparing metadata (pyproject.toml): finished with status 'done' Building wheels for collected packages: zarr Building wheel for zarr (pyproject.toml): started Running command Building wheel for zarr (pyproject.toml) /usr/lib/python3.11/site-packages/setuptools/config/pyprojecttoml.py:108: _BetaConfiguration: Support for `[tool.setuptools]` in `pyproject.toml` is still *beta*. warnings.warn(msg, _BetaConfiguration) running bdist_wheel running build running build_py creating build creating build/lib creating build/lib/zarr copying zarr/codecs.py -> build/lib/zarr copying zarr/convenience.py -> build/lib/zarr copying zarr/meta_v1.py -> build/lib/zarr copying zarr/hierarchy.py -> build/lib/zarr copying zarr/attrs.py -> build/lib/zarr copying zarr/version.py -> build/lib/zarr copying zarr/errors.py -> build/lib/zarr copying zarr/sync.py -> build/lib/zarr copying zarr/creation.py -> build/lib/zarr copying zarr/util.py -> build/lib/zarr copying zarr/indexing.py -> build/lib/zarr copying zarr/storage.py -> build/lib/zarr copying zarr/core.py -> build/lib/zarr copying zarr/meta.py -> build/lib/zarr copying zarr/n5.py -> build/lib/zarr copying zarr/__init__.py -> build/lib/zarr creating build/lib/zarr/_storage copying zarr/_storage/absstore.py -> build/lib/zarr/_storage copying zarr/_storage/v3.py -> build/lib/zarr/_storage copying zarr/_storage/store.py -> build/lib/zarr/_storage copying zarr/_storage/v3_storage_transformers.py -> build/lib/zarr/_storage copying zarr/_storage/__init__.py -> build/lib/zarr/_storage creating build/lib/zarr/tests copying zarr/tests/test_dim_separator.py -> build/lib/zarr/tests copying zarr/tests/test_convenience.py -> build/lib/zarr/tests copying zarr/tests/test_attrs.py -> build/lib/zarr/tests copying zarr/tests/test_util.py -> build/lib/zarr/tests copying zarr/tests/test_sync.py -> build/lib/zarr/tests copying zarr/tests/test_meta_array.py -> build/lib/zarr/tests copying zarr/tests/test_creation.py -> build/lib/zarr/tests copying zarr/tests/test_n5.py -> build/lib/zarr/tests copying zarr/tests/test_core.py -> build/lib/zarr/tests copying zarr/tests/test_storage.py -> build/lib/zarr/tests copying zarr/tests/util.py -> build/lib/zarr/tests copying zarr/tests/test_indexing.py -> build/lib/zarr/tests copying zarr/tests/test_storage_v3.py -> build/lib/zarr/tests copying zarr/tests/test_info.py -> build/lib/zarr/tests copying zarr/tests/conftest.py -> build/lib/zarr/tests copying zarr/tests/__init__.py -> build/lib/zarr/tests copying zarr/tests/test_meta.py -> build/lib/zarr/tests copying zarr/tests/test_filters.py -> build/lib/zarr/tests copying zarr/tests/test_hierarchy.py -> build/lib/zarr/tests running egg_info writing zarr.egg-info/PKG-INFO writing dependency_links to zarr.egg-info/dependency_links.txt writing requirements to zarr.egg-info/requires.txt writing top-level names to zarr.egg-info/top_level.txt reading manifest file 'zarr.egg-info/SOURCES.txt' adding license file 'LICENSE.txt' writing manifest file 'zarr.egg-info/SOURCES.txt' /usr/lib/python3.11/site-packages/setuptools/command/build_py.py:202: SetuptoolsDeprecationWarning: Installing 'zarr.tests.data' as data is deprecated, please list it in `packages`. !! ############################ # Package would be ignored # ############################ Python recognizes 'zarr.tests.data' as an importable package, but it is not listed in the `packages` configuration of setuptools. 'zarr.tests.data' has been automatically added to the distribution only because it may contain data files, but this behavior is likely to change in future versions of setuptools (and therefore is considered deprecated). Please make sure that 'zarr.tests.data' is included as a package by using the `packages` configuration field or the proper discovery methods (for example by using `find_namespace_packages(...)`/`find_namespace:` instead of `find_packages(...)`/`find:`). You can read more about "package discovery" and "data files" on setuptools documentation page. !! check.warn(importable) /usr/lib/python3.11/site-packages/setuptools/command/build_py.py:202: SetuptoolsDeprecationWarning: Installing 'zarr.tests.data.store' as data is deprecated, please list it in `packages`. !! ############################ # Package would be ignored # ############################ Python recognizes 'zarr.tests.data.store' as an importable package, but it is not listed in the `packages` configuration of setuptools. 'zarr.tests.data.store' has been automatically added to the distribution only because it may contain data files, but this behavior is likely to change in future versions of setuptools (and therefore is considered deprecated). Please make sure that 'zarr.tests.data.store' is included as a package by using the `packages` configuration field or the proper discovery methods (for example by using `find_namespace_packages(...)`/`find_namespace:` instead of `find_packages(...)`/`find:`). You can read more about "package discovery" and "data files" on setuptools documentation page. !! check.warn(importable) creating build/lib/zarr/tests/data copying zarr/tests/data/store.zip -> build/lib/zarr/tests/data creating build/lib/zarr/tests/data/store copying zarr/tests/data/store/foo -> build/lib/zarr/tests/data/store installing to build/bdist.linux-riscv64/wheel running install running install_lib creating build/bdist.linux-riscv64 creating build/bdist.linux-riscv64/wheel creating build/bdist.linux-riscv64/wheel/zarr copying build/lib/zarr/codecs.py -> build/bdist.linux-riscv64/wheel/zarr copying build/lib/zarr/convenience.py -> build/bdist.linux-riscv64/wheel/zarr copying build/lib/zarr/meta_v1.py -> build/bdist.linux-riscv64/wheel/zarr copying build/lib/zarr/hierarchy.py -> build/bdist.linux-riscv64/wheel/zarr copying build/lib/zarr/attrs.py -> build/bdist.linux-riscv64/wheel/zarr copying build/lib/zarr/version.py -> build/bdist.linux-riscv64/wheel/zarr copying build/lib/zarr/errors.py -> build/bdist.linux-riscv64/wheel/zarr copying build/lib/zarr/sync.py -> build/bdist.linux-riscv64/wheel/zarr copying build/lib/zarr/creation.py -> build/bdist.linux-riscv64/wheel/zarr copying build/lib/zarr/util.py -> build/bdist.linux-riscv64/wheel/zarr copying build/lib/zarr/indexing.py -> build/bdist.linux-riscv64/wheel/zarr copying build/lib/zarr/storage.py -> build/bdist.linux-riscv64/wheel/zarr copying build/lib/zarr/core.py -> build/bdist.linux-riscv64/wheel/zarr creating build/bdist.linux-riscv64/wheel/zarr/tests copying build/lib/zarr/tests/test_dim_separator.py -> build/bdist.linux-riscv64/wheel/zarr/tests copying build/lib/zarr/tests/test_convenience.py -> build/bdist.linux-riscv64/wheel/zarr/tests copying build/lib/zarr/tests/test_attrs.py -> build/bdist.linux-riscv64/wheel/zarr/tests copying build/lib/zarr/tests/test_util.py -> build/bdist.linux-riscv64/wheel/zarr/tests copying build/lib/zarr/tests/test_sync.py -> build/bdist.linux-riscv64/wheel/zarr/tests copying build/lib/zarr/tests/test_meta_array.py -> build/bdist.linux-riscv64/wheel/zarr/tests copying build/lib/zarr/tests/test_creation.py -> build/bdist.linux-riscv64/wheel/zarr/tests copying build/lib/zarr/tests/test_n5.py -> build/bdist.linux-riscv64/wheel/zarr/tests copying build/lib/zarr/tests/test_core.py -> build/bdist.linux-riscv64/wheel/zarr/tests copying build/lib/zarr/tests/test_storage.py -> build/bdist.linux-riscv64/wheel/zarr/tests copying build/lib/zarr/tests/util.py -> build/bdist.linux-riscv64/wheel/zarr/tests creating build/bdist.linux-riscv64/wheel/zarr/tests/data creating build/bdist.linux-riscv64/wheel/zarr/tests/data/store copying build/lib/zarr/tests/data/store/foo -> build/bdist.linux-riscv64/wheel/zarr/tests/data/store copying build/lib/zarr/tests/data/store.zip -> build/bdist.linux-riscv64/wheel/zarr/tests/data copying build/lib/zarr/tests/test_indexing.py -> build/bdist.linux-riscv64/wheel/zarr/tests copying build/lib/zarr/tests/test_storage_v3.py -> build/bdist.linux-riscv64/wheel/zarr/tests copying build/lib/zarr/tests/test_info.py -> build/bdist.linux-riscv64/wheel/zarr/tests copying build/lib/zarr/tests/conftest.py -> build/bdist.linux-riscv64/wheel/zarr/tests copying build/lib/zarr/tests/__init__.py -> build/bdist.linux-riscv64/wheel/zarr/tests copying build/lib/zarr/tests/test_meta.py -> build/bdist.linux-riscv64/wheel/zarr/tests copying build/lib/zarr/tests/test_filters.py -> build/bdist.linux-riscv64/wheel/zarr/tests copying build/lib/zarr/tests/test_hierarchy.py -> build/bdist.linux-riscv64/wheel/zarr/tests copying build/lib/zarr/meta.py -> build/bdist.linux-riscv64/wheel/zarr copying build/lib/zarr/n5.py -> build/bdist.linux-riscv64/wheel/zarr copying build/lib/zarr/__init__.py -> build/bdist.linux-riscv64/wheel/zarr creating build/bdist.linux-riscv64/wheel/zarr/_storage copying build/lib/zarr/_storage/absstore.py -> build/bdist.linux-riscv64/wheel/zarr/_storage copying build/lib/zarr/_storage/v3.py -> build/bdist.linux-riscv64/wheel/zarr/_storage copying build/lib/zarr/_storage/store.py -> build/bdist.linux-riscv64/wheel/zarr/_storage copying build/lib/zarr/_storage/v3_storage_transformers.py -> build/bdist.linux-riscv64/wheel/zarr/_storage copying build/lib/zarr/_storage/__init__.py -> build/bdist.linux-riscv64/wheel/zarr/_storage running install_egg_info Copying zarr.egg-info to build/bdist.linux-riscv64/wheel/zarr-2.14.2-py3.11.egg-info running install_scripts creating build/bdist.linux-riscv64/wheel/zarr-2.14.2.dist-info/WHEEL creating '/builddir/build/BUILD/zarr-2.14.2/.pyproject-builddir/pip-wheel-2lgy2o2j/tmptyaaw27o/zarr-2.14.2-py3-none-any.whl' and adding 'build/bdist.linux-riscv64/wheel' to it adding 'zarr/__init__.py' adding 'zarr/attrs.py' adding 'zarr/codecs.py' adding 'zarr/convenience.py' adding 'zarr/core.py' adding 'zarr/creation.py' adding 'zarr/errors.py' adding 'zarr/hierarchy.py' adding 'zarr/indexing.py' adding 'zarr/meta.py' adding 'zarr/meta_v1.py' adding 'zarr/n5.py' adding 'zarr/storage.py' adding 'zarr/sync.py' adding 'zarr/util.py' adding 'zarr/version.py' adding 'zarr/_storage/__init__.py' adding 'zarr/_storage/absstore.py' adding 'zarr/_storage/store.py' adding 'zarr/_storage/v3.py' adding 'zarr/_storage/v3_storage_transformers.py' adding 'zarr/tests/__init__.py' adding 'zarr/tests/conftest.py' adding 'zarr/tests/test_attrs.py' adding 'zarr/tests/test_convenience.py' adding 'zarr/tests/test_core.py' adding 'zarr/tests/test_creation.py' adding 'zarr/tests/test_dim_separator.py' adding 'zarr/tests/test_filters.py' adding 'zarr/tests/test_hierarchy.py' adding 'zarr/tests/test_indexing.py' adding 'zarr/tests/test_info.py' adding 'zarr/tests/test_meta.py' adding 'zarr/tests/test_meta_array.py' adding 'zarr/tests/test_n5.py' adding 'zarr/tests/test_storage.py' adding 'zarr/tests/test_storage_v3.py' adding 'zarr/tests/test_sync.py' adding 'zarr/tests/test_util.py' adding 'zarr/tests/util.py' adding 'zarr/tests/data/store.zip' adding 'zarr/tests/data/store/foo' adding 'zarr-2.14.2.dist-info/LICENSE.txt' adding 'zarr-2.14.2.dist-info/METADATA' adding 'zarr-2.14.2.dist-info/WHEEL' adding 'zarr-2.14.2.dist-info/top_level.txt' adding 'zarr-2.14.2.dist-info/RECORD' removing build/bdist.linux-riscv64/wheel Building wheel for zarr (pyproject.toml): finished with status 'done' Created wheel for zarr: filename=zarr-2.14.2-py3-none-any.whl size=203324 sha256=f906ce050f4f956343eef78abd9c71ff52ec16bbe2829c5d24d8fd278669bbe4 Stored in directory: /builddir/.cache/pip/wheels/04/87/e3/40f09746dbc04583d6f6da9a3d3b167c0b4af58c85a7c41835 Successfully built zarr + PYTHONPATH=/builddir/build/BUILD/zarr-2.14.2 + sphinx-build-3 docs html Running Sphinx v5.3.0 making output directory... done [autosummary] generating autosummary for: acknowledgments.rst, api.rst, api/attrs.rst, api/codecs.rst, api/convenience.rst, api/core.rst, api/creation.rst, api/hierarchy.rst, api/n5.rst, api/storage.rst, ..., getting_started.rst, index.rst, installation.rst, license.rst, release.rst, spec.rst, spec/v1.rst, spec/v2.rst, spec/v3.rst, tutorial.rst loading intersphinx inventory from https://numpy.org/doc/stable/objects.inv... loading intersphinx inventory from https://docs.python.org/objects.inv... WARNING: failed to reach any of the inventories with the following issues: intersphinx inventory 'https://docs.python.org/objects.inv' not fetchable due to : HTTPSConnectionPool(host='docs.python.org', port=443): Max retries exceeded with url: /objects.inv (Caused by NewConnectionError(': Failed to establish a new connection: [Errno -3] Temporary failure in name resolution')) WARNING: failed to reach any of the inventories with the following issues: intersphinx inventory 'https://numpy.org/doc/stable/objects.inv' not fetchable due to : HTTPSConnectionPool(host='numpy.org', port=443): Max retries exceeded with url: /doc/stable/objects.inv (Caused by NewConnectionError(': Failed to establish a new connection: [Errno -3] Temporary failure in name resolution')) building [mo]: targets for 0 po files that are out of date building [html]: targets for 22 source files that are out of date updating environment: [new config] 22 added, 0 changed, 0 removed reading sources... [ 4%] acknowledgments reading sources... [ 9%] api reading sources... [ 13%] api/attrs reading sources... [ 18%] api/codecs reading sources... [ 22%] api/convenience reading sources... [ 27%] api/core reading sources... [ 31%] api/creation reading sources... [ 36%] api/hierarchy reading sources... [ 40%] api/n5 reading sources... [ 45%] api/storage reading sources... [ 50%] api/sync reading sources... [ 54%] contributing reading sources... [ 59%] getting_started reading sources... [ 63%] index reading sources... [ 68%] installation reading sources... [ 72%] license reading sources... [ 77%] release reading sources... [ 81%] spec reading sources... [ 86%] spec/v1 reading sources... [ 90%] spec/v2 reading sources... [ 95%] spec/v3 reading sources... [100%] tutorial /builddir/build/BUILD/zarr-2.14.2/docs/release.rst:53: ERROR: Unexpected indentation. /builddir/build/BUILD/zarr-2.14.2/docs/release.rst:57: WARNING: Block quote ends without a blank line; unexpected unindent. looking for now-outdated files... none found pickling environment... done checking consistency... done preparing documents... done writing output... [ 4%] acknowledgments writing output... [ 9%] api writing output... [ 13%] api/attrs writing output... [ 18%] api/codecs writing output... [ 22%] api/convenience writing output... [ 27%] api/core writing output... [ 31%] api/creation writing output... [ 36%] api/hierarchy writing output... [ 40%] api/n5 writing output... [ 45%] api/storage writing output... [ 50%] api/sync writing output... [ 54%] contributing writing output... [ 59%] getting_started writing output... [ 63%] index writing output... [ 68%] installation writing output... [ 72%] license writing output... [ 77%] release writing output... [ 81%] spec writing output... [ 86%] spec/v1 writing output... [ 90%] spec/v2 writing output... [ 95%] spec/v3 writing output... [100%] tutorial /builddir/build/BUILD/zarr-2.14.2/docs/index.rst:62: WARNING: more than one target found for 'any' cross-reference 'tutorial': could be :doc:`To the Tutorial` or :std:ref:`To the Tutorial` /builddir/build/BUILD/zarr-2.14.2/docs/spec/v2.rst:130: WARNING: undefined label: 'numpy:arrays.interface' /builddir/build/BUILD/zarr-2.14.2/docs/spec/v2.rst:148: WARNING: undefined label: 'numpy:arrays.dtypes.dateunits' generating indices... /builddir/build/BUILD/zarr-2.14.2/docs/spec/v2.rst:154: WARNING: undefined label: 'numpy:arrays.interface' genindex py-modindex done highlighting module code... [ 11%] zarr._storage.absstore highlighting module code... [ 22%] zarr.attrs highlighting module code... [ 33%] zarr.convenience highlighting module code... [ 44%] zarr.core highlighting module code... [ 55%] zarr.creation highlighting module code... [ 66%] zarr.hierarchy highlighting module code... [ 77%] zarr.n5 highlighting module code... [ 88%] zarr.storage highlighting module code... [100%] zarr.sync writing additional pages... search done copying images... [ 25%] _static/index_getting_started.svg copying images... [ 50%] _static/index_user_guide.svg copying images... [ 75%] _static/index_api.svg copying images... [100%] _static/index_contribute.svg copying static files... done copying extra files... done dumping search index in English (code: en)... done dumping object inventory... done build succeeded, 8 warnings. The HTML pages are in html. + rm -rf html/.doctrees html/.buildinfo html/._static/donotdelete + RPM_EC=0 ++ jobs -p + exit 0 Executing(%install): /bin/sh -e /var/tmp/rpm-tmp.MqtrK1 + umask 022 + cd /builddir/build/BUILD + '[' /builddir/build/BUILDROOT/python-zarr-2.14.2-1.fc38.noarch '!=' / ']' + rm -rf /builddir/build/BUILDROOT/python-zarr-2.14.2-1.fc38.noarch ++ dirname /builddir/build/BUILDROOT/python-zarr-2.14.2-1.fc38.noarch + mkdir -p /builddir/build/BUILDROOT + mkdir /builddir/build/BUILDROOT/python-zarr-2.14.2-1.fc38.noarch + CFLAGS='-O2 -flto=auto -ffat-lto-objects -fexceptions -g -grecord-gcc-switches -pipe -Wall -Werror=format-security -Wp,-U_FORTIFY_SOURCE,-D_FORTIFY_SOURCE=3 -Wp,-D_GLIBCXX_ASSERTIONS -specs=/usr/lib/rpm/redhat/redhat-hardened-cc1 -fstack-protector-strong -specs=/usr/lib/rpm/redhat/redhat-annobin-cc1 -fasynchronous-unwind-tables -fstack-clash-protection -fno-omit-frame-pointer ' + export CFLAGS + CXXFLAGS='-O2 -flto=auto -ffat-lto-objects -fexceptions -g -grecord-gcc-switches -pipe -Wall -Werror=format-security -Wp,-U_FORTIFY_SOURCE,-D_FORTIFY_SOURCE=3 -Wp,-D_GLIBCXX_ASSERTIONS -specs=/usr/lib/rpm/redhat/redhat-hardened-cc1 -fstack-protector-strong -specs=/usr/lib/rpm/redhat/redhat-annobin-cc1 -fasynchronous-unwind-tables -fstack-clash-protection -fno-omit-frame-pointer ' + export CXXFLAGS + FFLAGS='-O2 -flto=auto -ffat-lto-objects -fexceptions -g -grecord-gcc-switches -pipe -Wall -Werror=format-security -Wp,-U_FORTIFY_SOURCE,-D_FORTIFY_SOURCE=3 -Wp,-D_GLIBCXX_ASSERTIONS -specs=/usr/lib/rpm/redhat/redhat-hardened-cc1 -fstack-protector-strong -specs=/usr/lib/rpm/redhat/redhat-annobin-cc1 -fasynchronous-unwind-tables -fstack-clash-protection -fno-omit-frame-pointer -I/usr/lib/gfortran/modules ' + export FFLAGS + FCFLAGS='-O2 -flto=auto -ffat-lto-objects -fexceptions -g -grecord-gcc-switches -pipe -Wall -Werror=format-security -Wp,-U_FORTIFY_SOURCE,-D_FORTIFY_SOURCE=3 -Wp,-D_GLIBCXX_ASSERTIONS -specs=/usr/lib/rpm/redhat/redhat-hardened-cc1 -fstack-protector-strong -specs=/usr/lib/rpm/redhat/redhat-annobin-cc1 -fasynchronous-unwind-tables -fstack-clash-protection -fno-omit-frame-pointer -I/usr/lib/gfortran/modules ' + export FCFLAGS + VALAFLAGS=-g + export VALAFLAGS + LDFLAGS='-Wl,-z,relro -Wl,--as-needed -Wl,-z,now -specs=/usr/lib/rpm/redhat/redhat-hardened-ld -specs=/usr/lib/rpm/redhat/redhat-annobin-cc1 -Wl,--build-id=sha1 ' + export LDFLAGS + LT_SYS_LIBRARY_PATH=/usr/lib: + export LT_SYS_LIBRARY_PATH + CC=gcc + export CC + CXX=g++ + export CXX + cd zarr-2.14.2 ++ xargs basename --multiple ++ ls /builddir/build/BUILD/zarr-2.14.2/pyproject-wheeldir/zarr-2.14.2-py3-none-any.whl ++ sed -E 's/([^-]+)-([^-]+)-.+\.whl/\1==\2/' + specifier=zarr==2.14.2 + TMPDIR=/builddir/build/BUILD/zarr-2.14.2/.pyproject-builddir + /usr/bin/python3 -m pip install --root /builddir/build/BUILDROOT/python-zarr-2.14.2-1.fc38.noarch --prefix /usr --no-deps --disable-pip-version-check --progress-bar off --verbose --ignore-installed --no-warn-script-location --no-index --no-cache-dir --find-links /builddir/build/BUILD/zarr-2.14.2/pyproject-wheeldir zarr==2.14.2 Using pip 22.3.1 from /usr/lib/python3.11/site-packages/pip (python 3.11) Looking in links: /builddir/build/BUILD/zarr-2.14.2/pyproject-wheeldir Processing ./pyproject-wheeldir/zarr-2.14.2-py3-none-any.whl Installing collected packages: zarr Successfully installed zarr-2.14.2 + '[' -d /builddir/build/BUILDROOT/python-zarr-2.14.2-1.fc38.noarch/usr/bin ']' + rm -f /builddir/build/BUILD/python-zarr-2.14.2-1.fc38.noarch-pyproject-ghost-distinfo + site_dirs=() + '[' -d /builddir/build/BUILDROOT/python-zarr-2.14.2-1.fc38.noarch/usr/lib/python3.11/site-packages ']' + site_dirs+=("/usr/lib/python3.11/site-packages") + '[' /builddir/build/BUILDROOT/python-zarr-2.14.2-1.fc38.noarch/usr/lib64/python3.11/site-packages '!=' /builddir/build/BUILDROOT/python-zarr-2.14.2-1.fc38.noarch/usr/lib/python3.11/site-packages ']' + '[' -d /builddir/build/BUILDROOT/python-zarr-2.14.2-1.fc38.noarch/usr/lib64/python3.11/site-packages ']' + for site_dir in ${site_dirs[@]} + for distinfo in /builddir/build/BUILDROOT/python-zarr-2.14.2-1.fc38.noarch$site_dir/*.dist-info + echo '%ghost /usr/lib/python3.11/site-packages/zarr-2.14.2.dist-info' + sed -i s/pip/rpm/ /builddir/build/BUILDROOT/python-zarr-2.14.2-1.fc38.noarch/usr/lib/python3.11/site-packages/zarr-2.14.2.dist-info/INSTALLER + PYTHONPATH=/usr/lib/rpm/redhat + /usr/bin/python3 -B /usr/lib/rpm/redhat/pyproject_preprocess_record.py --buildroot /builddir/build/BUILDROOT/python-zarr-2.14.2-1.fc38.noarch --record /builddir/build/BUILDROOT/python-zarr-2.14.2-1.fc38.noarch/usr/lib/python3.11/site-packages/zarr-2.14.2.dist-info/RECORD --output /builddir/build/BUILD/python-zarr-2.14.2-1.fc38.noarch-pyproject-record + rm -fv /builddir/build/BUILDROOT/python-zarr-2.14.2-1.fc38.noarch/usr/lib/python3.11/site-packages/zarr-2.14.2.dist-info/RECORD removed '/builddir/build/BUILDROOT/python-zarr-2.14.2-1.fc38.noarch/usr/lib/python3.11/site-packages/zarr-2.14.2.dist-info/RECORD' + rm -fv /builddir/build/BUILDROOT/python-zarr-2.14.2-1.fc38.noarch/usr/lib/python3.11/site-packages/zarr-2.14.2.dist-info/REQUESTED removed '/builddir/build/BUILDROOT/python-zarr-2.14.2-1.fc38.noarch/usr/lib/python3.11/site-packages/zarr-2.14.2.dist-info/REQUESTED' ++ cut -f1 '-d ' ++ wc -l /builddir/build/BUILD/python-zarr-2.14.2-1.fc38.noarch-pyproject-ghost-distinfo + lines=1 + '[' 1 -ne 1 ']' + /usr/bin/python3 /usr/lib/rpm/redhat/pyproject_save_files.py --output-files /builddir/build/BUILD/python-zarr-2.14.2-1.fc38.noarch-pyproject-files --output-modules /builddir/build/BUILD/python-zarr-2.14.2-1.fc38.noarch-pyproject-modules --buildroot /builddir/build/BUILDROOT/python-zarr-2.14.2-1.fc38.noarch --sitelib /usr/lib/python3.11/site-packages --sitearch /usr/lib64/python3.11/site-packages --python-version 3.11 --pyproject-record /builddir/build/BUILD/python-zarr-2.14.2-1.fc38.noarch-pyproject-record --prefix /usr zarr + /usr/bin/find-debuginfo -j8 --strict-build-id -m -i --build-id-seed 2.14.2-1.fc38 --unique-debug-suffix -2.14.2-1.fc38.noarch --unique-debug-src-base python-zarr-2.14.2-1.fc38.noarch --run-dwz --dwz-low-mem-die-limit 10000000 --dwz-max-die-limit 50000000 -S debugsourcefiles.list /builddir/build/BUILD/zarr-2.14.2 find: 'debug': No such file or directory + /usr/lib/rpm/check-buildroot + /usr/lib/rpm/redhat/brp-ldconfig + /usr/lib/rpm/brp-compress + /usr/lib/rpm/redhat/brp-strip-lto /usr/bin/strip + /usr/lib/rpm/brp-strip-static-archive /usr/bin/strip + /usr/lib/rpm/check-rpaths + /usr/lib/rpm/redhat/brp-mangle-shebangs + /usr/lib/rpm/brp-remove-la-files + env /usr/lib/rpm/redhat/brp-python-bytecompile '' 1 0 -j8 Bytecompiling .py files below /builddir/build/BUILDROOT/python-zarr-2.14.2-1.fc38.noarch/usr/lib/python3.11 using python3.11 + /usr/lib/rpm/redhat/brp-python-hardlink Executing(%check): /bin/sh -e /var/tmp/rpm-tmp.CV7YdI + umask 022 + cd /builddir/build/BUILD + CFLAGS='-O2 -flto=auto -ffat-lto-objects -fexceptions -g -grecord-gcc-switches -pipe -Wall -Werror=format-security -Wp,-U_FORTIFY_SOURCE,-D_FORTIFY_SOURCE=3 -Wp,-D_GLIBCXX_ASSERTIONS -specs=/usr/lib/rpm/redhat/redhat-hardened-cc1 -fstack-protector-strong -specs=/usr/lib/rpm/redhat/redhat-annobin-cc1 -fasynchronous-unwind-tables -fstack-clash-protection -fno-omit-frame-pointer ' + export CFLAGS + CXXFLAGS='-O2 -flto=auto -ffat-lto-objects -fexceptions -g -grecord-gcc-switches -pipe -Wall -Werror=format-security -Wp,-U_FORTIFY_SOURCE,-D_FORTIFY_SOURCE=3 -Wp,-D_GLIBCXX_ASSERTIONS -specs=/usr/lib/rpm/redhat/redhat-hardened-cc1 -fstack-protector-strong -specs=/usr/lib/rpm/redhat/redhat-annobin-cc1 -fasynchronous-unwind-tables -fstack-clash-protection -fno-omit-frame-pointer ' + export CXXFLAGS + FFLAGS='-O2 -flto=auto -ffat-lto-objects -fexceptions -g -grecord-gcc-switches -pipe -Wall -Werror=format-security -Wp,-U_FORTIFY_SOURCE,-D_FORTIFY_SOURCE=3 -Wp,-D_GLIBCXX_ASSERTIONS -specs=/usr/lib/rpm/redhat/redhat-hardened-cc1 -fstack-protector-strong -specs=/usr/lib/rpm/redhat/redhat-annobin-cc1 -fasynchronous-unwind-tables -fstack-clash-protection -fno-omit-frame-pointer -I/usr/lib/gfortran/modules ' + export FFLAGS + FCFLAGS='-O2 -flto=auto -ffat-lto-objects -fexceptions -g -grecord-gcc-switches -pipe -Wall -Werror=format-security -Wp,-U_FORTIFY_SOURCE,-D_FORTIFY_SOURCE=3 -Wp,-D_GLIBCXX_ASSERTIONS -specs=/usr/lib/rpm/redhat/redhat-hardened-cc1 -fstack-protector-strong -specs=/usr/lib/rpm/redhat/redhat-annobin-cc1 -fasynchronous-unwind-tables -fstack-clash-protection -fno-omit-frame-pointer -I/usr/lib/gfortran/modules ' + export FCFLAGS + VALAFLAGS=-g + export VALAFLAGS + LDFLAGS='-Wl,-z,relro -Wl,--as-needed -Wl,-z,now -specs=/usr/lib/rpm/redhat/redhat-hardened-ld -specs=/usr/lib/rpm/redhat/redhat-annobin-cc1 -Wl,--build-id=sha1 ' + export LDFLAGS + LT_SYS_LIBRARY_PATH=/usr/lib: + export LT_SYS_LIBRARY_PATH + CC=gcc + export CC + CXX=g++ + export CXX + cd zarr-2.14.2 + CFLAGS='-O2 -flto=auto -ffat-lto-objects -fexceptions -g -grecord-gcc-switches -pipe -Wall -Werror=format-security -Wp,-U_FORTIFY_SOURCE,-D_FORTIFY_SOURCE=3 -Wp,-D_GLIBCXX_ASSERTIONS -specs=/usr/lib/rpm/redhat/redhat-hardened-cc1 -fstack-protector-strong -specs=/usr/lib/rpm/redhat/redhat-annobin-cc1 -fasynchronous-unwind-tables -fstack-clash-protection -fno-omit-frame-pointer ' + LDFLAGS='-Wl,-z,relro -Wl,--as-needed -Wl,-z,now -specs=/usr/lib/rpm/redhat/redhat-hardened-ld -specs=/usr/lib/rpm/redhat/redhat-annobin-cc1 -Wl,--build-id=sha1 ' + PATH=/builddir/build/BUILDROOT/python-zarr-2.14.2-1.fc38.noarch/usr/bin:/usr/bin:/bin:/usr/sbin:/sbin:/usr/local/sbin + PYTHONPATH=/builddir/build/BUILDROOT/python-zarr-2.14.2-1.fc38.noarch/usr/lib64/python3.11/site-packages:/builddir/build/BUILDROOT/python-zarr-2.14.2-1.fc38.noarch/usr/lib/python3.11/site-packages + PYTHONDONTWRITEBYTECODE=1 + PYTEST_ADDOPTS=' --ignore=/builddir/build/BUILD/zarr-2.14.2/.pyproject-builddir' + PYTEST_XDIST_AUTO_NUM_WORKERS=8 + /usr/bin/pytest -ra ============================= test session starts ============================== platform linux -- Python 3.11.2, pytest-7.2.2, pluggy-1.0.0 rootdir: /builddir/build/BUILD/zarr-2.14.2, configfile: pyproject.toml collected 5114 items zarr/tests/test_attrs.py .................... [ 0%] zarr/tests/test_convenience.py .....ss...................ssssssss.s..... [ 1%] .......................................................sssssssssssssssss [ 2%] ssssssssssssssssssssssssssssssssssssssssssssssssssssssssssssssssssssssss [ 4%] ssssssssssssssssssssssssssssssssssssssssssssssssssssssssssssssssssssssss [ 5%] sssssssssssssssssss [ 5%] zarr/tests/test_core.py ................................................ [ 6%] ........................................................................ [ 8%] .....................................................................sss [ 9%] ssssssssssssssssssssssssssssssssssssssssssss............................ [ 10%] ........................................................................ [ 12%] ........................................................................ [ 13%] .................................................................FFFFFFF [ 15%] FFFFFFFF.FFFFFFFFF.FFFFFFFFFFFFFF.FFFFFFFFFFFFFFFFFFFFF.FFFFFFFFF.FFFFFF [ 16%] FFFFFFFFFFFFFFF......................................................... [ 17%] ........................................................................ [ 19%] ........................................................................ [ 20%] ........................................................................ [ 22%] ........................................................................ [ 23%] ........................................................................ [ 25%] ........................................................................ [ 26%] ........................................................................ [ 27%] ......s................................................................. [ 29%] ...............................s...sssssssssssssssssssssssssssssssssssss [ 30%] ssssssssssssssssssssssssssssssssssssssssssssssssssssssssssssssssssssssss [ 32%] ssssssssssssssssssssssssssssssssssssssssssssssssssssssssssssssssssssssss [ 33%] ssssssssssssssssssssssssssssssssssssssssssssssssssssssssssssssssssssssss [ 34%] ssssssssssssssssssssssssssssssssssssssssssssssssssssssssssssssssssssssss [ 36%] ssssssssssssssssssssssssssssssssssssssssssssssssssssssssssssssssssssssss [ 37%] ssssssssssssssssssssssssssssssssssssssssssssssssssssssssssssssssssssssss [ 39%] ssssssssssssssssssssssssssssssssssssssssssssssssssssssssssssssssssssssss [ 40%] ssssssssssssssssssssssssssssssssssssssssssssssssssssssssssssssssssssssss [ 41%] ssssssssssssssssssssssssssssssssssssssssssssssssssssssssssssssssssssssss [ 43%] ssssssssssssssssssssssssssssssssssssssssssssssssssssssssssssssssssssssss [ 44%] ssssssssssssssssssssssssssssssssssssssssssssssssssssssssssssssssssssssss [ 46%] ssssssssssssssssssssssssssssssssssssssssssssssssssssssssssssssssssssssss [ 47%] ssssssssssssssssssssssssssssssssssssssssssssssssssssssssssssssssssssssss [ 48%] ssssssssssssssssssssssssssssssssssssssssssssssssss. [ 49%] zarr/tests/test_creation.py ............................................ [ 50%] .......................................ss [ 51%] zarr/tests/test_dim_separator.py ...x...........x...........x.........x. [ 52%] ...x....x [ 52%] zarr/tests/test_filters.py ....... [ 52%] zarr/tests/test_hierarchy.py ..........................sssssssssssssssss [ 53%] sssssss........................ssssssssssssssssssssssss................. [ 54%] .......sssssssssssssssssssssssssssssssssssssssssssssssssssssssssssssssss [ 56%] sssssss.................................................ssssssssssssssss [ 57%] sssssssss..........................ssssssssssssssssssssssssss........... [ 59%] .............ssssssssssssssssssssssss........................sssssssssss [ 60%] sssssssssssss........................ssssssssssssssssssssssssFFFFFFFFFFF [ 61%] FFFFFFFFFFFFFssssssssssssssssssssssss........................sssssssssss [ 63%] sssssssssssss.........................sssssssssssssssssssssssss......... [ 64%] ...............ssssssssssssssssssssssss.......s. [ 65%] zarr/tests/test_indexing.py ....................................xx...... [ 66%] [ 66%] zarr/tests/test_info.py .. [ 66%] zarr/tests/test_meta.py ................... [ 67%] zarr/tests/test_meta_array.py .sss.sss.sss.sss.sss.sss.sss.sss.sss.sss.s [ 67%] ss.sss [ 67%] zarr/tests/test_n5.py ..... [ 68%] zarr/tests/test_storage.py ....................s........................ [ 68%] .s.........................s............................................ [ 70%] ..................................ss..............s..................... [ 71%] ........................................................................ [ 73%] ....................................ss.................................. [ 74%] ..............................s......................................... [ 75%] ........................................................................ [ 77%] ....................................................FFFFFFFFFFFFFFFFFFFF [ 78%] FFFFF....................................................sssssssssssssss [ 80%] sssssssssssssssssssssssssssssssssss..............s.................sssss [ 81%] ssssssssssssssssssssss....... [ 82%] zarr/tests/test_storage_v3.py ssssssssssssssssssssssssssssssssssssssssss [ 82%] ssssssssssssssssssssssssssssssssssssssssssssssssssssssssssssssssssssssss [ 84%] ssssssssssssssssssssssssssssssssssssssssssssssssssssssssssssssssssssssss [ 85%] ssssssssssssssssssssssssssssssssssssssssssssssssssssssssssssssssssssssss [ 87%] ssssssssssssssssssssssssssssssssssssssssssssssssssssssssssssssssssssssss [ 88%] ssssssssssssssssssssssssssssssssssssssssssssssssssssssssssssssssssssssss [ 90%] ssssssssssssssssssssssssssssssssssssssssssssssssssssssssssssssssssssssss [ 91%] ssssssssssssssssssssssssssssssssssssssssssssssssssssssssssssssssssssssss [ 92%] ssssssssssssssssssssssssssssssssssssssssssssssssssssssssssssssssss [ 94%] zarr/tests/test_sync.py ................................................ [ 95%] ........................................................................ [ 96%] ........................................................................ [ 97%] ........................................................................ [ 99%] ................... [ 99%] zarr/tests/test_util.py ................. [100%] =================================== FAILURES =================================== ___________________ TestArrayWithLMDBStore.test_0len_dim_1d ____________________ self = def test_0len_dim_1d(self): # Test behaviour for 1D array with zero-length dimension. > z = self.create_array(shape=0, fill_value=0) zarr/tests/test_core.py:892: _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ zarr/tests/test_core.py:2108: in create_array store = LMDBStore(path, buffers=True) _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ self = path = '/tmp/tmpquiw30cj.lmdb', buffers = True, dimension_separator = None kwargs = {'map_async': False, 'map_size': 1099511627776, 'max_spare_txns': 8, 'meminit': False, ...} lmdb = map_size = 1099511627776, writemap = True, max_spare_txns = 8 def __init__(self, path, buffers=True, dimension_separator=None, **kwargs): import lmdb # set default memory map size to something larger than the lmdb default, which is # very likely to be too small for any moderate array (logic copied from zict) map_size = (2**40 if sys.maxsize >= 2**32 else 2**28) kwargs.setdefault('map_size', map_size) # don't initialize buffers to zero by default, shouldn't be necessary kwargs.setdefault('meminit', False) # decide whether to use the writemap option based on the operating system's # support for sparse files - writemap requires sparse file support otherwise # the whole# `map_size` may be reserved up front on disk (logic copied from zict) writemap = sys.platform.startswith('linux') kwargs.setdefault('writemap', writemap) # decide options for when data are flushed to disk - choose to delay syncing # data to filesystem, otherwise pay a large performance penalty (zict also does # this) kwargs.setdefault('metasync', False) kwargs.setdefault('sync', False) kwargs.setdefault('map_async', False) # set default option for number of cached transactions max_spare_txns = multiprocessing.cpu_count() kwargs.setdefault('max_spare_txns', max_spare_txns) # normalize path path = os.path.abspath(path) # open database > self.db = lmdb.open(path, **kwargs) E lmdb.MemoryError: /tmp/tmpquiw30cj.lmdb: Cannot allocate memory zarr/storage.py:2223: MemoryError ___________________ TestArrayWithLMDBStore.test_0len_dim_2d ____________________ self = def test_0len_dim_2d(self): # Test behavioud for 2D array with a zero-length dimension. > z = self.create_array(shape=(10, 0), fill_value=0) zarr/tests/test_core.py:927: _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ zarr/tests/test_core.py:2108: in create_array store = LMDBStore(path, buffers=True) _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ self = path = '/tmp/tmpr1qbc6bf.lmdb', buffers = True, dimension_separator = None kwargs = {'map_async': False, 'map_size': 1099511627776, 'max_spare_txns': 8, 'meminit': False, ...} lmdb = map_size = 1099511627776, writemap = True, max_spare_txns = 8 def __init__(self, path, buffers=True, dimension_separator=None, **kwargs): import lmdb # set default memory map size to something larger than the lmdb default, which is # very likely to be too small for any moderate array (logic copied from zict) map_size = (2**40 if sys.maxsize >= 2**32 else 2**28) kwargs.setdefault('map_size', map_size) # don't initialize buffers to zero by default, shouldn't be necessary kwargs.setdefault('meminit', False) # decide whether to use the writemap option based on the operating system's # support for sparse files - writemap requires sparse file support otherwise # the whole# `map_size` may be reserved up front on disk (logic copied from zict) writemap = sys.platform.startswith('linux') kwargs.setdefault('writemap', writemap) # decide options for when data are flushed to disk - choose to delay syncing # data to filesystem, otherwise pay a large performance penalty (zict also does # this) kwargs.setdefault('metasync', False) kwargs.setdefault('sync', False) kwargs.setdefault('map_async', False) # set default option for number of cached transactions max_spare_txns = multiprocessing.cpu_count() kwargs.setdefault('max_spare_txns', max_spare_txns) # normalize path path = os.path.abspath(path) # open database > self.db = lmdb.open(path, **kwargs) E lmdb.MemoryError: /tmp/tmpr1qbc6bf.lmdb: Cannot allocate memory zarr/storage.py:2223: MemoryError ____________________ TestArrayWithLMDBStore.test_append_1d _____________________ self = def test_append_1d(self): a = np.arange(105) > z = self.create_array(shape=a.shape, chunks=10, dtype=a.dtype) zarr/tests/test_core.py:712: _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ zarr/tests/test_core.py:2108: in create_array store = LMDBStore(path, buffers=True) _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ self = path = '/tmp/tmplvj9eu4z.lmdb', buffers = True, dimension_separator = None kwargs = {'map_async': False, 'map_size': 1099511627776, 'max_spare_txns': 8, 'meminit': False, ...} lmdb = map_size = 1099511627776, writemap = True, max_spare_txns = 8 def __init__(self, path, buffers=True, dimension_separator=None, **kwargs): import lmdb # set default memory map size to something larger than the lmdb default, which is # very likely to be too small for any moderate array (logic copied from zict) map_size = (2**40 if sys.maxsize >= 2**32 else 2**28) kwargs.setdefault('map_size', map_size) # don't initialize buffers to zero by default, shouldn't be necessary kwargs.setdefault('meminit', False) # decide whether to use the writemap option based on the operating system's # support for sparse files - writemap requires sparse file support otherwise # the whole# `map_size` may be reserved up front on disk (logic copied from zict) writemap = sys.platform.startswith('linux') kwargs.setdefault('writemap', writemap) # decide options for when data are flushed to disk - choose to delay syncing # data to filesystem, otherwise pay a large performance penalty (zict also does # this) kwargs.setdefault('metasync', False) kwargs.setdefault('sync', False) kwargs.setdefault('map_async', False) # set default option for number of cached transactions max_spare_txns = multiprocessing.cpu_count() kwargs.setdefault('max_spare_txns', max_spare_txns) # normalize path path = os.path.abspath(path) # open database > self.db = lmdb.open(path, **kwargs) E lmdb.MemoryError: /tmp/tmplvj9eu4z.lmdb: Cannot allocate memory zarr/storage.py:2223: MemoryError ____________________ TestArrayWithLMDBStore.test_append_2d _____________________ self = def test_append_2d(self): a = np.arange(105*105, dtype='i4').reshape((105, 105)) > z = self.create_array(shape=a.shape, chunks=(10, 10), dtype=a.dtype) zarr/tests/test_core.py:741: _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ zarr/tests/test_core.py:2108: in create_array store = LMDBStore(path, buffers=True) _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ self = path = '/tmp/tmpoq07xgu0.lmdb', buffers = True, dimension_separator = None kwargs = {'map_async': False, 'map_size': 1099511627776, 'max_spare_txns': 8, 'meminit': False, ...} lmdb = map_size = 1099511627776, writemap = True, max_spare_txns = 8 def __init__(self, path, buffers=True, dimension_separator=None, **kwargs): import lmdb # set default memory map size to something larger than the lmdb default, which is # very likely to be too small for any moderate array (logic copied from zict) map_size = (2**40 if sys.maxsize >= 2**32 else 2**28) kwargs.setdefault('map_size', map_size) # don't initialize buffers to zero by default, shouldn't be necessary kwargs.setdefault('meminit', False) # decide whether to use the writemap option based on the operating system's # support for sparse files - writemap requires sparse file support otherwise # the whole# `map_size` may be reserved up front on disk (logic copied from zict) writemap = sys.platform.startswith('linux') kwargs.setdefault('writemap', writemap) # decide options for when data are flushed to disk - choose to delay syncing # data to filesystem, otherwise pay a large performance penalty (zict also does # this) kwargs.setdefault('metasync', False) kwargs.setdefault('sync', False) kwargs.setdefault('map_async', False) # set default option for number of cached transactions max_spare_txns = multiprocessing.cpu_count() kwargs.setdefault('max_spare_txns', max_spare_txns) # normalize path path = os.path.abspath(path) # open database > self.db = lmdb.open(path, **kwargs) E lmdb.MemoryError: /tmp/tmpoq07xgu0.lmdb: Cannot allocate memory zarr/storage.py:2223: MemoryError __________________ TestArrayWithLMDBStore.test_append_2d_axis __________________ self = def test_append_2d_axis(self): a = np.arange(105*105, dtype='i4').reshape((105, 105)) > z = self.create_array(shape=a.shape, chunks=(10, 10), dtype=a.dtype) zarr/tests/test_core.py:763: _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ zarr/tests/test_core.py:2108: in create_array store = LMDBStore(path, buffers=True) _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ self = path = '/tmp/tmpbata8tn6.lmdb', buffers = True, dimension_separator = None kwargs = {'map_async': False, 'map_size': 1099511627776, 'max_spare_txns': 8, 'meminit': False, ...} lmdb = map_size = 1099511627776, writemap = True, max_spare_txns = 8 def __init__(self, path, buffers=True, dimension_separator=None, **kwargs): import lmdb # set default memory map size to something larger than the lmdb default, which is # very likely to be too small for any moderate array (logic copied from zict) map_size = (2**40 if sys.maxsize >= 2**32 else 2**28) kwargs.setdefault('map_size', map_size) # don't initialize buffers to zero by default, shouldn't be necessary kwargs.setdefault('meminit', False) # decide whether to use the writemap option based on the operating system's # support for sparse files - writemap requires sparse file support otherwise # the whole# `map_size` may be reserved up front on disk (logic copied from zict) writemap = sys.platform.startswith('linux') kwargs.setdefault('writemap', writemap) # decide options for when data are flushed to disk - choose to delay syncing # data to filesystem, otherwise pay a large performance penalty (zict also does # this) kwargs.setdefault('metasync', False) kwargs.setdefault('sync', False) kwargs.setdefault('map_async', False) # set default option for number of cached transactions max_spare_txns = multiprocessing.cpu_count() kwargs.setdefault('max_spare_txns', max_spare_txns) # normalize path path = os.path.abspath(path) # open database > self.db = lmdb.open(path, **kwargs) E lmdb.MemoryError: /tmp/tmpbata8tn6.lmdb: Cannot allocate memory zarr/storage.py:2223: MemoryError _________________ TestArrayWithLMDBStore.test_append_bad_shape _________________ self = def test_append_bad_shape(self): a = np.arange(100) > z = self.create_array(shape=a.shape, chunks=10, dtype=a.dtype) zarr/tests/test_core.py:782: _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ zarr/tests/test_core.py:2108: in create_array store = LMDBStore(path, buffers=True) _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ self = path = '/tmp/tmpaq1jg8c4.lmdb', buffers = True, dimension_separator = None kwargs = {'map_async': False, 'map_size': 1099511627776, 'max_spare_txns': 8, 'meminit': False, ...} lmdb = map_size = 1099511627776, writemap = True, max_spare_txns = 8 def __init__(self, path, buffers=True, dimension_separator=None, **kwargs): import lmdb # set default memory map size to something larger than the lmdb default, which is # very likely to be too small for any moderate array (logic copied from zict) map_size = (2**40 if sys.maxsize >= 2**32 else 2**28) kwargs.setdefault('map_size', map_size) # don't initialize buffers to zero by default, shouldn't be necessary kwargs.setdefault('meminit', False) # decide whether to use the writemap option based on the operating system's # support for sparse files - writemap requires sparse file support otherwise # the whole# `map_size` may be reserved up front on disk (logic copied from zict) writemap = sys.platform.startswith('linux') kwargs.setdefault('writemap', writemap) # decide options for when data are flushed to disk - choose to delay syncing # data to filesystem, otherwise pay a large performance penalty (zict also does # this) kwargs.setdefault('metasync', False) kwargs.setdefault('sync', False) kwargs.setdefault('map_async', False) # set default option for number of cached transactions max_spare_txns = multiprocessing.cpu_count() kwargs.setdefault('max_spare_txns', max_spare_txns) # normalize path path = os.path.abspath(path) # open database > self.db = lmdb.open(path, **kwargs) E lmdb.MemoryError: /tmp/tmpaq1jg8c4.lmdb: Cannot allocate memory zarr/storage.py:2223: MemoryError _____________________ TestArrayWithLMDBStore.test_array_0d _____________________ self = def test_array_0d(self): # test behaviour for array with 0 dimensions # setup a = np.zeros(()) > z = self.create_array(shape=(), dtype=a.dtype, fill_value=0, write_empty_chunks=False) zarr/tests/test_core.py:968: _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ zarr/tests/test_core.py:2108: in create_array store = LMDBStore(path, buffers=True) _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ self = path = '/tmp/tmp6nfl33fk.lmdb', buffers = True, dimension_separator = None kwargs = {'map_async': False, 'map_size': 1099511627776, 'max_spare_txns': 8, 'meminit': False, ...} lmdb = map_size = 1099511627776, writemap = True, max_spare_txns = 8 def __init__(self, path, buffers=True, dimension_separator=None, **kwargs): import lmdb # set default memory map size to something larger than the lmdb default, which is # very likely to be too small for any moderate array (logic copied from zict) map_size = (2**40 if sys.maxsize >= 2**32 else 2**28) kwargs.setdefault('map_size', map_size) # don't initialize buffers to zero by default, shouldn't be necessary kwargs.setdefault('meminit', False) # decide whether to use the writemap option based on the operating system's # support for sparse files - writemap requires sparse file support otherwise # the whole# `map_size` may be reserved up front on disk (logic copied from zict) writemap = sys.platform.startswith('linux') kwargs.setdefault('writemap', writemap) # decide options for when data are flushed to disk - choose to delay syncing # data to filesystem, otherwise pay a large performance penalty (zict also does # this) kwargs.setdefault('metasync', False) kwargs.setdefault('sync', False) kwargs.setdefault('map_async', False) # set default option for number of cached transactions max_spare_txns = multiprocessing.cpu_count() kwargs.setdefault('max_spare_txns', max_spare_txns) # normalize path path = os.path.abspath(path) # open database > self.db = lmdb.open(path, **kwargs) E lmdb.MemoryError: /tmp/tmp6nfl33fk.lmdb: Cannot allocate memory zarr/storage.py:2223: MemoryError _____________________ TestArrayWithLMDBStore.test_array_1d _____________________ self = def test_array_1d(self): a = np.arange(1050) > z = self.create_array(shape=a.shape, chunks=100, dtype=a.dtype) zarr/tests/test_core.py:205: _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ zarr/tests/test_core.py:2108: in create_array store = LMDBStore(path, buffers=True) _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ self = path = '/tmp/tmp__a7ct7g.lmdb', buffers = True, dimension_separator = None kwargs = {'map_async': False, 'map_size': 1099511627776, 'max_spare_txns': 8, 'meminit': False, ...} lmdb = map_size = 1099511627776, writemap = True, max_spare_txns = 8 def __init__(self, path, buffers=True, dimension_separator=None, **kwargs): import lmdb # set default memory map size to something larger than the lmdb default, which is # very likely to be too small for any moderate array (logic copied from zict) map_size = (2**40 if sys.maxsize >= 2**32 else 2**28) kwargs.setdefault('map_size', map_size) # don't initialize buffers to zero by default, shouldn't be necessary kwargs.setdefault('meminit', False) # decide whether to use the writemap option based on the operating system's # support for sparse files - writemap requires sparse file support otherwise # the whole# `map_size` may be reserved up front on disk (logic copied from zict) writemap = sys.platform.startswith('linux') kwargs.setdefault('writemap', writemap) # decide options for when data are flushed to disk - choose to delay syncing # data to filesystem, otherwise pay a large performance penalty (zict also does # this) kwargs.setdefault('metasync', False) kwargs.setdefault('sync', False) kwargs.setdefault('map_async', False) # set default option for number of cached transactions max_spare_txns = multiprocessing.cpu_count() kwargs.setdefault('max_spare_txns', max_spare_txns) # normalize path path = os.path.abspath(path) # open database > self.db = lmdb.open(path, **kwargs) E lmdb.MemoryError: /tmp/tmp__a7ct7g.lmdb: Cannot allocate memory zarr/storage.py:2223: MemoryError _______________ TestArrayWithLMDBStore.test_array_1d_fill_value ________________ self = def test_array_1d_fill_value(self): for fill_value in -1, 0, 1, 10: a = np.arange(1050) f = np.empty_like(a) f.fill(fill_value) > z = self.create_array(shape=a.shape, chunks=100, dtype=a.dtype, fill_value=fill_value) zarr/tests/test_core.py:291: _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ zarr/tests/test_core.py:2108: in create_array store = LMDBStore(path, buffers=True) _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ self = path = '/tmp/tmpj98rr6a9.lmdb', buffers = True, dimension_separator = None kwargs = {'map_async': False, 'map_size': 1099511627776, 'max_spare_txns': 8, 'meminit': False, ...} lmdb = map_size = 1099511627776, writemap = True, max_spare_txns = 8 def __init__(self, path, buffers=True, dimension_separator=None, **kwargs): import lmdb # set default memory map size to something larger than the lmdb default, which is # very likely to be too small for any moderate array (logic copied from zict) map_size = (2**40 if sys.maxsize >= 2**32 else 2**28) kwargs.setdefault('map_size', map_size) # don't initialize buffers to zero by default, shouldn't be necessary kwargs.setdefault('meminit', False) # decide whether to use the writemap option based on the operating system's # support for sparse files - writemap requires sparse file support otherwise # the whole# `map_size` may be reserved up front on disk (logic copied from zict) writemap = sys.platform.startswith('linux') kwargs.setdefault('writemap', writemap) # decide options for when data are flushed to disk - choose to delay syncing # data to filesystem, otherwise pay a large performance penalty (zict also does # this) kwargs.setdefault('metasync', False) kwargs.setdefault('sync', False) kwargs.setdefault('map_async', False) # set default option for number of cached transactions max_spare_txns = multiprocessing.cpu_count() kwargs.setdefault('max_spare_txns', max_spare_txns) # normalize path path = os.path.abspath(path) # open database > self.db = lmdb.open(path, **kwargs) E lmdb.MemoryError: /tmp/tmpj98rr6a9.lmdb: Cannot allocate memory zarr/storage.py:2223: MemoryError _______________ TestArrayWithLMDBStore.test_array_1d_selections ________________ self = def test_array_1d_selections(self): # light test here, full tests in test_indexing # setup a = np.arange(1050) > z = self.create_array(shape=a.shape, chunks=100, dtype=a.dtype) zarr/tests/test_core.py:325: _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ zarr/tests/test_core.py:2108: in create_array store = LMDBStore(path, buffers=True) _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ self = path = '/tmp/tmp6dqij_xz.lmdb', buffers = True, dimension_separator = None kwargs = {'map_async': False, 'map_size': 1099511627776, 'max_spare_txns': 8, 'meminit': False, ...} lmdb = map_size = 1099511627776, writemap = True, max_spare_txns = 8 def __init__(self, path, buffers=True, dimension_separator=None, **kwargs): import lmdb # set default memory map size to something larger than the lmdb default, which is # very likely to be too small for any moderate array (logic copied from zict) map_size = (2**40 if sys.maxsize >= 2**32 else 2**28) kwargs.setdefault('map_size', map_size) # don't initialize buffers to zero by default, shouldn't be necessary kwargs.setdefault('meminit', False) # decide whether to use the writemap option based on the operating system's # support for sparse files - writemap requires sparse file support otherwise # the whole# `map_size` may be reserved up front on disk (logic copied from zict) writemap = sys.platform.startswith('linux') kwargs.setdefault('writemap', writemap) # decide options for when data are flushed to disk - choose to delay syncing # data to filesystem, otherwise pay a large performance penalty (zict also does # this) kwargs.setdefault('metasync', False) kwargs.setdefault('sync', False) kwargs.setdefault('map_async', False) # set default option for number of cached transactions max_spare_txns = multiprocessing.cpu_count() kwargs.setdefault('max_spare_txns', max_spare_txns) # normalize path path = os.path.abspath(path) # open database > self.db = lmdb.open(path, **kwargs) E lmdb.MemoryError: /tmp/tmp6dqij_xz.lmdb: Cannot allocate memory zarr/storage.py:2223: MemoryError _______________ TestArrayWithLMDBStore.test_array_1d_set_scalar ________________ self = def test_array_1d_set_scalar(self): # test setting the contents of an array with a scalar value # setup a = np.zeros(100) > z = self.create_array(shape=a.shape, chunks=10, dtype=a.dtype) zarr/tests/test_core.py:306: _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ zarr/tests/test_core.py:2108: in create_array store = LMDBStore(path, buffers=True) _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ self = path = '/tmp/tmpdg0vqdxt.lmdb', buffers = True, dimension_separator = None kwargs = {'map_async': False, 'map_size': 1099511627776, 'max_spare_txns': 8, 'meminit': False, ...} lmdb = map_size = 1099511627776, writemap = True, max_spare_txns = 8 def __init__(self, path, buffers=True, dimension_separator=None, **kwargs): import lmdb # set default memory map size to something larger than the lmdb default, which is # very likely to be too small for any moderate array (logic copied from zict) map_size = (2**40 if sys.maxsize >= 2**32 else 2**28) kwargs.setdefault('map_size', map_size) # don't initialize buffers to zero by default, shouldn't be necessary kwargs.setdefault('meminit', False) # decide whether to use the writemap option based on the operating system's # support for sparse files - writemap requires sparse file support otherwise # the whole# `map_size` may be reserved up front on disk (logic copied from zict) writemap = sys.platform.startswith('linux') kwargs.setdefault('writemap', writemap) # decide options for when data are flushed to disk - choose to delay syncing # data to filesystem, otherwise pay a large performance penalty (zict also does # this) kwargs.setdefault('metasync', False) kwargs.setdefault('sync', False) kwargs.setdefault('map_async', False) # set default option for number of cached transactions max_spare_txns = multiprocessing.cpu_count() kwargs.setdefault('max_spare_txns', max_spare_txns) # normalize path path = os.path.abspath(path) # open database > self.db = lmdb.open(path, **kwargs) E lmdb.MemoryError: /tmp/tmpdg0vqdxt.lmdb: Cannot allocate memory zarr/storage.py:2223: MemoryError _____________________ TestArrayWithLMDBStore.test_array_2d _____________________ self = def test_array_2d(self): a = np.arange(10000).reshape((1000, 10)) > z = self.create_array(shape=a.shape, chunks=(100, 2), dtype=a.dtype) zarr/tests/test_core.py:367: _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ zarr/tests/test_core.py:2108: in create_array store = LMDBStore(path, buffers=True) _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ self = path = '/tmp/tmp6t6021vs.lmdb', buffers = True, dimension_separator = None kwargs = {'map_async': False, 'map_size': 1099511627776, 'max_spare_txns': 8, 'meminit': False, ...} lmdb = map_size = 1099511627776, writemap = True, max_spare_txns = 8 def __init__(self, path, buffers=True, dimension_separator=None, **kwargs): import lmdb # set default memory map size to something larger than the lmdb default, which is # very likely to be too small for any moderate array (logic copied from zict) map_size = (2**40 if sys.maxsize >= 2**32 else 2**28) kwargs.setdefault('map_size', map_size) # don't initialize buffers to zero by default, shouldn't be necessary kwargs.setdefault('meminit', False) # decide whether to use the writemap option based on the operating system's # support for sparse files - writemap requires sparse file support otherwise # the whole# `map_size` may be reserved up front on disk (logic copied from zict) writemap = sys.platform.startswith('linux') kwargs.setdefault('writemap', writemap) # decide options for when data are flushed to disk - choose to delay syncing # data to filesystem, otherwise pay a large performance penalty (zict also does # this) kwargs.setdefault('metasync', False) kwargs.setdefault('sync', False) kwargs.setdefault('map_async', False) # set default option for number of cached transactions max_spare_txns = multiprocessing.cpu_count() kwargs.setdefault('max_spare_txns', max_spare_txns) # normalize path path = os.path.abspath(path) # open database > self.db = lmdb.open(path, **kwargs) E lmdb.MemoryError: /tmp/tmp6t6021vs.lmdb: Cannot allocate memory zarr/storage.py:2223: MemoryError ________________ TestArrayWithLMDBStore.test_array_2d_edge_case ________________ self = def test_array_2d_edge_case(self): # this fails with filters - chunks extend beyond edge of array, messes with delta # filter if no fill value? shape = 1000, 10 chunks = 300, 30 dtype = 'i8' > z = self.create_array(shape=shape, dtype=dtype, chunks=chunks) zarr/tests/test_core.py:480: _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ zarr/tests/test_core.py:2108: in create_array store = LMDBStore(path, buffers=True) _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ self = path = '/tmp/tmpdcnwc5cs.lmdb', buffers = True, dimension_separator = None kwargs = {'map_async': False, 'map_size': 1099511627776, 'max_spare_txns': 8, 'meminit': False, ...} lmdb = map_size = 1099511627776, writemap = True, max_spare_txns = 8 def __init__(self, path, buffers=True, dimension_separator=None, **kwargs): import lmdb # set default memory map size to something larger than the lmdb default, which is # very likely to be too small for any moderate array (logic copied from zict) map_size = (2**40 if sys.maxsize >= 2**32 else 2**28) kwargs.setdefault('map_size', map_size) # don't initialize buffers to zero by default, shouldn't be necessary kwargs.setdefault('meminit', False) # decide whether to use the writemap option based on the operating system's # support for sparse files - writemap requires sparse file support otherwise # the whole# `map_size` may be reserved up front on disk (logic copied from zict) writemap = sys.platform.startswith('linux') kwargs.setdefault('writemap', writemap) # decide options for when data are flushed to disk - choose to delay syncing # data to filesystem, otherwise pay a large performance penalty (zict also does # this) kwargs.setdefault('metasync', False) kwargs.setdefault('sync', False) kwargs.setdefault('map_async', False) # set default option for number of cached transactions max_spare_txns = multiprocessing.cpu_count() kwargs.setdefault('max_spare_txns', max_spare_txns) # normalize path path = os.path.abspath(path) # open database > self.db = lmdb.open(path, **kwargs) E lmdb.MemoryError: /tmp/tmpdcnwc5cs.lmdb: Cannot allocate memory zarr/storage.py:2223: MemoryError _________________ TestArrayWithLMDBStore.test_array_2d_partial _________________ self = def test_array_2d_partial(self): > z = self.create_array(shape=(1000, 10), chunks=(100, 2), dtype='i4', fill_value=0) zarr/tests/test_core.py:489: _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ zarr/tests/test_core.py:2108: in create_array store = LMDBStore(path, buffers=True) _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ self = path = '/tmp/tmp40x1fj64.lmdb', buffers = True, dimension_separator = None kwargs = {'map_async': False, 'map_size': 1099511627776, 'max_spare_txns': 8, 'meminit': False, ...} lmdb = map_size = 1099511627776, writemap = True, max_spare_txns = 8 def __init__(self, path, buffers=True, dimension_separator=None, **kwargs): import lmdb # set default memory map size to something larger than the lmdb default, which is # very likely to be too small for any moderate array (logic copied from zict) map_size = (2**40 if sys.maxsize >= 2**32 else 2**28) kwargs.setdefault('map_size', map_size) # don't initialize buffers to zero by default, shouldn't be necessary kwargs.setdefault('meminit', False) # decide whether to use the writemap option based on the operating system's # support for sparse files - writemap requires sparse file support otherwise # the whole# `map_size` may be reserved up front on disk (logic copied from zict) writemap = sys.platform.startswith('linux') kwargs.setdefault('writemap', writemap) # decide options for when data are flushed to disk - choose to delay syncing # data to filesystem, otherwise pay a large performance penalty (zict also does # this) kwargs.setdefault('metasync', False) kwargs.setdefault('sync', False) kwargs.setdefault('map_async', False) # set default option for number of cached transactions max_spare_txns = multiprocessing.cpu_count() kwargs.setdefault('max_spare_txns', max_spare_txns) # normalize path path = os.path.abspath(path) # open database > self.db = lmdb.open(path, **kwargs) E lmdb.MemoryError: /tmp/tmp40x1fj64.lmdb: Cannot allocate memory zarr/storage.py:2223: MemoryError ________________ TestArrayWithLMDBStore.test_array_dtype_shape _________________ self = def test_array_dtype_shape(self): dt = "(2, 2)f4" # setup some data d = np.array([((0, 1), (1, 2)), ((1, 2), (2, 3)), ((2, 3), (3, 4))], dtype=dt) for a in (d, d[:0]): for fill_value in None, 0: > z = self.create_array(shape=a.shape[:-2], chunks=2, dtype=dt, fill_value=fill_value) zarr/tests/test_core.py:1051: _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ zarr/tests/test_core.py:2108: in create_array store = LMDBStore(path, buffers=True) _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ self = path = '/tmp/tmpcz6guxae.lmdb', buffers = True, dimension_separator = None kwargs = {'map_async': False, 'map_size': 1099511627776, 'max_spare_txns': 8, 'meminit': False, ...} lmdb = map_size = 1099511627776, writemap = True, max_spare_txns = 8 def __init__(self, path, buffers=True, dimension_separator=None, **kwargs): import lmdb # set default memory map size to something larger than the lmdb default, which is # very likely to be too small for any moderate array (logic copied from zict) map_size = (2**40 if sys.maxsize >= 2**32 else 2**28) kwargs.setdefault('map_size', map_size) # don't initialize buffers to zero by default, shouldn't be necessary kwargs.setdefault('meminit', False) # decide whether to use the writemap option based on the operating system's # support for sparse files - writemap requires sparse file support otherwise # the whole# `map_size` may be reserved up front on disk (logic copied from zict) writemap = sys.platform.startswith('linux') kwargs.setdefault('writemap', writemap) # decide options for when data are flushed to disk - choose to delay syncing # data to filesystem, otherwise pay a large performance penalty (zict also does # this) kwargs.setdefault('metasync', False) kwargs.setdefault('sync', False) kwargs.setdefault('map_async', False) # set default option for number of cached transactions max_spare_txns = multiprocessing.cpu_count() kwargs.setdefault('max_spare_txns', max_spare_txns) # normalize path path = os.path.abspath(path) # open database > self.db = lmdb.open(path, **kwargs) E lmdb.MemoryError: /tmp/tmpcz6guxae.lmdb: Cannot allocate memory zarr/storage.py:2223: MemoryError ___________________ TestArrayWithLMDBStore.test_array_order ____________________ self = def test_array_order(self): # 1D a = np.arange(1050) for order in 'C', 'F': > z = self.create_array(shape=a.shape, chunks=100, dtype=a.dtype, order=order) zarr/tests/test_core.py:535: _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ zarr/tests/test_core.py:2108: in create_array store = LMDBStore(path, buffers=True) _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ self = path = '/tmp/tmpg501j2hh.lmdb', buffers = True, dimension_separator = None kwargs = {'map_async': False, 'map_size': 1099511627776, 'max_spare_txns': 8, 'meminit': False, ...} lmdb = map_size = 1099511627776, writemap = True, max_spare_txns = 8 def __init__(self, path, buffers=True, dimension_separator=None, **kwargs): import lmdb # set default memory map size to something larger than the lmdb default, which is # very likely to be too small for any moderate array (logic copied from zict) map_size = (2**40 if sys.maxsize >= 2**32 else 2**28) kwargs.setdefault('map_size', map_size) # don't initialize buffers to zero by default, shouldn't be necessary kwargs.setdefault('meminit', False) # decide whether to use the writemap option based on the operating system's # support for sparse files - writemap requires sparse file support otherwise # the whole# `map_size` may be reserved up front on disk (logic copied from zict) writemap = sys.platform.startswith('linux') kwargs.setdefault('writemap', writemap) # decide options for when data are flushed to disk - choose to delay syncing # data to filesystem, otherwise pay a large performance penalty (zict also does # this) kwargs.setdefault('metasync', False) kwargs.setdefault('sync', False) kwargs.setdefault('map_async', False) # set default option for number of cached transactions max_spare_txns = multiprocessing.cpu_count() kwargs.setdefault('max_spare_txns', max_spare_txns) # normalize path path = os.path.abspath(path) # open database > self.db = lmdb.open(path, **kwargs) E lmdb.MemoryError: /tmp/tmpg501j2hh.lmdb: Cannot allocate memory zarr/storage.py:2223: MemoryError ____________________ TestArrayWithLMDBStore.test_attributes ____________________ self = def test_attributes(self): > a = self.create_array(shape=10, chunks=10, dtype='i8') zarr/tests/test_core.py:1539: _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ zarr/tests/test_core.py:2108: in create_array store = LMDBStore(path, buffers=True) _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ self = path = '/tmp/tmpb_y5vyls.lmdb', buffers = True, dimension_separator = None kwargs = {'map_async': False, 'map_size': 1099511627776, 'max_spare_txns': 8, 'meminit': False, ...} lmdb = map_size = 1099511627776, writemap = True, max_spare_txns = 8 def __init__(self, path, buffers=True, dimension_separator=None, **kwargs): import lmdb # set default memory map size to something larger than the lmdb default, which is # very likely to be too small for any moderate array (logic copied from zict) map_size = (2**40 if sys.maxsize >= 2**32 else 2**28) kwargs.setdefault('map_size', map_size) # don't initialize buffers to zero by default, shouldn't be necessary kwargs.setdefault('meminit', False) # decide whether to use the writemap option based on the operating system's # support for sparse files - writemap requires sparse file support otherwise # the whole# `map_size` may be reserved up front on disk (logic copied from zict) writemap = sys.platform.startswith('linux') kwargs.setdefault('writemap', writemap) # decide options for when data are flushed to disk - choose to delay syncing # data to filesystem, otherwise pay a large performance penalty (zict also does # this) kwargs.setdefault('metasync', False) kwargs.setdefault('sync', False) kwargs.setdefault('map_async', False) # set default option for number of cached transactions max_spare_txns = multiprocessing.cpu_count() kwargs.setdefault('max_spare_txns', max_spare_txns) # normalize path path = os.path.abspath(path) # open database > self.db = lmdb.open(path, **kwargs) E lmdb.MemoryError: /tmp/tmpb_y5vyls.lmdb: Cannot allocate memory zarr/storage.py:2223: MemoryError ___________________ TestArrayWithLMDBStore.test_compressors ____________________ self = def test_compressors(self): compressors = [ None, BZ2(), Blosc(), LZ4(), Zlib(), GZip() ] if LZMA: compressors.append(LZMA()) for compressor in compressors: > a = self.create_array(shape=1000, chunks=100, compressor=compressor) zarr/tests/test_core.py:1519: _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ zarr/tests/test_core.py:2108: in create_array store = LMDBStore(path, buffers=True) _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ self = path = '/tmp/tmpjyeympis.lmdb', buffers = True, dimension_separator = None kwargs = {'map_async': False, 'map_size': 1099511627776, 'max_spare_txns': 8, 'meminit': False, ...} lmdb = map_size = 1099511627776, writemap = True, max_spare_txns = 8 def __init__(self, path, buffers=True, dimension_separator=None, **kwargs): import lmdb # set default memory map size to something larger than the lmdb default, which is # very likely to be too small for any moderate array (logic copied from zict) map_size = (2**40 if sys.maxsize >= 2**32 else 2**28) kwargs.setdefault('map_size', map_size) # don't initialize buffers to zero by default, shouldn't be necessary kwargs.setdefault('meminit', False) # decide whether to use the writemap option based on the operating system's # support for sparse files - writemap requires sparse file support otherwise # the whole# `map_size` may be reserved up front on disk (logic copied from zict) writemap = sys.platform.startswith('linux') kwargs.setdefault('writemap', writemap) # decide options for when data are flushed to disk - choose to delay syncing # data to filesystem, otherwise pay a large performance penalty (zict also does # this) kwargs.setdefault('metasync', False) kwargs.setdefault('sync', False) kwargs.setdefault('map_async', False) # set default option for number of cached transactions max_spare_txns = multiprocessing.cpu_count() kwargs.setdefault('max_spare_txns', max_spare_txns) # normalize path path = os.path.abspath(path) # open database > self.db = lmdb.open(path, **kwargs) E lmdb.MemoryError: /tmp/tmpjyeympis.lmdb: Cannot allocate memory zarr/storage.py:2223: MemoryError ______________________ TestArrayWithLMDBStore.test_dtypes ______________________ self = def test_dtypes(self): # integers for dtype in 'u1', 'u2', 'u4', 'u8', 'i1', 'i2', 'i4', 'i8': > z = self.create_array(shape=10, chunks=3, dtype=dtype) zarr/tests/test_core.py:1129: _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ zarr/tests/test_core.py:2108: in create_array store = LMDBStore(path, buffers=True) _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ self = path = '/tmp/tmpm3gqldoy.lmdb', buffers = True, dimension_separator = None kwargs = {'map_async': False, 'map_size': 1099511627776, 'max_spare_txns': 8, 'meminit': False, ...} lmdb = map_size = 1099511627776, writemap = True, max_spare_txns = 8 def __init__(self, path, buffers=True, dimension_separator=None, **kwargs): import lmdb # set default memory map size to something larger than the lmdb default, which is # very likely to be too small for any moderate array (logic copied from zict) map_size = (2**40 if sys.maxsize >= 2**32 else 2**28) kwargs.setdefault('map_size', map_size) # don't initialize buffers to zero by default, shouldn't be necessary kwargs.setdefault('meminit', False) # decide whether to use the writemap option based on the operating system's # support for sparse files - writemap requires sparse file support otherwise # the whole# `map_size` may be reserved up front on disk (logic copied from zict) writemap = sys.platform.startswith('linux') kwargs.setdefault('writemap', writemap) # decide options for when data are flushed to disk - choose to delay syncing # data to filesystem, otherwise pay a large performance penalty (zict also does # this) kwargs.setdefault('metasync', False) kwargs.setdefault('sync', False) kwargs.setdefault('map_async', False) # set default option for number of cached transactions max_spare_txns = multiprocessing.cpu_count() kwargs.setdefault('max_spare_txns', max_spare_txns) # normalize path path = os.path.abspath(path) # open database > self.db = lmdb.open(path, **kwargs) E lmdb.MemoryError: /tmp/tmpm3gqldoy.lmdb: Cannot allocate memory zarr/storage.py:2223: MemoryError ______________________ TestArrayWithLMDBStore.test_endian ______________________ self = def test_endian(self): dtype = np.dtype('float32') > a1 = self.create_array(shape=1000, chunks=100, dtype=dtype.newbyteorder('<')) zarr/tests/test_core.py:1528: _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ zarr/tests/test_core.py:2108: in create_array store = LMDBStore(path, buffers=True) _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ self = path = '/tmp/tmpurdb5xb0.lmdb', buffers = True, dimension_separator = None kwargs = {'map_async': False, 'map_size': 1099511627776, 'max_spare_txns': 8, 'meminit': False, ...} lmdb = map_size = 1099511627776, writemap = True, max_spare_txns = 8 def __init__(self, path, buffers=True, dimension_separator=None, **kwargs): import lmdb # set default memory map size to something larger than the lmdb default, which is # very likely to be too small for any moderate array (logic copied from zict) map_size = (2**40 if sys.maxsize >= 2**32 else 2**28) kwargs.setdefault('map_size', map_size) # don't initialize buffers to zero by default, shouldn't be necessary kwargs.setdefault('meminit', False) # decide whether to use the writemap option based on the operating system's # support for sparse files - writemap requires sparse file support otherwise # the whole# `map_size` may be reserved up front on disk (logic copied from zict) writemap = sys.platform.startswith('linux') kwargs.setdefault('writemap', writemap) # decide options for when data are flushed to disk - choose to delay syncing # data to filesystem, otherwise pay a large performance penalty (zict also does # this) kwargs.setdefault('metasync', False) kwargs.setdefault('sync', False) kwargs.setdefault('map_async', False) # set default option for number of cached transactions max_spare_txns = multiprocessing.cpu_count() kwargs.setdefault('max_spare_txns', max_spare_txns) # normalize path path = os.path.abspath(path) # open database > self.db = lmdb.open(path, **kwargs) E lmdb.MemoryError: /tmp/tmpurdb5xb0.lmdb: Cannot allocate memory zarr/storage.py:2223: MemoryError ____________________ TestArrayWithLMDBStore.test_hexdigest _____________________ self = def test_hexdigest(self): found = [] # Check basic 1-D array > z = self.create_array(shape=(1050,), chunks=100, dtype=' path = '/tmp/tmpf5bkmdw2.lmdb', buffers = True, dimension_separator = None kwargs = {'map_async': False, 'map_size': 1099511627776, 'max_spare_txns': 8, 'meminit': False, ...} lmdb = map_size = 1099511627776, writemap = True, max_spare_txns = 8 def __init__(self, path, buffers=True, dimension_separator=None, **kwargs): import lmdb # set default memory map size to something larger than the lmdb default, which is # very likely to be too small for any moderate array (logic copied from zict) map_size = (2**40 if sys.maxsize >= 2**32 else 2**28) kwargs.setdefault('map_size', map_size) # don't initialize buffers to zero by default, shouldn't be necessary kwargs.setdefault('meminit', False) # decide whether to use the writemap option based on the operating system's # support for sparse files - writemap requires sparse file support otherwise # the whole# `map_size` may be reserved up front on disk (logic copied from zict) writemap = sys.platform.startswith('linux') kwargs.setdefault('writemap', writemap) # decide options for when data are flushed to disk - choose to delay syncing # data to filesystem, otherwise pay a large performance penalty (zict also does # this) kwargs.setdefault('metasync', False) kwargs.setdefault('sync', False) kwargs.setdefault('map_async', False) # set default option for number of cached transactions max_spare_txns = multiprocessing.cpu_count() kwargs.setdefault('max_spare_txns', max_spare_txns) # normalize path path = os.path.abspath(path) # open database > self.db = lmdb.open(path, **kwargs) E lmdb.MemoryError: /tmp/tmpf5bkmdw2.lmdb: Cannot allocate memory zarr/storage.py:2223: MemoryError ______________________ TestArrayWithLMDBStore.test_islice ______________________ self = def test_islice(self): params = ( ((1,), (1,), 0, 1), ((2,), (1,), 0, 1), ((1,), (2,), 0, 1), ((3,), (3,), 1, 2), ((1000,), (100,), 150, 1050), ((100,), (1000,), 25, 75), ((1, 100), (1, 1), 0, 1), ((100, 1), (3, 1), 56, 100), ((100, 100), (10, 10), 13, 99), ((10, 10, 10), (3, 3, 3), 2, 4), ) for shape, chunks, start, end in params: > z = self.create_array(shape=shape, chunks=chunks, dtype=int) zarr/tests/test_core.py:1502: _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ zarr/tests/test_core.py:2108: in create_array store = LMDBStore(path, buffers=True) _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ self = path = '/tmp/tmpzxgbvp4k.lmdb', buffers = True, dimension_separator = None kwargs = {'map_async': False, 'map_size': 1099511627776, 'max_spare_txns': 8, 'meminit': False, ...} lmdb = map_size = 1099511627776, writemap = True, max_spare_txns = 8 def __init__(self, path, buffers=True, dimension_separator=None, **kwargs): import lmdb # set default memory map size to something larger than the lmdb default, which is # very likely to be too small for any moderate array (logic copied from zict) map_size = (2**40 if sys.maxsize >= 2**32 else 2**28) kwargs.setdefault('map_size', map_size) # don't initialize buffers to zero by default, shouldn't be necessary kwargs.setdefault('meminit', False) # decide whether to use the writemap option based on the operating system's # support for sparse files - writemap requires sparse file support otherwise # the whole# `map_size` may be reserved up front on disk (logic copied from zict) writemap = sys.platform.startswith('linux') kwargs.setdefault('writemap', writemap) # decide options for when data are flushed to disk - choose to delay syncing # data to filesystem, otherwise pay a large performance penalty (zict also does # this) kwargs.setdefault('metasync', False) kwargs.setdefault('sync', False) kwargs.setdefault('map_async', False) # set default option for number of cached transactions max_spare_txns = multiprocessing.cpu_count() kwargs.setdefault('max_spare_txns', max_spare_txns) # normalize path path = os.path.abspath(path) # open database > self.db = lmdb.open(path, **kwargs) E lmdb.MemoryError: /tmp/tmpzxgbvp4k.lmdb: Cannot allocate memory zarr/storage.py:2223: MemoryError _______________________ TestArrayWithLMDBStore.test_iter _______________________ self = def test_iter(self): params = ( ((1,), (1,)), ((2,), (1,)), ((1,), (2,)), ((3,), (3,)), ((1000,), (100,)), ((100,), (1000,)), ((1, 100), (1, 1)), ((1, 0), (1, 1)), ((0, 1), (1, 1)), ((0, 1), (2, 1)), ((100, 1), (3, 1)), ((100, 100), (10, 10)), ((10, 10, 10), (3, 3, 3)), ) for shape, chunks in params: > z = self.create_array(shape=shape, chunks=chunks, dtype=int) zarr/tests/test_core.py:1481: _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ zarr/tests/test_core.py:2108: in create_array store = LMDBStore(path, buffers=True) _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ self = path = '/tmp/tmprhbanwuf.lmdb', buffers = True, dimension_separator = None kwargs = {'map_async': False, 'map_size': 1099511627776, 'max_spare_txns': 8, 'meminit': False, ...} lmdb = map_size = 1099511627776, writemap = True, max_spare_txns = 8 def __init__(self, path, buffers=True, dimension_separator=None, **kwargs): import lmdb # set default memory map size to something larger than the lmdb default, which is # very likely to be too small for any moderate array (logic copied from zict) map_size = (2**40 if sys.maxsize >= 2**32 else 2**28) kwargs.setdefault('map_size', map_size) # don't initialize buffers to zero by default, shouldn't be necessary kwargs.setdefault('meminit', False) # decide whether to use the writemap option based on the operating system's # support for sparse files - writemap requires sparse file support otherwise # the whole# `map_size` may be reserved up front on disk (logic copied from zict) writemap = sys.platform.startswith('linux') kwargs.setdefault('writemap', writemap) # decide options for when data are flushed to disk - choose to delay syncing # data to filesystem, otherwise pay a large performance penalty (zict also does # this) kwargs.setdefault('metasync', False) kwargs.setdefault('sync', False) kwargs.setdefault('map_async', False) # set default option for number of cached transactions max_spare_txns = multiprocessing.cpu_count() kwargs.setdefault('max_spare_txns', max_spare_txns) # normalize path path = os.path.abspath(path) # open database > self.db = lmdb.open(path, **kwargs) E lmdb.MemoryError: /tmp/tmprhbanwuf.lmdb: Cannot allocate memory zarr/storage.py:2223: MemoryError _______________ TestArrayWithLMDBStore.test_iteration_exceptions _______________ self = def test_iteration_exceptions(self): # zero d array a = np.array(1, dtype=int) > z = self.create_array(shape=a.shape, dtype=int) zarr/tests/test_core.py:1433: _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ zarr/tests/test_core.py:2108: in create_array store = LMDBStore(path, buffers=True) _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ self = path = '/tmp/tmpb83557ka.lmdb', buffers = True, dimension_separator = None kwargs = {'map_async': False, 'map_size': 1099511627776, 'max_spare_txns': 8, 'meminit': False, ...} lmdb = map_size = 1099511627776, writemap = True, max_spare_txns = 8 def __init__(self, path, buffers=True, dimension_separator=None, **kwargs): import lmdb # set default memory map size to something larger than the lmdb default, which is # very likely to be too small for any moderate array (logic copied from zict) map_size = (2**40 if sys.maxsize >= 2**32 else 2**28) kwargs.setdefault('map_size', map_size) # don't initialize buffers to zero by default, shouldn't be necessary kwargs.setdefault('meminit', False) # decide whether to use the writemap option based on the operating system's # support for sparse files - writemap requires sparse file support otherwise # the whole# `map_size` may be reserved up front on disk (logic copied from zict) writemap = sys.platform.startswith('linux') kwargs.setdefault('writemap', writemap) # decide options for when data are flushed to disk - choose to delay syncing # data to filesystem, otherwise pay a large performance penalty (zict also does # this) kwargs.setdefault('metasync', False) kwargs.setdefault('sync', False) kwargs.setdefault('map_async', False) # set default option for number of cached transactions max_spare_txns = multiprocessing.cpu_count() kwargs.setdefault('max_spare_txns', max_spare_txns) # normalize path path = os.path.abspath(path) # open database > self.db = lmdb.open(path, **kwargs) E lmdb.MemoryError: /tmp/tmpb83557ka.lmdb: Cannot allocate memory zarr/storage.py:2223: MemoryError _______________ TestArrayWithLMDBStore.test_nchunks_initialized ________________ self = def test_nchunks_initialized(self): for fill_value in (0, 1.0, np.nan): if isinstance(fill_value, int): dtype = 'int' else: dtype = 'float' > z = self.create_array(shape=100, chunks=10, fill_value=fill_value, dtype=dtype, write_empty_chunks=True) zarr/tests/test_core.py:1019: _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ zarr/tests/test_core.py:2108: in create_array store = LMDBStore(path, buffers=True) _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ self = path = '/tmp/tmpgl9tqbz1.lmdb', buffers = True, dimension_separator = None kwargs = {'map_async': False, 'map_size': 1099511627776, 'max_spare_txns': 8, 'meminit': False, ...} lmdb = map_size = 1099511627776, writemap = True, max_spare_txns = 8 def __init__(self, path, buffers=True, dimension_separator=None, **kwargs): import lmdb # set default memory map size to something larger than the lmdb default, which is # very likely to be too small for any moderate array (logic copied from zict) map_size = (2**40 if sys.maxsize >= 2**32 else 2**28) kwargs.setdefault('map_size', map_size) # don't initialize buffers to zero by default, shouldn't be necessary kwargs.setdefault('meminit', False) # decide whether to use the writemap option based on the operating system's # support for sparse files - writemap requires sparse file support otherwise # the whole# `map_size` may be reserved up front on disk (logic copied from zict) writemap = sys.platform.startswith('linux') kwargs.setdefault('writemap', writemap) # decide options for when data are flushed to disk - choose to delay syncing # data to filesystem, otherwise pay a large performance penalty (zict also does # this) kwargs.setdefault('metasync', False) kwargs.setdefault('sync', False) kwargs.setdefault('map_async', False) # set default option for number of cached transactions max_spare_txns = multiprocessing.cpu_count() kwargs.setdefault('max_spare_txns', max_spare_txns) # normalize path path = os.path.abspath(path) # open database > self.db = lmdb.open(path, **kwargs) E lmdb.MemoryError: /tmp/tmpgl9tqbz1.lmdb: Cannot allocate memory zarr/storage.py:2223: MemoryError ____________________ TestArrayWithLMDBStore.test_np_ufuncs _____________________ self = def test_np_ufuncs(self): > z = self.create_array(shape=(100, 100), chunks=(10, 10)) zarr/tests/test_core.py:855: _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ zarr/tests/test_core.py:2108: in create_array store = LMDBStore(path, buffers=True) _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ self = path = '/tmp/tmpod_8i1j3.lmdb', buffers = True, dimension_separator = None kwargs = {'map_async': False, 'map_size': 1099511627776, 'max_spare_txns': 8, 'meminit': False, ...} lmdb = map_size = 1099511627776, writemap = True, max_spare_txns = 8 def __init__(self, path, buffers=True, dimension_separator=None, **kwargs): import lmdb # set default memory map size to something larger than the lmdb default, which is # very likely to be too small for any moderate array (logic copied from zict) map_size = (2**40 if sys.maxsize >= 2**32 else 2**28) kwargs.setdefault('map_size', map_size) # don't initialize buffers to zero by default, shouldn't be necessary kwargs.setdefault('meminit', False) # decide whether to use the writemap option based on the operating system's # support for sparse files - writemap requires sparse file support otherwise # the whole# `map_size` may be reserved up front on disk (logic copied from zict) writemap = sys.platform.startswith('linux') kwargs.setdefault('writemap', writemap) # decide options for when data are flushed to disk - choose to delay syncing # data to filesystem, otherwise pay a large performance penalty (zict also does # this) kwargs.setdefault('metasync', False) kwargs.setdefault('sync', False) kwargs.setdefault('map_async', False) # set default option for number of cached transactions max_spare_txns = multiprocessing.cpu_count() kwargs.setdefault('max_spare_txns', max_spare_txns) # normalize path path = os.path.abspath(path) # open database > self.db = lmdb.open(path, **kwargs) E lmdb.MemoryError: /tmp/tmpod_8i1j3.lmdb: Cannot allocate memory zarr/storage.py:2223: MemoryError __________________ TestArrayWithLMDBStore.test_object_arrays ___________________ self = def test_object_arrays(self): # an object_codec is required for object arrays with pytest.raises(ValueError): > self.create_array(shape=10, chunks=3, dtype=object) zarr/tests/test_core.py:1191: _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ zarr/tests/test_core.py:2108: in create_array store = LMDBStore(path, buffers=True) _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ self = path = '/tmp/tmpadktes8e.lmdb', buffers = True, dimension_separator = None kwargs = {'map_async': False, 'map_size': 1099511627776, 'max_spare_txns': 8, 'meminit': False, ...} lmdb = map_size = 1099511627776, writemap = True, max_spare_txns = 8 def __init__(self, path, buffers=True, dimension_separator=None, **kwargs): import lmdb # set default memory map size to something larger than the lmdb default, which is # very likely to be too small for any moderate array (logic copied from zict) map_size = (2**40 if sys.maxsize >= 2**32 else 2**28) kwargs.setdefault('map_size', map_size) # don't initialize buffers to zero by default, shouldn't be necessary kwargs.setdefault('meminit', False) # decide whether to use the writemap option based on the operating system's # support for sparse files - writemap requires sparse file support otherwise # the whole# `map_size` may be reserved up front on disk (logic copied from zict) writemap = sys.platform.startswith('linux') kwargs.setdefault('writemap', writemap) # decide options for when data are flushed to disk - choose to delay syncing # data to filesystem, otherwise pay a large performance penalty (zict also does # this) kwargs.setdefault('metasync', False) kwargs.setdefault('sync', False) kwargs.setdefault('map_async', False) # set default option for number of cached transactions max_spare_txns = multiprocessing.cpu_count() kwargs.setdefault('max_spare_txns', max_spare_txns) # normalize path path = os.path.abspath(path) # open database > self.db = lmdb.open(path, **kwargs) E lmdb.MemoryError: /tmp/tmpadktes8e.lmdb: Cannot allocate memory zarr/storage.py:2223: MemoryError _______________ TestArrayWithLMDBStore.test_object_arrays_danger _______________ self = def test_object_arrays_danger(self): # do something dangerous - manually force an object array with no object codec > z = self.create_array(shape=5, chunks=2, dtype=object, fill_value=0, object_codec=MsgPack()) zarr/tests/test_core.py:1362: _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ zarr/tests/test_core.py:2108: in create_array store = LMDBStore(path, buffers=True) _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ self = path = '/tmp/tmpbof9ldin.lmdb', buffers = True, dimension_separator = None kwargs = {'map_async': False, 'map_size': 1099511627776, 'max_spare_txns': 8, 'meminit': False, ...} lmdb = map_size = 1099511627776, writemap = True, max_spare_txns = 8 def __init__(self, path, buffers=True, dimension_separator=None, **kwargs): import lmdb # set default memory map size to something larger than the lmdb default, which is # very likely to be too small for any moderate array (logic copied from zict) map_size = (2**40 if sys.maxsize >= 2**32 else 2**28) kwargs.setdefault('map_size', map_size) # don't initialize buffers to zero by default, shouldn't be necessary kwargs.setdefault('meminit', False) # decide whether to use the writemap option based on the operating system's # support for sparse files - writemap requires sparse file support otherwise # the whole# `map_size` may be reserved up front on disk (logic copied from zict) writemap = sys.platform.startswith('linux') kwargs.setdefault('writemap', writemap) # decide options for when data are flushed to disk - choose to delay syncing # data to filesystem, otherwise pay a large performance penalty (zict also does # this) kwargs.setdefault('metasync', False) kwargs.setdefault('sync', False) kwargs.setdefault('map_async', False) # set default option for number of cached transactions max_spare_txns = multiprocessing.cpu_count() kwargs.setdefault('max_spare_txns', max_spare_txns) # normalize path path = os.path.abspath(path) # open database > self.db = lmdb.open(path, **kwargs) E lmdb.MemoryError: /tmp/tmpbof9ldin.lmdb: Cannot allocate memory zarr/storage.py:2223: MemoryError _____________ TestArrayWithLMDBStore.test_object_arrays_vlen_array _____________ self = def test_object_arrays_vlen_array(self): data = np.array([np.array([1, 3, 7]), np.array([5]), np.array([2, 8, 12])] * 1000, dtype=object) def compare_arrays(expected, actual, item_dtype): assert isinstance(actual, np.ndarray) assert actual.dtype == object assert actual.shape == expected.shape for ev, av in zip(expected.flat, actual.flat): assert isinstance(av, np.ndarray) assert_array_equal(ev, av) assert av.dtype == item_dtype codecs = VLenArray(int), VLenArray(' z = self.create_array(shape=data.shape, dtype=object, object_codec=codec) zarr/tests/test_core.py:1340: _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ zarr/tests/test_core.py:2108: in create_array store = LMDBStore(path, buffers=True) _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ self = path = '/tmp/tmpxe8hp_m_.lmdb', buffers = True, dimension_separator = None kwargs = {'map_async': False, 'map_size': 1099511627776, 'max_spare_txns': 8, 'meminit': False, ...} lmdb = map_size = 1099511627776, writemap = True, max_spare_txns = 8 def __init__(self, path, buffers=True, dimension_separator=None, **kwargs): import lmdb # set default memory map size to something larger than the lmdb default, which is # very likely to be too small for any moderate array (logic copied from zict) map_size = (2**40 if sys.maxsize >= 2**32 else 2**28) kwargs.setdefault('map_size', map_size) # don't initialize buffers to zero by default, shouldn't be necessary kwargs.setdefault('meminit', False) # decide whether to use the writemap option based on the operating system's # support for sparse files - writemap requires sparse file support otherwise # the whole# `map_size` may be reserved up front on disk (logic copied from zict) writemap = sys.platform.startswith('linux') kwargs.setdefault('writemap', writemap) # decide options for when data are flushed to disk - choose to delay syncing # data to filesystem, otherwise pay a large performance penalty (zict also does # this) kwargs.setdefault('metasync', False) kwargs.setdefault('sync', False) kwargs.setdefault('map_async', False) # set default option for number of cached transactions max_spare_txns = multiprocessing.cpu_count() kwargs.setdefault('max_spare_txns', max_spare_txns) # normalize path path = os.path.abspath(path) # open database > self.db = lmdb.open(path, **kwargs) E lmdb.MemoryError: /tmp/tmpxe8hp_m_.lmdb: Cannot allocate memory zarr/storage.py:2223: MemoryError _____________ TestArrayWithLMDBStore.test_object_arrays_vlen_bytes _____________ self = def test_object_arrays_vlen_bytes(self): greetings_bytes = [g.encode('utf8') for g in greetings] data = np.array(greetings_bytes * 1000, dtype=object) > z = self.create_array(shape=data.shape, dtype=object, object_codec=VLenBytes()) zarr/tests/test_core.py:1297: _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ zarr/tests/test_core.py:2108: in create_array store = LMDBStore(path, buffers=True) _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ self = path = '/tmp/tmp0p7kkmbe.lmdb', buffers = True, dimension_separator = None kwargs = {'map_async': False, 'map_size': 1099511627776, 'max_spare_txns': 8, 'meminit': False, ...} lmdb = map_size = 1099511627776, writemap = True, max_spare_txns = 8 def __init__(self, path, buffers=True, dimension_separator=None, **kwargs): import lmdb # set default memory map size to something larger than the lmdb default, which is # very likely to be too small for any moderate array (logic copied from zict) map_size = (2**40 if sys.maxsize >= 2**32 else 2**28) kwargs.setdefault('map_size', map_size) # don't initialize buffers to zero by default, shouldn't be necessary kwargs.setdefault('meminit', False) # decide whether to use the writemap option based on the operating system's # support for sparse files - writemap requires sparse file support otherwise # the whole# `map_size` may be reserved up front on disk (logic copied from zict) writemap = sys.platform.startswith('linux') kwargs.setdefault('writemap', writemap) # decide options for when data are flushed to disk - choose to delay syncing # data to filesystem, otherwise pay a large performance penalty (zict also does # this) kwargs.setdefault('metasync', False) kwargs.setdefault('sync', False) kwargs.setdefault('map_async', False) # set default option for number of cached transactions max_spare_txns = multiprocessing.cpu_count() kwargs.setdefault('max_spare_txns', max_spare_txns) # normalize path path = os.path.abspath(path) # open database > self.db = lmdb.open(path, **kwargs) E lmdb.MemoryError: /tmp/tmp0p7kkmbe.lmdb: Cannot allocate memory zarr/storage.py:2223: MemoryError _____________ TestArrayWithLMDBStore.test_object_arrays_vlen_text ______________ self = def test_object_arrays_vlen_text(self): data = np.array(greetings * 1000, dtype=object) > z = self.create_array(shape=data.shape, dtype=object, object_codec=VLenUTF8()) zarr/tests/test_core.py:1250: _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ zarr/tests/test_core.py:2108: in create_array store = LMDBStore(path, buffers=True) _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ self = path = '/tmp/tmpr6yc4j1b.lmdb', buffers = True, dimension_separator = None kwargs = {'map_async': False, 'map_size': 1099511627776, 'max_spare_txns': 8, 'meminit': False, ...} lmdb = map_size = 1099511627776, writemap = True, max_spare_txns = 8 def __init__(self, path, buffers=True, dimension_separator=None, **kwargs): import lmdb # set default memory map size to something larger than the lmdb default, which is # very likely to be too small for any moderate array (logic copied from zict) map_size = (2**40 if sys.maxsize >= 2**32 else 2**28) kwargs.setdefault('map_size', map_size) # don't initialize buffers to zero by default, shouldn't be necessary kwargs.setdefault('meminit', False) # decide whether to use the writemap option based on the operating system's # support for sparse files - writemap requires sparse file support otherwise # the whole# `map_size` may be reserved up front on disk (logic copied from zict) writemap = sys.platform.startswith('linux') kwargs.setdefault('writemap', writemap) # decide options for when data are flushed to disk - choose to delay syncing # data to filesystem, otherwise pay a large performance penalty (zict also does # this) kwargs.setdefault('metasync', False) kwargs.setdefault('sync', False) kwargs.setdefault('map_async', False) # set default option for number of cached transactions max_spare_txns = multiprocessing.cpu_count() kwargs.setdefault('max_spare_txns', max_spare_txns) # normalize path path = os.path.abspath(path) # open database > self.db = lmdb.open(path, **kwargs) E lmdb.MemoryError: /tmp/tmpr6yc4j1b.lmdb: Cannot allocate memory zarr/storage.py:2223: MemoryError ______________ TestArrayWithLMDBStore.test_object_codec_warnings _______________ self = def test_object_codec_warnings(self): with pytest.warns(UserWarning): # provide object_codec, but not object dtype > z = self.create_array(shape=10, chunks=5, dtype="i4", object_codec=JSON()) zarr/tests/test_core.py:1389: _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ zarr/tests/test_core.py:2108: in create_array store = LMDBStore(path, buffers=True) _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ self = path = '/tmp/tmpgg3_hnqf.lmdb', buffers = True, dimension_separator = None kwargs = {'map_async': False, 'map_size': 1099511627776, 'max_spare_txns': 8, 'meminit': False, ...} lmdb = map_size = 1099511627776, writemap = True, max_spare_txns = 8 def __init__(self, path, buffers=True, dimension_separator=None, **kwargs): import lmdb # set default memory map size to something larger than the lmdb default, which is # very likely to be too small for any moderate array (logic copied from zict) map_size = (2**40 if sys.maxsize >= 2**32 else 2**28) kwargs.setdefault('map_size', map_size) # don't initialize buffers to zero by default, shouldn't be necessary kwargs.setdefault('meminit', False) # decide whether to use the writemap option based on the operating system's # support for sparse files - writemap requires sparse file support otherwise # the whole# `map_size` may be reserved up front on disk (logic copied from zict) writemap = sys.platform.startswith('linux') kwargs.setdefault('writemap', writemap) # decide options for when data are flushed to disk - choose to delay syncing # data to filesystem, otherwise pay a large performance penalty (zict also does # this) kwargs.setdefault('metasync', False) kwargs.setdefault('sync', False) kwargs.setdefault('map_async', False) # set default option for number of cached transactions max_spare_txns = multiprocessing.cpu_count() kwargs.setdefault('max_spare_txns', max_spare_txns) # normalize path path = os.path.abspath(path) # open database > self.db = lmdb.open(path, **kwargs) E lmdb.MemoryError: /tmp/tmpgg3_hnqf.lmdb: Cannot allocate memory zarr/storage.py:2223: MemoryError ______________________ TestArrayWithLMDBStore.test_pickle ______________________ self = def test_pickle(self): # setup array > z = self.create_array(shape=1000, chunks=100, dtype=int, cache_metadata=False, cache_attrs=False) zarr/tests/test_core.py:821: _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ zarr/tests/test_core.py:2108: in create_array store = LMDBStore(path, buffers=True) _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ self = path = '/tmp/tmp1wsftf9s.lmdb', buffers = True, dimension_separator = None kwargs = {'map_async': False, 'map_size': 1099511627776, 'max_spare_txns': 8, 'meminit': False, ...} lmdb = map_size = 1099511627776, writemap = True, max_spare_txns = 8 def __init__(self, path, buffers=True, dimension_separator=None, **kwargs): import lmdb # set default memory map size to something larger than the lmdb default, which is # very likely to be too small for any moderate array (logic copied from zict) map_size = (2**40 if sys.maxsize >= 2**32 else 2**28) kwargs.setdefault('map_size', map_size) # don't initialize buffers to zero by default, shouldn't be necessary kwargs.setdefault('meminit', False) # decide whether to use the writemap option based on the operating system's # support for sparse files - writemap requires sparse file support otherwise # the whole# `map_size` may be reserved up front on disk (logic copied from zict) writemap = sys.platform.startswith('linux') kwargs.setdefault('writemap', writemap) # decide options for when data are flushed to disk - choose to delay syncing # data to filesystem, otherwise pay a large performance penalty (zict also does # this) kwargs.setdefault('metasync', False) kwargs.setdefault('sync', False) kwargs.setdefault('map_async', False) # set default option for number of cached transactions max_spare_txns = multiprocessing.cpu_count() kwargs.setdefault('max_spare_txns', max_spare_txns) # normalize path path = os.path.abspath(path) # open database > self.db = lmdb.open(path, **kwargs) E lmdb.MemoryError: /tmp/tmp1wsftf9s.lmdb: Cannot allocate memory zarr/storage.py:2223: MemoryError ____________________ TestArrayWithLMDBStore.test_read_only _____________________ self = def test_read_only(self): > z = self.create_array(shape=1000, chunks=100) zarr/tests/test_core.py:791: _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ zarr/tests/test_core.py:2108: in create_array store = LMDBStore(path, buffers=True) _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ self = path = '/tmp/tmp5ux_jv5v.lmdb', buffers = True, dimension_separator = None kwargs = {'map_async': False, 'map_size': 1099511627776, 'max_spare_txns': 8, 'meminit': False, ...} lmdb = map_size = 1099511627776, writemap = True, max_spare_txns = 8 def __init__(self, path, buffers=True, dimension_separator=None, **kwargs): import lmdb # set default memory map size to something larger than the lmdb default, which is # very likely to be too small for any moderate array (logic copied from zict) map_size = (2**40 if sys.maxsize >= 2**32 else 2**28) kwargs.setdefault('map_size', map_size) # don't initialize buffers to zero by default, shouldn't be necessary kwargs.setdefault('meminit', False) # decide whether to use the writemap option based on the operating system's # support for sparse files - writemap requires sparse file support otherwise # the whole# `map_size` may be reserved up front on disk (logic copied from zict) writemap = sys.platform.startswith('linux') kwargs.setdefault('writemap', writemap) # decide options for when data are flushed to disk - choose to delay syncing # data to filesystem, otherwise pay a large performance penalty (zict also does # this) kwargs.setdefault('metasync', False) kwargs.setdefault('sync', False) kwargs.setdefault('map_async', False) # set default option for number of cached transactions max_spare_txns = multiprocessing.cpu_count() kwargs.setdefault('max_spare_txns', max_spare_txns) # normalize path path = os.path.abspath(path) # open database > self.db = lmdb.open(path, **kwargs) E lmdb.MemoryError: /tmp/tmp5ux_jv5v.lmdb: Cannot allocate memory zarr/storage.py:2223: MemoryError ____________________ TestArrayWithLMDBStore.test_resize_1d _____________________ self = def test_resize_1d(self): > z = self.create_array(shape=105, chunks=10, dtype='i4', fill_value=0) zarr/tests/test_core.py:619: _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ zarr/tests/test_core.py:2108: in create_array store = LMDBStore(path, buffers=True) _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ self = path = '/tmp/tmp6wux__ti.lmdb', buffers = True, dimension_separator = None kwargs = {'map_async': False, 'map_size': 1099511627776, 'max_spare_txns': 8, 'meminit': False, ...} lmdb = map_size = 1099511627776, writemap = True, max_spare_txns = 8 def __init__(self, path, buffers=True, dimension_separator=None, **kwargs): import lmdb # set default memory map size to something larger than the lmdb default, which is # very likely to be too small for any moderate array (logic copied from zict) map_size = (2**40 if sys.maxsize >= 2**32 else 2**28) kwargs.setdefault('map_size', map_size) # don't initialize buffers to zero by default, shouldn't be necessary kwargs.setdefault('meminit', False) # decide whether to use the writemap option based on the operating system's # support for sparse files - writemap requires sparse file support otherwise # the whole# `map_size` may be reserved up front on disk (logic copied from zict) writemap = sys.platform.startswith('linux') kwargs.setdefault('writemap', writemap) # decide options for when data are flushed to disk - choose to delay syncing # data to filesystem, otherwise pay a large performance penalty (zict also does # this) kwargs.setdefault('metasync', False) kwargs.setdefault('sync', False) kwargs.setdefault('map_async', False) # set default option for number of cached transactions max_spare_txns = multiprocessing.cpu_count() kwargs.setdefault('max_spare_txns', max_spare_txns) # normalize path path = os.path.abspath(path) # open database > self.db = lmdb.open(path, **kwargs) E lmdb.MemoryError: /tmp/tmp6wux__ti.lmdb: Cannot allocate memory zarr/storage.py:2223: MemoryError ____________________ TestArrayWithLMDBStore.test_resize_2d _____________________ self = def test_resize_2d(self): > z = self.create_array(shape=(105, 105), chunks=(10, 10), dtype='i4', fill_value=0) zarr/tests/test_core.py:656: _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ zarr/tests/test_core.py:2108: in create_array store = LMDBStore(path, buffers=True) _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ self = path = '/tmp/tmphm05nht2.lmdb', buffers = True, dimension_separator = None kwargs = {'map_async': False, 'map_size': 1099511627776, 'max_spare_txns': 8, 'meminit': False, ...} lmdb = map_size = 1099511627776, writemap = True, max_spare_txns = 8 def __init__(self, path, buffers=True, dimension_separator=None, **kwargs): import lmdb # set default memory map size to something larger than the lmdb default, which is # very likely to be too small for any moderate array (logic copied from zict) map_size = (2**40 if sys.maxsize >= 2**32 else 2**28) kwargs.setdefault('map_size', map_size) # don't initialize buffers to zero by default, shouldn't be necessary kwargs.setdefault('meminit', False) # decide whether to use the writemap option based on the operating system's # support for sparse files - writemap requires sparse file support otherwise # the whole# `map_size` may be reserved up front on disk (logic copied from zict) writemap = sys.platform.startswith('linux') kwargs.setdefault('writemap', writemap) # decide options for when data are flushed to disk - choose to delay syncing # data to filesystem, otherwise pay a large performance penalty (zict also does # this) kwargs.setdefault('metasync', False) kwargs.setdefault('sync', False) kwargs.setdefault('map_async', False) # set default option for number of cached transactions max_spare_txns = multiprocessing.cpu_count() kwargs.setdefault('max_spare_txns', max_spare_txns) # normalize path path = os.path.abspath(path) # open database > self.db = lmdb.open(path, **kwargs) E lmdb.MemoryError: /tmp/tmphm05nht2.lmdb: Cannot allocate memory zarr/storage.py:2223: MemoryError _____________ TestArrayWithLMDBStore.test_setitem_data_not_shared ______________ self = def test_setitem_data_not_shared(self): # check that data don't end up being shared with another array # https://github.com/alimanfoo/zarr/issues/79 > z = self.create_array(shape=20, chunks=10, dtype='i4') zarr/tests/test_core.py:566: _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ zarr/tests/test_core.py:2108: in create_array store = LMDBStore(path, buffers=True) _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ self = path = '/tmp/tmp_s0ldhn1.lmdb', buffers = True, dimension_separator = None kwargs = {'map_async': False, 'map_size': 1099511627776, 'max_spare_txns': 8, 'meminit': False, ...} lmdb = map_size = 1099511627776, writemap = True, max_spare_txns = 8 def __init__(self, path, buffers=True, dimension_separator=None, **kwargs): import lmdb # set default memory map size to something larger than the lmdb default, which is # very likely to be too small for any moderate array (logic copied from zict) map_size = (2**40 if sys.maxsize >= 2**32 else 2**28) kwargs.setdefault('map_size', map_size) # don't initialize buffers to zero by default, shouldn't be necessary kwargs.setdefault('meminit', False) # decide whether to use the writemap option based on the operating system's # support for sparse files - writemap requires sparse file support otherwise # the whole# `map_size` may be reserved up front on disk (logic copied from zict) writemap = sys.platform.startswith('linux') kwargs.setdefault('writemap', writemap) # decide options for when data are flushed to disk - choose to delay syncing # data to filesystem, otherwise pay a large performance penalty (zict also does # this) kwargs.setdefault('metasync', False) kwargs.setdefault('sync', False) kwargs.setdefault('map_async', False) # set default option for number of cached transactions max_spare_txns = multiprocessing.cpu_count() kwargs.setdefault('max_spare_txns', max_spare_txns) # normalize path path = os.path.abspath(path) # open database > self.db = lmdb.open(path, **kwargs) E lmdb.MemoryError: /tmp/tmp_s0ldhn1.lmdb: Cannot allocate memory zarr/storage.py:2223: MemoryError _____________ TestArrayWithLMDBStore.test_store_has_binary_values ______________ self = def test_store_has_binary_values(self): # Initialize array np.random.seed(42) > z = self.create_array(shape=(1050,), chunks=100, dtype='f8', compressor=[]) zarr/tests/test_core.py:144: _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ zarr/tests/test_core.py:2108: in create_array store = LMDBStore(path, buffers=True) _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ self = path = '/tmp/tmpr0t38in7.lmdb', buffers = True, dimension_separator = None kwargs = {'map_async': False, 'map_size': 1099511627776, 'max_spare_txns': 8, 'meminit': False, ...} lmdb = map_size = 1099511627776, writemap = True, max_spare_txns = 8 def __init__(self, path, buffers=True, dimension_separator=None, **kwargs): import lmdb # set default memory map size to something larger than the lmdb default, which is # very likely to be too small for any moderate array (logic copied from zict) map_size = (2**40 if sys.maxsize >= 2**32 else 2**28) kwargs.setdefault('map_size', map_size) # don't initialize buffers to zero by default, shouldn't be necessary kwargs.setdefault('meminit', False) # decide whether to use the writemap option based on the operating system's # support for sparse files - writemap requires sparse file support otherwise # the whole# `map_size` may be reserved up front on disk (logic copied from zict) writemap = sys.platform.startswith('linux') kwargs.setdefault('writemap', writemap) # decide options for when data are flushed to disk - choose to delay syncing # data to filesystem, otherwise pay a large performance penalty (zict also does # this) kwargs.setdefault('metasync', False) kwargs.setdefault('sync', False) kwargs.setdefault('map_async', False) # set default option for number of cached transactions max_spare_txns = multiprocessing.cpu_count() kwargs.setdefault('max_spare_txns', max_spare_txns) # normalize path path = os.path.abspath(path) # open database > self.db = lmdb.open(path, **kwargs) E lmdb.MemoryError: /tmp/tmpr0t38in7.lmdb: Cannot allocate memory zarr/storage.py:2223: MemoryError _______________ TestArrayWithLMDBStore.test_store_has_text_keys ________________ self = def test_store_has_text_keys(self): # Initialize array np.random.seed(42) > z = self.create_array(shape=(1050,), chunks=100, dtype='f8', compressor=[]) zarr/tests/test_core.py:130: _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ zarr/tests/test_core.py:2108: in create_array store = LMDBStore(path, buffers=True) _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ self = path = '/tmp/tmp94p40a5k.lmdb', buffers = True, dimension_separator = None kwargs = {'map_async': False, 'map_size': 1099511627776, 'max_spare_txns': 8, 'meminit': False, ...} lmdb = map_size = 1099511627776, writemap = True, max_spare_txns = 8 def __init__(self, path, buffers=True, dimension_separator=None, **kwargs): import lmdb # set default memory map size to something larger than the lmdb default, which is # very likely to be too small for any moderate array (logic copied from zict) map_size = (2**40 if sys.maxsize >= 2**32 else 2**28) kwargs.setdefault('map_size', map_size) # don't initialize buffers to zero by default, shouldn't be necessary kwargs.setdefault('meminit', False) # decide whether to use the writemap option based on the operating system's # support for sparse files - writemap requires sparse file support otherwise # the whole# `map_size` may be reserved up front on disk (logic copied from zict) writemap = sys.platform.startswith('linux') kwargs.setdefault('writemap', writemap) # decide options for when data are flushed to disk - choose to delay syncing # data to filesystem, otherwise pay a large performance penalty (zict also does # this) kwargs.setdefault('metasync', False) kwargs.setdefault('sync', False) kwargs.setdefault('map_async', False) # set default option for number of cached transactions max_spare_txns = multiprocessing.cpu_count() kwargs.setdefault('max_spare_txns', max_spare_txns) # normalize path path = os.path.abspath(path) # open database > self.db = lmdb.open(path, **kwargs) E lmdb.MemoryError: /tmp/tmp94p40a5k.lmdb: Cannot allocate memory zarr/storage.py:2223: MemoryError _________________ TestArrayWithLMDBStore.test_structured_array _________________ self = def test_structured_array(self): d = np.array([(b'aaa', 1, 4.2), (b'bbb', 2, 8.4), (b'ccc', 3, 12.6)], dtype=[('foo', 'S3'), ('bar', 'i4'), ('baz', 'f8')]) fill_values = None, b'', (b'zzz', 42, 16.8) > self.check_structured_array(d, fill_values) zarr/tests/test_core.py:1106: _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ zarr/tests/test_core.py:1062: in check_structured_array z = self.create_array(shape=a.shape, chunks=2, dtype=a.dtype, fill_value=fill_value) zarr/tests/test_core.py:2108: in create_array store = LMDBStore(path, buffers=True) _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ self = path = '/tmp/tmpk9w73bav.lmdb', buffers = True, dimension_separator = None kwargs = {'map_async': False, 'map_size': 1099511627776, 'max_spare_txns': 8, 'meminit': False, ...} lmdb = map_size = 1099511627776, writemap = True, max_spare_txns = 8 def __init__(self, path, buffers=True, dimension_separator=None, **kwargs): import lmdb # set default memory map size to something larger than the lmdb default, which is # very likely to be too small for any moderate array (logic copied from zict) map_size = (2**40 if sys.maxsize >= 2**32 else 2**28) kwargs.setdefault('map_size', map_size) # don't initialize buffers to zero by default, shouldn't be necessary kwargs.setdefault('meminit', False) # decide whether to use the writemap option based on the operating system's # support for sparse files - writemap requires sparse file support otherwise # the whole# `map_size` may be reserved up front on disk (logic copied from zict) writemap = sys.platform.startswith('linux') kwargs.setdefault('writemap', writemap) # decide options for when data are flushed to disk - choose to delay syncing # data to filesystem, otherwise pay a large performance penalty (zict also does # this) kwargs.setdefault('metasync', False) kwargs.setdefault('sync', False) kwargs.setdefault('map_async', False) # set default option for number of cached transactions max_spare_txns = multiprocessing.cpu_count() kwargs.setdefault('max_spare_txns', max_spare_txns) # normalize path path = os.path.abspath(path) # open database > self.db = lmdb.open(path, **kwargs) E lmdb.MemoryError: /tmp/tmpk9w73bav.lmdb: Cannot allocate memory zarr/storage.py:2223: MemoryError _________ TestArrayWithLMDBStore.test_structured_array_contain_object __________ self = @unittest.skipIf(parse_version(np.__version__) < parse_version('1.14.0'), "unsupported numpy version") def test_structured_array_contain_object(self): if "PartialRead" in self.__class__.__name__: pytest.skip("partial reads of object arrays not supported") # ----------- creation -------------- structured_dtype = [('c_obj', object), ('c_int', int)] a = np.array([(b'aaa', 1), (b'bbb', 2)], dtype=structured_dtype) # zarr-array with structured dtype require object codec with pytest.raises(ValueError): > self.create_array(shape=a.shape, dtype=structured_dtype) zarr/tests/test_core.py:1407: _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ zarr/tests/test_core.py:2108: in create_array store = LMDBStore(path, buffers=True) _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ self = path = '/tmp/tmp6o3te8re.lmdb', buffers = True, dimension_separator = None kwargs = {'map_async': False, 'map_size': 1099511627776, 'max_spare_txns': 8, 'meminit': False, ...} lmdb = map_size = 1099511627776, writemap = True, max_spare_txns = 8 def __init__(self, path, buffers=True, dimension_separator=None, **kwargs): import lmdb # set default memory map size to something larger than the lmdb default, which is # very likely to be too small for any moderate array (logic copied from zict) map_size = (2**40 if sys.maxsize >= 2**32 else 2**28) kwargs.setdefault('map_size', map_size) # don't initialize buffers to zero by default, shouldn't be necessary kwargs.setdefault('meminit', False) # decide whether to use the writemap option based on the operating system's # support for sparse files - writemap requires sparse file support otherwise # the whole# `map_size` may be reserved up front on disk (logic copied from zict) writemap = sys.platform.startswith('linux') kwargs.setdefault('writemap', writemap) # decide options for when data are flushed to disk - choose to delay syncing # data to filesystem, otherwise pay a large performance penalty (zict also does # this) kwargs.setdefault('metasync', False) kwargs.setdefault('sync', False) kwargs.setdefault('map_async', False) # set default option for number of cached transactions max_spare_txns = multiprocessing.cpu_count() kwargs.setdefault('max_spare_txns', max_spare_txns) # normalize path path = os.path.abspath(path) # open database > self.db = lmdb.open(path, **kwargs) E lmdb.MemoryError: /tmp/tmp6o3te8re.lmdb: Cannot allocate memory zarr/storage.py:2223: MemoryError _____________ TestArrayWithLMDBStore.test_structured_array_nested ______________ self = def test_structured_array_nested(self): d = np.array([(0, (0, ((0, 1), (1, 2), (2, 3)), 0), b'aaa'), (1, (1, ((1, 2), (2, 3), (3, 4)), 1), b'bbb'), (2, (2, ((2, 3), (3, 4), (4, 5)), 2), b'ccc')], dtype=[('foo', 'i8'), ('bar', [('foo', 'i4'), ('bar', '(3, 2)f4'), ('baz', 'u1')]), ('baz', 'S3')]) fill_values = None, b'', (0, (0, ((0, 0), (1, 1), (2, 2)), 0), b'zzz') > self.check_structured_array(d, fill_values) zarr/tests/test_core.py:1123: _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ zarr/tests/test_core.py:1062: in check_structured_array z = self.create_array(shape=a.shape, chunks=2, dtype=a.dtype, fill_value=fill_value) zarr/tests/test_core.py:2108: in create_array store = LMDBStore(path, buffers=True) _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ self = path = '/tmp/tmpuit6griz.lmdb', buffers = True, dimension_separator = None kwargs = {'map_async': False, 'map_size': 1099511627776, 'max_spare_txns': 8, 'meminit': False, ...} lmdb = map_size = 1099511627776, writemap = True, max_spare_txns = 8 def __init__(self, path, buffers=True, dimension_separator=None, **kwargs): import lmdb # set default memory map size to something larger than the lmdb default, which is # very likely to be too small for any moderate array (logic copied from zict) map_size = (2**40 if sys.maxsize >= 2**32 else 2**28) kwargs.setdefault('map_size', map_size) # don't initialize buffers to zero by default, shouldn't be necessary kwargs.setdefault('meminit', False) # decide whether to use the writemap option based on the operating system's # support for sparse files - writemap requires sparse file support otherwise # the whole# `map_size` may be reserved up front on disk (logic copied from zict) writemap = sys.platform.startswith('linux') kwargs.setdefault('writemap', writemap) # decide options for when data are flushed to disk - choose to delay syncing # data to filesystem, otherwise pay a large performance penalty (zict also does # this) kwargs.setdefault('metasync', False) kwargs.setdefault('sync', False) kwargs.setdefault('map_async', False) # set default option for number of cached transactions max_spare_txns = multiprocessing.cpu_count() kwargs.setdefault('max_spare_txns', max_spare_txns) # normalize path path = os.path.abspath(path) # open database > self.db = lmdb.open(path, **kwargs) E lmdb.MemoryError: /tmp/tmpuit6griz.lmdb: Cannot allocate memory zarr/storage.py:2223: MemoryError ____________ TestArrayWithLMDBStore.test_structured_array_subshapes ____________ self = def test_structured_array_subshapes(self): d = np.array([(0, ((0, 1, 2), (1, 2, 3)), b'aaa'), (1, ((1, 2, 3), (2, 3, 4)), b'bbb'), (2, ((2, 3, 4), (3, 4, 5)), b'ccc')], dtype=[('foo', 'i8'), ('bar', '(2, 3)f4'), ('baz', 'S3')]) fill_values = None, b'', (0, ((0, 0, 0), (1, 1, 1)), b'zzz') > self.check_structured_array(d, fill_values) zarr/tests/test_core.py:1114: _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ zarr/tests/test_core.py:1062: in check_structured_array z = self.create_array(shape=a.shape, chunks=2, dtype=a.dtype, fill_value=fill_value) zarr/tests/test_core.py:2108: in create_array store = LMDBStore(path, buffers=True) _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ self = path = '/tmp/tmpthlw4gc_.lmdb', buffers = True, dimension_separator = None kwargs = {'map_async': False, 'map_size': 1099511627776, 'max_spare_txns': 8, 'meminit': False, ...} lmdb = map_size = 1099511627776, writemap = True, max_spare_txns = 8 def __init__(self, path, buffers=True, dimension_separator=None, **kwargs): import lmdb # set default memory map size to something larger than the lmdb default, which is # very likely to be too small for any moderate array (logic copied from zict) map_size = (2**40 if sys.maxsize >= 2**32 else 2**28) kwargs.setdefault('map_size', map_size) # don't initialize buffers to zero by default, shouldn't be necessary kwargs.setdefault('meminit', False) # decide whether to use the writemap option based on the operating system's # support for sparse files - writemap requires sparse file support otherwise # the whole# `map_size` may be reserved up front on disk (logic copied from zict) writemap = sys.platform.startswith('linux') kwargs.setdefault('writemap', writemap) # decide options for when data are flushed to disk - choose to delay syncing # data to filesystem, otherwise pay a large performance penalty (zict also does # this) kwargs.setdefault('metasync', False) kwargs.setdefault('sync', False) kwargs.setdefault('map_async', False) # set default option for number of cached transactions max_spare_txns = multiprocessing.cpu_count() kwargs.setdefault('max_spare_txns', max_spare_txns) # normalize path path = os.path.abspath(path) # open database > self.db = lmdb.open(path, **kwargs) E lmdb.MemoryError: /tmp/tmpthlw4gc_.lmdb: Cannot allocate memory zarr/storage.py:2223: MemoryError ______________ TestArrayWithLMDBStore.test_structured_with_object ______________ self = def test_structured_with_object(self): > a = self.create_array(fill_value=(0.0, None), shape=10, chunks=10, dtype=[('x', float), ('y', object)], object_codec=Pickle()) zarr/tests/test_core.py:1559: _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ zarr/tests/test_core.py:2108: in create_array store = LMDBStore(path, buffers=True) _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ self = path = '/tmp/tmppq4nmqe9.lmdb', buffers = True, dimension_separator = None kwargs = {'map_async': False, 'map_size': 1099511627776, 'max_spare_txns': 8, 'meminit': False, ...} lmdb = map_size = 1099511627776, writemap = True, max_spare_txns = 8 def __init__(self, path, buffers=True, dimension_separator=None, **kwargs): import lmdb # set default memory map size to something larger than the lmdb default, which is # very likely to be too small for any moderate array (logic copied from zict) map_size = (2**40 if sys.maxsize >= 2**32 else 2**28) kwargs.setdefault('map_size', map_size) # don't initialize buffers to zero by default, shouldn't be necessary kwargs.setdefault('meminit', False) # decide whether to use the writemap option based on the operating system's # support for sparse files - writemap requires sparse file support otherwise # the whole# `map_size` may be reserved up front on disk (logic copied from zict) writemap = sys.platform.startswith('linux') kwargs.setdefault('writemap', writemap) # decide options for when data are flushed to disk - choose to delay syncing # data to filesystem, otherwise pay a large performance penalty (zict also does # this) kwargs.setdefault('metasync', False) kwargs.setdefault('sync', False) kwargs.setdefault('map_async', False) # set default option for number of cached transactions max_spare_txns = multiprocessing.cpu_count() kwargs.setdefault('max_spare_txns', max_spare_txns) # normalize path path = os.path.abspath(path) # open database > self.db = lmdb.open(path, **kwargs) E lmdb.MemoryError: /tmp/tmppq4nmqe9.lmdb: Cannot allocate memory zarr/storage.py:2223: MemoryError _______________ TestArrayWithLMDBStoreNoBuffers.test_0len_dim_1d _______________ self = def test_0len_dim_1d(self): # Test behaviour for 1D array with zero-length dimension. > z = self.create_array(shape=0, fill_value=0) zarr/tests/test_core.py:892: _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ zarr/tests/test_core.py:2131: in create_array store = LMDBStore(path, buffers=False) _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ self = path = '/tmp/tmpjma133k6.lmdb', buffers = False, dimension_separator = None kwargs = {'map_async': False, 'map_size': 1099511627776, 'max_spare_txns': 8, 'meminit': False, ...} lmdb = map_size = 1099511627776, writemap = True, max_spare_txns = 8 def __init__(self, path, buffers=True, dimension_separator=None, **kwargs): import lmdb # set default memory map size to something larger than the lmdb default, which is # very likely to be too small for any moderate array (logic copied from zict) map_size = (2**40 if sys.maxsize >= 2**32 else 2**28) kwargs.setdefault('map_size', map_size) # don't initialize buffers to zero by default, shouldn't be necessary kwargs.setdefault('meminit', False) # decide whether to use the writemap option based on the operating system's # support for sparse files - writemap requires sparse file support otherwise # the whole# `map_size` may be reserved up front on disk (logic copied from zict) writemap = sys.platform.startswith('linux') kwargs.setdefault('writemap', writemap) # decide options for when data are flushed to disk - choose to delay syncing # data to filesystem, otherwise pay a large performance penalty (zict also does # this) kwargs.setdefault('metasync', False) kwargs.setdefault('sync', False) kwargs.setdefault('map_async', False) # set default option for number of cached transactions max_spare_txns = multiprocessing.cpu_count() kwargs.setdefault('max_spare_txns', max_spare_txns) # normalize path path = os.path.abspath(path) # open database > self.db = lmdb.open(path, **kwargs) E lmdb.MemoryError: /tmp/tmpjma133k6.lmdb: Cannot allocate memory zarr/storage.py:2223: MemoryError _______________ TestArrayWithLMDBStoreNoBuffers.test_0len_dim_2d _______________ self = def test_0len_dim_2d(self): # Test behavioud for 2D array with a zero-length dimension. > z = self.create_array(shape=(10, 0), fill_value=0) zarr/tests/test_core.py:927: _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ zarr/tests/test_core.py:2131: in create_array store = LMDBStore(path, buffers=False) _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ self = path = '/tmp/tmpgpfqoj73.lmdb', buffers = False, dimension_separator = None kwargs = {'map_async': False, 'map_size': 1099511627776, 'max_spare_txns': 8, 'meminit': False, ...} lmdb = map_size = 1099511627776, writemap = True, max_spare_txns = 8 def __init__(self, path, buffers=True, dimension_separator=None, **kwargs): import lmdb # set default memory map size to something larger than the lmdb default, which is # very likely to be too small for any moderate array (logic copied from zict) map_size = (2**40 if sys.maxsize >= 2**32 else 2**28) kwargs.setdefault('map_size', map_size) # don't initialize buffers to zero by default, shouldn't be necessary kwargs.setdefault('meminit', False) # decide whether to use the writemap option based on the operating system's # support for sparse files - writemap requires sparse file support otherwise # the whole# `map_size` may be reserved up front on disk (logic copied from zict) writemap = sys.platform.startswith('linux') kwargs.setdefault('writemap', writemap) # decide options for when data are flushed to disk - choose to delay syncing # data to filesystem, otherwise pay a large performance penalty (zict also does # this) kwargs.setdefault('metasync', False) kwargs.setdefault('sync', False) kwargs.setdefault('map_async', False) # set default option for number of cached transactions max_spare_txns = multiprocessing.cpu_count() kwargs.setdefault('max_spare_txns', max_spare_txns) # normalize path path = os.path.abspath(path) # open database > self.db = lmdb.open(path, **kwargs) E lmdb.MemoryError: /tmp/tmpgpfqoj73.lmdb: Cannot allocate memory zarr/storage.py:2223: MemoryError ________________ TestArrayWithLMDBStoreNoBuffers.test_append_1d ________________ self = def test_append_1d(self): a = np.arange(105) > z = self.create_array(shape=a.shape, chunks=10, dtype=a.dtype) zarr/tests/test_core.py:712: _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ zarr/tests/test_core.py:2131: in create_array store = LMDBStore(path, buffers=False) _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ self = path = '/tmp/tmpbcnjf8by.lmdb', buffers = False, dimension_separator = None kwargs = {'map_async': False, 'map_size': 1099511627776, 'max_spare_txns': 8, 'meminit': False, ...} lmdb = map_size = 1099511627776, writemap = True, max_spare_txns = 8 def __init__(self, path, buffers=True, dimension_separator=None, **kwargs): import lmdb # set default memory map size to something larger than the lmdb default, which is # very likely to be too small for any moderate array (logic copied from zict) map_size = (2**40 if sys.maxsize >= 2**32 else 2**28) kwargs.setdefault('map_size', map_size) # don't initialize buffers to zero by default, shouldn't be necessary kwargs.setdefault('meminit', False) # decide whether to use the writemap option based on the operating system's # support for sparse files - writemap requires sparse file support otherwise # the whole# `map_size` may be reserved up front on disk (logic copied from zict) writemap = sys.platform.startswith('linux') kwargs.setdefault('writemap', writemap) # decide options for when data are flushed to disk - choose to delay syncing # data to filesystem, otherwise pay a large performance penalty (zict also does # this) kwargs.setdefault('metasync', False) kwargs.setdefault('sync', False) kwargs.setdefault('map_async', False) # set default option for number of cached transactions max_spare_txns = multiprocessing.cpu_count() kwargs.setdefault('max_spare_txns', max_spare_txns) # normalize path path = os.path.abspath(path) # open database > self.db = lmdb.open(path, **kwargs) E lmdb.MemoryError: /tmp/tmpbcnjf8by.lmdb: Cannot allocate memory zarr/storage.py:2223: MemoryError ________________ TestArrayWithLMDBStoreNoBuffers.test_append_2d ________________ self = def test_append_2d(self): a = np.arange(105*105, dtype='i4').reshape((105, 105)) > z = self.create_array(shape=a.shape, chunks=(10, 10), dtype=a.dtype) zarr/tests/test_core.py:741: _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ zarr/tests/test_core.py:2131: in create_array store = LMDBStore(path, buffers=False) _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ self = path = '/tmp/tmphbdb8hmf.lmdb', buffers = False, dimension_separator = None kwargs = {'map_async': False, 'map_size': 1099511627776, 'max_spare_txns': 8, 'meminit': False, ...} lmdb = map_size = 1099511627776, writemap = True, max_spare_txns = 8 def __init__(self, path, buffers=True, dimension_separator=None, **kwargs): import lmdb # set default memory map size to something larger than the lmdb default, which is # very likely to be too small for any moderate array (logic copied from zict) map_size = (2**40 if sys.maxsize >= 2**32 else 2**28) kwargs.setdefault('map_size', map_size) # don't initialize buffers to zero by default, shouldn't be necessary kwargs.setdefault('meminit', False) # decide whether to use the writemap option based on the operating system's # support for sparse files - writemap requires sparse file support otherwise # the whole# `map_size` may be reserved up front on disk (logic copied from zict) writemap = sys.platform.startswith('linux') kwargs.setdefault('writemap', writemap) # decide options for when data are flushed to disk - choose to delay syncing # data to filesystem, otherwise pay a large performance penalty (zict also does # this) kwargs.setdefault('metasync', False) kwargs.setdefault('sync', False) kwargs.setdefault('map_async', False) # set default option for number of cached transactions max_spare_txns = multiprocessing.cpu_count() kwargs.setdefault('max_spare_txns', max_spare_txns) # normalize path path = os.path.abspath(path) # open database > self.db = lmdb.open(path, **kwargs) E lmdb.MemoryError: /tmp/tmphbdb8hmf.lmdb: Cannot allocate memory zarr/storage.py:2223: MemoryError _____________ TestArrayWithLMDBStoreNoBuffers.test_append_2d_axis ______________ self = def test_append_2d_axis(self): a = np.arange(105*105, dtype='i4').reshape((105, 105)) > z = self.create_array(shape=a.shape, chunks=(10, 10), dtype=a.dtype) zarr/tests/test_core.py:763: _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ zarr/tests/test_core.py:2131: in create_array store = LMDBStore(path, buffers=False) _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ self = path = '/tmp/tmp9dlu6e9_.lmdb', buffers = False, dimension_separator = None kwargs = {'map_async': False, 'map_size': 1099511627776, 'max_spare_txns': 8, 'meminit': False, ...} lmdb = map_size = 1099511627776, writemap = True, max_spare_txns = 8 def __init__(self, path, buffers=True, dimension_separator=None, **kwargs): import lmdb # set default memory map size to something larger than the lmdb default, which is # very likely to be too small for any moderate array (logic copied from zict) map_size = (2**40 if sys.maxsize >= 2**32 else 2**28) kwargs.setdefault('map_size', map_size) # don't initialize buffers to zero by default, shouldn't be necessary kwargs.setdefault('meminit', False) # decide whether to use the writemap option based on the operating system's # support for sparse files - writemap requires sparse file support otherwise # the whole# `map_size` may be reserved up front on disk (logic copied from zict) writemap = sys.platform.startswith('linux') kwargs.setdefault('writemap', writemap) # decide options for when data are flushed to disk - choose to delay syncing # data to filesystem, otherwise pay a large performance penalty (zict also does # this) kwargs.setdefault('metasync', False) kwargs.setdefault('sync', False) kwargs.setdefault('map_async', False) # set default option for number of cached transactions max_spare_txns = multiprocessing.cpu_count() kwargs.setdefault('max_spare_txns', max_spare_txns) # normalize path path = os.path.abspath(path) # open database > self.db = lmdb.open(path, **kwargs) E lmdb.MemoryError: /tmp/tmp9dlu6e9_.lmdb: Cannot allocate memory zarr/storage.py:2223: MemoryError ____________ TestArrayWithLMDBStoreNoBuffers.test_append_bad_shape _____________ self = def test_append_bad_shape(self): a = np.arange(100) > z = self.create_array(shape=a.shape, chunks=10, dtype=a.dtype) zarr/tests/test_core.py:782: _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ zarr/tests/test_core.py:2131: in create_array store = LMDBStore(path, buffers=False) _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ self = path = '/tmp/tmpjcfakfh2.lmdb', buffers = False, dimension_separator = None kwargs = {'map_async': False, 'map_size': 1099511627776, 'max_spare_txns': 8, 'meminit': False, ...} lmdb = map_size = 1099511627776, writemap = True, max_spare_txns = 8 def __init__(self, path, buffers=True, dimension_separator=None, **kwargs): import lmdb # set default memory map size to something larger than the lmdb default, which is # very likely to be too small for any moderate array (logic copied from zict) map_size = (2**40 if sys.maxsize >= 2**32 else 2**28) kwargs.setdefault('map_size', map_size) # don't initialize buffers to zero by default, shouldn't be necessary kwargs.setdefault('meminit', False) # decide whether to use the writemap option based on the operating system's # support for sparse files - writemap requires sparse file support otherwise # the whole# `map_size` may be reserved up front on disk (logic copied from zict) writemap = sys.platform.startswith('linux') kwargs.setdefault('writemap', writemap) # decide options for when data are flushed to disk - choose to delay syncing # data to filesystem, otherwise pay a large performance penalty (zict also does # this) kwargs.setdefault('metasync', False) kwargs.setdefault('sync', False) kwargs.setdefault('map_async', False) # set default option for number of cached transactions max_spare_txns = multiprocessing.cpu_count() kwargs.setdefault('max_spare_txns', max_spare_txns) # normalize path path = os.path.abspath(path) # open database > self.db = lmdb.open(path, **kwargs) E lmdb.MemoryError: /tmp/tmpjcfakfh2.lmdb: Cannot allocate memory zarr/storage.py:2223: MemoryError ________________ TestArrayWithLMDBStoreNoBuffers.test_array_0d _________________ self = def test_array_0d(self): # test behaviour for array with 0 dimensions # setup a = np.zeros(()) > z = self.create_array(shape=(), dtype=a.dtype, fill_value=0, write_empty_chunks=False) zarr/tests/test_core.py:968: _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ zarr/tests/test_core.py:2131: in create_array store = LMDBStore(path, buffers=False) _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ self = path = '/tmp/tmpwa7hnuyi.lmdb', buffers = False, dimension_separator = None kwargs = {'map_async': False, 'map_size': 1099511627776, 'max_spare_txns': 8, 'meminit': False, ...} lmdb = map_size = 1099511627776, writemap = True, max_spare_txns = 8 def __init__(self, path, buffers=True, dimension_separator=None, **kwargs): import lmdb # set default memory map size to something larger than the lmdb default, which is # very likely to be too small for any moderate array (logic copied from zict) map_size = (2**40 if sys.maxsize >= 2**32 else 2**28) kwargs.setdefault('map_size', map_size) # don't initialize buffers to zero by default, shouldn't be necessary kwargs.setdefault('meminit', False) # decide whether to use the writemap option based on the operating system's # support for sparse files - writemap requires sparse file support otherwise # the whole# `map_size` may be reserved up front on disk (logic copied from zict) writemap = sys.platform.startswith('linux') kwargs.setdefault('writemap', writemap) # decide options for when data are flushed to disk - choose to delay syncing # data to filesystem, otherwise pay a large performance penalty (zict also does # this) kwargs.setdefault('metasync', False) kwargs.setdefault('sync', False) kwargs.setdefault('map_async', False) # set default option for number of cached transactions max_spare_txns = multiprocessing.cpu_count() kwargs.setdefault('max_spare_txns', max_spare_txns) # normalize path path = os.path.abspath(path) # open database > self.db = lmdb.open(path, **kwargs) E lmdb.MemoryError: /tmp/tmpwa7hnuyi.lmdb: Cannot allocate memory zarr/storage.py:2223: MemoryError ________________ TestArrayWithLMDBStoreNoBuffers.test_array_1d _________________ self = def test_array_1d(self): a = np.arange(1050) > z = self.create_array(shape=a.shape, chunks=100, dtype=a.dtype) zarr/tests/test_core.py:205: _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ zarr/tests/test_core.py:2131: in create_array store = LMDBStore(path, buffers=False) _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ self = path = '/tmp/tmpmepe6yv2.lmdb', buffers = False, dimension_separator = None kwargs = {'map_async': False, 'map_size': 1099511627776, 'max_spare_txns': 8, 'meminit': False, ...} lmdb = map_size = 1099511627776, writemap = True, max_spare_txns = 8 def __init__(self, path, buffers=True, dimension_separator=None, **kwargs): import lmdb # set default memory map size to something larger than the lmdb default, which is # very likely to be too small for any moderate array (logic copied from zict) map_size = (2**40 if sys.maxsize >= 2**32 else 2**28) kwargs.setdefault('map_size', map_size) # don't initialize buffers to zero by default, shouldn't be necessary kwargs.setdefault('meminit', False) # decide whether to use the writemap option based on the operating system's # support for sparse files - writemap requires sparse file support otherwise # the whole# `map_size` may be reserved up front on disk (logic copied from zict) writemap = sys.platform.startswith('linux') kwargs.setdefault('writemap', writemap) # decide options for when data are flushed to disk - choose to delay syncing # data to filesystem, otherwise pay a large performance penalty (zict also does # this) kwargs.setdefault('metasync', False) kwargs.setdefault('sync', False) kwargs.setdefault('map_async', False) # set default option for number of cached transactions max_spare_txns = multiprocessing.cpu_count() kwargs.setdefault('max_spare_txns', max_spare_txns) # normalize path path = os.path.abspath(path) # open database > self.db = lmdb.open(path, **kwargs) E lmdb.MemoryError: /tmp/tmpmepe6yv2.lmdb: Cannot allocate memory zarr/storage.py:2223: MemoryError ___________ TestArrayWithLMDBStoreNoBuffers.test_array_1d_fill_value ___________ self = def test_array_1d_fill_value(self): for fill_value in -1, 0, 1, 10: a = np.arange(1050) f = np.empty_like(a) f.fill(fill_value) > z = self.create_array(shape=a.shape, chunks=100, dtype=a.dtype, fill_value=fill_value) zarr/tests/test_core.py:291: _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ zarr/tests/test_core.py:2131: in create_array store = LMDBStore(path, buffers=False) _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ self = path = '/tmp/tmp6x3rhjhi.lmdb', buffers = False, dimension_separator = None kwargs = {'map_async': False, 'map_size': 1099511627776, 'max_spare_txns': 8, 'meminit': False, ...} lmdb = map_size = 1099511627776, writemap = True, max_spare_txns = 8 def __init__(self, path, buffers=True, dimension_separator=None, **kwargs): import lmdb # set default memory map size to something larger than the lmdb default, which is # very likely to be too small for any moderate array (logic copied from zict) map_size = (2**40 if sys.maxsize >= 2**32 else 2**28) kwargs.setdefault('map_size', map_size) # don't initialize buffers to zero by default, shouldn't be necessary kwargs.setdefault('meminit', False) # decide whether to use the writemap option based on the operating system's # support for sparse files - writemap requires sparse file support otherwise # the whole# `map_size` may be reserved up front on disk (logic copied from zict) writemap = sys.platform.startswith('linux') kwargs.setdefault('writemap', writemap) # decide options for when data are flushed to disk - choose to delay syncing # data to filesystem, otherwise pay a large performance penalty (zict also does # this) kwargs.setdefault('metasync', False) kwargs.setdefault('sync', False) kwargs.setdefault('map_async', False) # set default option for number of cached transactions max_spare_txns = multiprocessing.cpu_count() kwargs.setdefault('max_spare_txns', max_spare_txns) # normalize path path = os.path.abspath(path) # open database > self.db = lmdb.open(path, **kwargs) E lmdb.MemoryError: /tmp/tmp6x3rhjhi.lmdb: Cannot allocate memory zarr/storage.py:2223: MemoryError ___________ TestArrayWithLMDBStoreNoBuffers.test_array_1d_selections ___________ self = def test_array_1d_selections(self): # light test here, full tests in test_indexing # setup a = np.arange(1050) > z = self.create_array(shape=a.shape, chunks=100, dtype=a.dtype) zarr/tests/test_core.py:325: _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ zarr/tests/test_core.py:2131: in create_array store = LMDBStore(path, buffers=False) _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ self = path = '/tmp/tmpk43iopu4.lmdb', buffers = False, dimension_separator = None kwargs = {'map_async': False, 'map_size': 1099511627776, 'max_spare_txns': 8, 'meminit': False, ...} lmdb = map_size = 1099511627776, writemap = True, max_spare_txns = 8 def __init__(self, path, buffers=True, dimension_separator=None, **kwargs): import lmdb # set default memory map size to something larger than the lmdb default, which is # very likely to be too small for any moderate array (logic copied from zict) map_size = (2**40 if sys.maxsize >= 2**32 else 2**28) kwargs.setdefault('map_size', map_size) # don't initialize buffers to zero by default, shouldn't be necessary kwargs.setdefault('meminit', False) # decide whether to use the writemap option based on the operating system's # support for sparse files - writemap requires sparse file support otherwise # the whole# `map_size` may be reserved up front on disk (logic copied from zict) writemap = sys.platform.startswith('linux') kwargs.setdefault('writemap', writemap) # decide options for when data are flushed to disk - choose to delay syncing # data to filesystem, otherwise pay a large performance penalty (zict also does # this) kwargs.setdefault('metasync', False) kwargs.setdefault('sync', False) kwargs.setdefault('map_async', False) # set default option for number of cached transactions max_spare_txns = multiprocessing.cpu_count() kwargs.setdefault('max_spare_txns', max_spare_txns) # normalize path path = os.path.abspath(path) # open database > self.db = lmdb.open(path, **kwargs) E lmdb.MemoryError: /tmp/tmpk43iopu4.lmdb: Cannot allocate memory zarr/storage.py:2223: MemoryError ___________ TestArrayWithLMDBStoreNoBuffers.test_array_1d_set_scalar ___________ self = def test_array_1d_set_scalar(self): # test setting the contents of an array with a scalar value # setup a = np.zeros(100) > z = self.create_array(shape=a.shape, chunks=10, dtype=a.dtype) zarr/tests/test_core.py:306: _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ zarr/tests/test_core.py:2131: in create_array store = LMDBStore(path, buffers=False) _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ self = path = '/tmp/tmp7otzdzje.lmdb', buffers = False, dimension_separator = None kwargs = {'map_async': False, 'map_size': 1099511627776, 'max_spare_txns': 8, 'meminit': False, ...} lmdb = map_size = 1099511627776, writemap = True, max_spare_txns = 8 def __init__(self, path, buffers=True, dimension_separator=None, **kwargs): import lmdb # set default memory map size to something larger than the lmdb default, which is # very likely to be too small for any moderate array (logic copied from zict) map_size = (2**40 if sys.maxsize >= 2**32 else 2**28) kwargs.setdefault('map_size', map_size) # don't initialize buffers to zero by default, shouldn't be necessary kwargs.setdefault('meminit', False) # decide whether to use the writemap option based on the operating system's # support for sparse files - writemap requires sparse file support otherwise # the whole# `map_size` may be reserved up front on disk (logic copied from zict) writemap = sys.platform.startswith('linux') kwargs.setdefault('writemap', writemap) # decide options for when data are flushed to disk - choose to delay syncing # data to filesystem, otherwise pay a large performance penalty (zict also does # this) kwargs.setdefault('metasync', False) kwargs.setdefault('sync', False) kwargs.setdefault('map_async', False) # set default option for number of cached transactions max_spare_txns = multiprocessing.cpu_count() kwargs.setdefault('max_spare_txns', max_spare_txns) # normalize path path = os.path.abspath(path) # open database > self.db = lmdb.open(path, **kwargs) E lmdb.MemoryError: /tmp/tmp7otzdzje.lmdb: Cannot allocate memory zarr/storage.py:2223: MemoryError ________________ TestArrayWithLMDBStoreNoBuffers.test_array_2d _________________ self = def test_array_2d(self): a = np.arange(10000).reshape((1000, 10)) > z = self.create_array(shape=a.shape, chunks=(100, 2), dtype=a.dtype) zarr/tests/test_core.py:367: _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ zarr/tests/test_core.py:2131: in create_array store = LMDBStore(path, buffers=False) _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ self = path = '/tmp/tmptoh2zvnk.lmdb', buffers = False, dimension_separator = None kwargs = {'map_async': False, 'map_size': 1099511627776, 'max_spare_txns': 8, 'meminit': False, ...} lmdb = map_size = 1099511627776, writemap = True, max_spare_txns = 8 def __init__(self, path, buffers=True, dimension_separator=None, **kwargs): import lmdb # set default memory map size to something larger than the lmdb default, which is # very likely to be too small for any moderate array (logic copied from zict) map_size = (2**40 if sys.maxsize >= 2**32 else 2**28) kwargs.setdefault('map_size', map_size) # don't initialize buffers to zero by default, shouldn't be necessary kwargs.setdefault('meminit', False) # decide whether to use the writemap option based on the operating system's # support for sparse files - writemap requires sparse file support otherwise # the whole# `map_size` may be reserved up front on disk (logic copied from zict) writemap = sys.platform.startswith('linux') kwargs.setdefault('writemap', writemap) # decide options for when data are flushed to disk - choose to delay syncing # data to filesystem, otherwise pay a large performance penalty (zict also does # this) kwargs.setdefault('metasync', False) kwargs.setdefault('sync', False) kwargs.setdefault('map_async', False) # set default option for number of cached transactions max_spare_txns = multiprocessing.cpu_count() kwargs.setdefault('max_spare_txns', max_spare_txns) # normalize path path = os.path.abspath(path) # open database > self.db = lmdb.open(path, **kwargs) E lmdb.MemoryError: /tmp/tmptoh2zvnk.lmdb: Cannot allocate memory zarr/storage.py:2223: MemoryError ___________ TestArrayWithLMDBStoreNoBuffers.test_array_2d_edge_case ____________ self = def test_array_2d_edge_case(self): # this fails with filters - chunks extend beyond edge of array, messes with delta # filter if no fill value? shape = 1000, 10 chunks = 300, 30 dtype = 'i8' > z = self.create_array(shape=shape, dtype=dtype, chunks=chunks) zarr/tests/test_core.py:480: _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ zarr/tests/test_core.py:2131: in create_array store = LMDBStore(path, buffers=False) _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ self = path = '/tmp/tmp3ro9_yu2.lmdb', buffers = False, dimension_separator = None kwargs = {'map_async': False, 'map_size': 1099511627776, 'max_spare_txns': 8, 'meminit': False, ...} lmdb = map_size = 1099511627776, writemap = True, max_spare_txns = 8 def __init__(self, path, buffers=True, dimension_separator=None, **kwargs): import lmdb # set default memory map size to something larger than the lmdb default, which is # very likely to be too small for any moderate array (logic copied from zict) map_size = (2**40 if sys.maxsize >= 2**32 else 2**28) kwargs.setdefault('map_size', map_size) # don't initialize buffers to zero by default, shouldn't be necessary kwargs.setdefault('meminit', False) # decide whether to use the writemap option based on the operating system's # support for sparse files - writemap requires sparse file support otherwise # the whole# `map_size` may be reserved up front on disk (logic copied from zict) writemap = sys.platform.startswith('linux') kwargs.setdefault('writemap', writemap) # decide options for when data are flushed to disk - choose to delay syncing # data to filesystem, otherwise pay a large performance penalty (zict also does # this) kwargs.setdefault('metasync', False) kwargs.setdefault('sync', False) kwargs.setdefault('map_async', False) # set default option for number of cached transactions max_spare_txns = multiprocessing.cpu_count() kwargs.setdefault('max_spare_txns', max_spare_txns) # normalize path path = os.path.abspath(path) # open database > self.db = lmdb.open(path, **kwargs) E lmdb.MemoryError: /tmp/tmp3ro9_yu2.lmdb: Cannot allocate memory zarr/storage.py:2223: MemoryError ____________ TestArrayWithLMDBStoreNoBuffers.test_array_2d_partial _____________ self = def test_array_2d_partial(self): > z = self.create_array(shape=(1000, 10), chunks=(100, 2), dtype='i4', fill_value=0) zarr/tests/test_core.py:489: _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ zarr/tests/test_core.py:2131: in create_array store = LMDBStore(path, buffers=False) _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ self = path = '/tmp/tmplxc1q7lp.lmdb', buffers = False, dimension_separator = None kwargs = {'map_async': False, 'map_size': 1099511627776, 'max_spare_txns': 8, 'meminit': False, ...} lmdb = map_size = 1099511627776, writemap = True, max_spare_txns = 8 def __init__(self, path, buffers=True, dimension_separator=None, **kwargs): import lmdb # set default memory map size to something larger than the lmdb default, which is # very likely to be too small for any moderate array (logic copied from zict) map_size = (2**40 if sys.maxsize >= 2**32 else 2**28) kwargs.setdefault('map_size', map_size) # don't initialize buffers to zero by default, shouldn't be necessary kwargs.setdefault('meminit', False) # decide whether to use the writemap option based on the operating system's # support for sparse files - writemap requires sparse file support otherwise # the whole# `map_size` may be reserved up front on disk (logic copied from zict) writemap = sys.platform.startswith('linux') kwargs.setdefault('writemap', writemap) # decide options for when data are flushed to disk - choose to delay syncing # data to filesystem, otherwise pay a large performance penalty (zict also does # this) kwargs.setdefault('metasync', False) kwargs.setdefault('sync', False) kwargs.setdefault('map_async', False) # set default option for number of cached transactions max_spare_txns = multiprocessing.cpu_count() kwargs.setdefault('max_spare_txns', max_spare_txns) # normalize path path = os.path.abspath(path) # open database > self.db = lmdb.open(path, **kwargs) E lmdb.MemoryError: /tmp/tmplxc1q7lp.lmdb: Cannot allocate memory zarr/storage.py:2223: MemoryError ____________ TestArrayWithLMDBStoreNoBuffers.test_array_dtype_shape ____________ self = def test_array_dtype_shape(self): dt = "(2, 2)f4" # setup some data d = np.array([((0, 1), (1, 2)), ((1, 2), (2, 3)), ((2, 3), (3, 4))], dtype=dt) for a in (d, d[:0]): for fill_value in None, 0: > z = self.create_array(shape=a.shape[:-2], chunks=2, dtype=dt, fill_value=fill_value) zarr/tests/test_core.py:1051: _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ zarr/tests/test_core.py:2131: in create_array store = LMDBStore(path, buffers=False) _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ self = path = '/tmp/tmp4_dzkheb.lmdb', buffers = False, dimension_separator = None kwargs = {'map_async': False, 'map_size': 1099511627776, 'max_spare_txns': 8, 'meminit': False, ...} lmdb = map_size = 1099511627776, writemap = True, max_spare_txns = 8 def __init__(self, path, buffers=True, dimension_separator=None, **kwargs): import lmdb # set default memory map size to something larger than the lmdb default, which is # very likely to be too small for any moderate array (logic copied from zict) map_size = (2**40 if sys.maxsize >= 2**32 else 2**28) kwargs.setdefault('map_size', map_size) # don't initialize buffers to zero by default, shouldn't be necessary kwargs.setdefault('meminit', False) # decide whether to use the writemap option based on the operating system's # support for sparse files - writemap requires sparse file support otherwise # the whole# `map_size` may be reserved up front on disk (logic copied from zict) writemap = sys.platform.startswith('linux') kwargs.setdefault('writemap', writemap) # decide options for when data are flushed to disk - choose to delay syncing # data to filesystem, otherwise pay a large performance penalty (zict also does # this) kwargs.setdefault('metasync', False) kwargs.setdefault('sync', False) kwargs.setdefault('map_async', False) # set default option for number of cached transactions max_spare_txns = multiprocessing.cpu_count() kwargs.setdefault('max_spare_txns', max_spare_txns) # normalize path path = os.path.abspath(path) # open database > self.db = lmdb.open(path, **kwargs) E lmdb.MemoryError: /tmp/tmp4_dzkheb.lmdb: Cannot allocate memory zarr/storage.py:2223: MemoryError _______________ TestArrayWithLMDBStoreNoBuffers.test_array_order _______________ self = def test_array_order(self): # 1D a = np.arange(1050) for order in 'C', 'F': > z = self.create_array(shape=a.shape, chunks=100, dtype=a.dtype, order=order) zarr/tests/test_core.py:535: _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ zarr/tests/test_core.py:2131: in create_array store = LMDBStore(path, buffers=False) _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ self = path = '/tmp/tmp7dkcq6l7.lmdb', buffers = False, dimension_separator = None kwargs = {'map_async': False, 'map_size': 1099511627776, 'max_spare_txns': 8, 'meminit': False, ...} lmdb = map_size = 1099511627776, writemap = True, max_spare_txns = 8 def __init__(self, path, buffers=True, dimension_separator=None, **kwargs): import lmdb # set default memory map size to something larger than the lmdb default, which is # very likely to be too small for any moderate array (logic copied from zict) map_size = (2**40 if sys.maxsize >= 2**32 else 2**28) kwargs.setdefault('map_size', map_size) # don't initialize buffers to zero by default, shouldn't be necessary kwargs.setdefault('meminit', False) # decide whether to use the writemap option based on the operating system's # support for sparse files - writemap requires sparse file support otherwise # the whole# `map_size` may be reserved up front on disk (logic copied from zict) writemap = sys.platform.startswith('linux') kwargs.setdefault('writemap', writemap) # decide options for when data are flushed to disk - choose to delay syncing # data to filesystem, otherwise pay a large performance penalty (zict also does # this) kwargs.setdefault('metasync', False) kwargs.setdefault('sync', False) kwargs.setdefault('map_async', False) # set default option for number of cached transactions max_spare_txns = multiprocessing.cpu_count() kwargs.setdefault('max_spare_txns', max_spare_txns) # normalize path path = os.path.abspath(path) # open database > self.db = lmdb.open(path, **kwargs) E lmdb.MemoryError: /tmp/tmp7dkcq6l7.lmdb: Cannot allocate memory zarr/storage.py:2223: MemoryError _______________ TestArrayWithLMDBStoreNoBuffers.test_attributes ________________ self = def test_attributes(self): > a = self.create_array(shape=10, chunks=10, dtype='i8') zarr/tests/test_core.py:1539: _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ zarr/tests/test_core.py:2131: in create_array store = LMDBStore(path, buffers=False) _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ self = path = '/tmp/tmpq2fqn80_.lmdb', buffers = False, dimension_separator = None kwargs = {'map_async': False, 'map_size': 1099511627776, 'max_spare_txns': 8, 'meminit': False, ...} lmdb = map_size = 1099511627776, writemap = True, max_spare_txns = 8 def __init__(self, path, buffers=True, dimension_separator=None, **kwargs): import lmdb # set default memory map size to something larger than the lmdb default, which is # very likely to be too small for any moderate array (logic copied from zict) map_size = (2**40 if sys.maxsize >= 2**32 else 2**28) kwargs.setdefault('map_size', map_size) # don't initialize buffers to zero by default, shouldn't be necessary kwargs.setdefault('meminit', False) # decide whether to use the writemap option based on the operating system's # support for sparse files - writemap requires sparse file support otherwise # the whole# `map_size` may be reserved up front on disk (logic copied from zict) writemap = sys.platform.startswith('linux') kwargs.setdefault('writemap', writemap) # decide options for when data are flushed to disk - choose to delay syncing # data to filesystem, otherwise pay a large performance penalty (zict also does # this) kwargs.setdefault('metasync', False) kwargs.setdefault('sync', False) kwargs.setdefault('map_async', False) # set default option for number of cached transactions max_spare_txns = multiprocessing.cpu_count() kwargs.setdefault('max_spare_txns', max_spare_txns) # normalize path path = os.path.abspath(path) # open database > self.db = lmdb.open(path, **kwargs) E lmdb.MemoryError: /tmp/tmpq2fqn80_.lmdb: Cannot allocate memory zarr/storage.py:2223: MemoryError _______________ TestArrayWithLMDBStoreNoBuffers.test_compressors _______________ self = def test_compressors(self): compressors = [ None, BZ2(), Blosc(), LZ4(), Zlib(), GZip() ] if LZMA: compressors.append(LZMA()) for compressor in compressors: > a = self.create_array(shape=1000, chunks=100, compressor=compressor) zarr/tests/test_core.py:1519: _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ zarr/tests/test_core.py:2131: in create_array store = LMDBStore(path, buffers=False) _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ self = path = '/tmp/tmp84us_s48.lmdb', buffers = False, dimension_separator = None kwargs = {'map_async': False, 'map_size': 1099511627776, 'max_spare_txns': 8, 'meminit': False, ...} lmdb = map_size = 1099511627776, writemap = True, max_spare_txns = 8 def __init__(self, path, buffers=True, dimension_separator=None, **kwargs): import lmdb # set default memory map size to something larger than the lmdb default, which is # very likely to be too small for any moderate array (logic copied from zict) map_size = (2**40 if sys.maxsize >= 2**32 else 2**28) kwargs.setdefault('map_size', map_size) # don't initialize buffers to zero by default, shouldn't be necessary kwargs.setdefault('meminit', False) # decide whether to use the writemap option based on the operating system's # support for sparse files - writemap requires sparse file support otherwise # the whole# `map_size` may be reserved up front on disk (logic copied from zict) writemap = sys.platform.startswith('linux') kwargs.setdefault('writemap', writemap) # decide options for when data are flushed to disk - choose to delay syncing # data to filesystem, otherwise pay a large performance penalty (zict also does # this) kwargs.setdefault('metasync', False) kwargs.setdefault('sync', False) kwargs.setdefault('map_async', False) # set default option for number of cached transactions max_spare_txns = multiprocessing.cpu_count() kwargs.setdefault('max_spare_txns', max_spare_txns) # normalize path path = os.path.abspath(path) # open database > self.db = lmdb.open(path, **kwargs) E lmdb.MemoryError: /tmp/tmp84us_s48.lmdb: Cannot allocate memory zarr/storage.py:2223: MemoryError _________________ TestArrayWithLMDBStoreNoBuffers.test_dtypes __________________ self = def test_dtypes(self): # integers for dtype in 'u1', 'u2', 'u4', 'u8', 'i1', 'i2', 'i4', 'i8': > z = self.create_array(shape=10, chunks=3, dtype=dtype) zarr/tests/test_core.py:1129: _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ zarr/tests/test_core.py:2131: in create_array store = LMDBStore(path, buffers=False) _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ self = path = '/tmp/tmpozhozyx8.lmdb', buffers = False, dimension_separator = None kwargs = {'map_async': False, 'map_size': 1099511627776, 'max_spare_txns': 8, 'meminit': False, ...} lmdb = map_size = 1099511627776, writemap = True, max_spare_txns = 8 def __init__(self, path, buffers=True, dimension_separator=None, **kwargs): import lmdb # set default memory map size to something larger than the lmdb default, which is # very likely to be too small for any moderate array (logic copied from zict) map_size = (2**40 if sys.maxsize >= 2**32 else 2**28) kwargs.setdefault('map_size', map_size) # don't initialize buffers to zero by default, shouldn't be necessary kwargs.setdefault('meminit', False) # decide whether to use the writemap option based on the operating system's # support for sparse files - writemap requires sparse file support otherwise # the whole# `map_size` may be reserved up front on disk (logic copied from zict) writemap = sys.platform.startswith('linux') kwargs.setdefault('writemap', writemap) # decide options for when data are flushed to disk - choose to delay syncing # data to filesystem, otherwise pay a large performance penalty (zict also does # this) kwargs.setdefault('metasync', False) kwargs.setdefault('sync', False) kwargs.setdefault('map_async', False) # set default option for number of cached transactions max_spare_txns = multiprocessing.cpu_count() kwargs.setdefault('max_spare_txns', max_spare_txns) # normalize path path = os.path.abspath(path) # open database > self.db = lmdb.open(path, **kwargs) E lmdb.MemoryError: /tmp/tmpozhozyx8.lmdb: Cannot allocate memory zarr/storage.py:2223: MemoryError _________________ TestArrayWithLMDBStoreNoBuffers.test_endian __________________ self = def test_endian(self): dtype = np.dtype('float32') > a1 = self.create_array(shape=1000, chunks=100, dtype=dtype.newbyteorder('<')) zarr/tests/test_core.py:1528: _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ zarr/tests/test_core.py:2131: in create_array store = LMDBStore(path, buffers=False) _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ self = path = '/tmp/tmpfe2_4rng.lmdb', buffers = False, dimension_separator = None kwargs = {'map_async': False, 'map_size': 1099511627776, 'max_spare_txns': 8, 'meminit': False, ...} lmdb = map_size = 1099511627776, writemap = True, max_spare_txns = 8 def __init__(self, path, buffers=True, dimension_separator=None, **kwargs): import lmdb # set default memory map size to something larger than the lmdb default, which is # very likely to be too small for any moderate array (logic copied from zict) map_size = (2**40 if sys.maxsize >= 2**32 else 2**28) kwargs.setdefault('map_size', map_size) # don't initialize buffers to zero by default, shouldn't be necessary kwargs.setdefault('meminit', False) # decide whether to use the writemap option based on the operating system's # support for sparse files - writemap requires sparse file support otherwise # the whole# `map_size` may be reserved up front on disk (logic copied from zict) writemap = sys.platform.startswith('linux') kwargs.setdefault('writemap', writemap) # decide options for when data are flushed to disk - choose to delay syncing # data to filesystem, otherwise pay a large performance penalty (zict also does # this) kwargs.setdefault('metasync', False) kwargs.setdefault('sync', False) kwargs.setdefault('map_async', False) # set default option for number of cached transactions max_spare_txns = multiprocessing.cpu_count() kwargs.setdefault('max_spare_txns', max_spare_txns) # normalize path path = os.path.abspath(path) # open database > self.db = lmdb.open(path, **kwargs) E lmdb.MemoryError: /tmp/tmpfe2_4rng.lmdb: Cannot allocate memory zarr/storage.py:2223: MemoryError ________________ TestArrayWithLMDBStoreNoBuffers.test_hexdigest ________________ self = def test_hexdigest(self): found = [] # Check basic 1-D array > z = self.create_array(shape=(1050,), chunks=100, dtype=' path = '/tmp/tmp2lwbyjed.lmdb', buffers = False, dimension_separator = None kwargs = {'map_async': False, 'map_size': 1099511627776, 'max_spare_txns': 8, 'meminit': False, ...} lmdb = map_size = 1099511627776, writemap = True, max_spare_txns = 8 def __init__(self, path, buffers=True, dimension_separator=None, **kwargs): import lmdb # set default memory map size to something larger than the lmdb default, which is # very likely to be too small for any moderate array (logic copied from zict) map_size = (2**40 if sys.maxsize >= 2**32 else 2**28) kwargs.setdefault('map_size', map_size) # don't initialize buffers to zero by default, shouldn't be necessary kwargs.setdefault('meminit', False) # decide whether to use the writemap option based on the operating system's # support for sparse files - writemap requires sparse file support otherwise # the whole# `map_size` may be reserved up front on disk (logic copied from zict) writemap = sys.platform.startswith('linux') kwargs.setdefault('writemap', writemap) # decide options for when data are flushed to disk - choose to delay syncing # data to filesystem, otherwise pay a large performance penalty (zict also does # this) kwargs.setdefault('metasync', False) kwargs.setdefault('sync', False) kwargs.setdefault('map_async', False) # set default option for number of cached transactions max_spare_txns = multiprocessing.cpu_count() kwargs.setdefault('max_spare_txns', max_spare_txns) # normalize path path = os.path.abspath(path) # open database > self.db = lmdb.open(path, **kwargs) E lmdb.MemoryError: /tmp/tmp2lwbyjed.lmdb: Cannot allocate memory zarr/storage.py:2223: MemoryError _________________ TestArrayWithLMDBStoreNoBuffers.test_islice __________________ self = def test_islice(self): params = ( ((1,), (1,), 0, 1), ((2,), (1,), 0, 1), ((1,), (2,), 0, 1), ((3,), (3,), 1, 2), ((1000,), (100,), 150, 1050), ((100,), (1000,), 25, 75), ((1, 100), (1, 1), 0, 1), ((100, 1), (3, 1), 56, 100), ((100, 100), (10, 10), 13, 99), ((10, 10, 10), (3, 3, 3), 2, 4), ) for shape, chunks, start, end in params: > z = self.create_array(shape=shape, chunks=chunks, dtype=int) zarr/tests/test_core.py:1502: _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ zarr/tests/test_core.py:2131: in create_array store = LMDBStore(path, buffers=False) _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ self = path = '/tmp/tmps0ocxp94.lmdb', buffers = False, dimension_separator = None kwargs = {'map_async': False, 'map_size': 1099511627776, 'max_spare_txns': 8, 'meminit': False, ...} lmdb = map_size = 1099511627776, writemap = True, max_spare_txns = 8 def __init__(self, path, buffers=True, dimension_separator=None, **kwargs): import lmdb # set default memory map size to something larger than the lmdb default, which is # very likely to be too small for any moderate array (logic copied from zict) map_size = (2**40 if sys.maxsize >= 2**32 else 2**28) kwargs.setdefault('map_size', map_size) # don't initialize buffers to zero by default, shouldn't be necessary kwargs.setdefault('meminit', False) # decide whether to use the writemap option based on the operating system's # support for sparse files - writemap requires sparse file support otherwise # the whole# `map_size` may be reserved up front on disk (logic copied from zict) writemap = sys.platform.startswith('linux') kwargs.setdefault('writemap', writemap) # decide options for when data are flushed to disk - choose to delay syncing # data to filesystem, otherwise pay a large performance penalty (zict also does # this) kwargs.setdefault('metasync', False) kwargs.setdefault('sync', False) kwargs.setdefault('map_async', False) # set default option for number of cached transactions max_spare_txns = multiprocessing.cpu_count() kwargs.setdefault('max_spare_txns', max_spare_txns) # normalize path path = os.path.abspath(path) # open database > self.db = lmdb.open(path, **kwargs) E lmdb.MemoryError: /tmp/tmps0ocxp94.lmdb: Cannot allocate memory zarr/storage.py:2223: MemoryError __________________ TestArrayWithLMDBStoreNoBuffers.test_iter ___________________ self = def test_iter(self): params = ( ((1,), (1,)), ((2,), (1,)), ((1,), (2,)), ((3,), (3,)), ((1000,), (100,)), ((100,), (1000,)), ((1, 100), (1, 1)), ((1, 0), (1, 1)), ((0, 1), (1, 1)), ((0, 1), (2, 1)), ((100, 1), (3, 1)), ((100, 100), (10, 10)), ((10, 10, 10), (3, 3, 3)), ) for shape, chunks in params: > z = self.create_array(shape=shape, chunks=chunks, dtype=int) zarr/tests/test_core.py:1481: _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ zarr/tests/test_core.py:2131: in create_array store = LMDBStore(path, buffers=False) _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ self = path = '/tmp/tmpky5n_6fe.lmdb', buffers = False, dimension_separator = None kwargs = {'map_async': False, 'map_size': 1099511627776, 'max_spare_txns': 8, 'meminit': False, ...} lmdb = map_size = 1099511627776, writemap = True, max_spare_txns = 8 def __init__(self, path, buffers=True, dimension_separator=None, **kwargs): import lmdb # set default memory map size to something larger than the lmdb default, which is # very likely to be too small for any moderate array (logic copied from zict) map_size = (2**40 if sys.maxsize >= 2**32 else 2**28) kwargs.setdefault('map_size', map_size) # don't initialize buffers to zero by default, shouldn't be necessary kwargs.setdefault('meminit', False) # decide whether to use the writemap option based on the operating system's # support for sparse files - writemap requires sparse file support otherwise # the whole# `map_size` may be reserved up front on disk (logic copied from zict) writemap = sys.platform.startswith('linux') kwargs.setdefault('writemap', writemap) # decide options for when data are flushed to disk - choose to delay syncing # data to filesystem, otherwise pay a large performance penalty (zict also does # this) kwargs.setdefault('metasync', False) kwargs.setdefault('sync', False) kwargs.setdefault('map_async', False) # set default option for number of cached transactions max_spare_txns = multiprocessing.cpu_count() kwargs.setdefault('max_spare_txns', max_spare_txns) # normalize path path = os.path.abspath(path) # open database > self.db = lmdb.open(path, **kwargs) E lmdb.MemoryError: /tmp/tmpky5n_6fe.lmdb: Cannot allocate memory zarr/storage.py:2223: MemoryError __________ TestArrayWithLMDBStoreNoBuffers.test_iteration_exceptions ___________ self = def test_iteration_exceptions(self): # zero d array a = np.array(1, dtype=int) > z = self.create_array(shape=a.shape, dtype=int) zarr/tests/test_core.py:1433: _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ zarr/tests/test_core.py:2131: in create_array store = LMDBStore(path, buffers=False) _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ self = path = '/tmp/tmpy87mjjdm.lmdb', buffers = False, dimension_separator = None kwargs = {'map_async': False, 'map_size': 1099511627776, 'max_spare_txns': 8, 'meminit': False, ...} lmdb = map_size = 1099511627776, writemap = True, max_spare_txns = 8 def __init__(self, path, buffers=True, dimension_separator=None, **kwargs): import lmdb # set default memory map size to something larger than the lmdb default, which is # very likely to be too small for any moderate array (logic copied from zict) map_size = (2**40 if sys.maxsize >= 2**32 else 2**28) kwargs.setdefault('map_size', map_size) # don't initialize buffers to zero by default, shouldn't be necessary kwargs.setdefault('meminit', False) # decide whether to use the writemap option based on the operating system's # support for sparse files - writemap requires sparse file support otherwise # the whole# `map_size` may be reserved up front on disk (logic copied from zict) writemap = sys.platform.startswith('linux') kwargs.setdefault('writemap', writemap) # decide options for when data are flushed to disk - choose to delay syncing # data to filesystem, otherwise pay a large performance penalty (zict also does # this) kwargs.setdefault('metasync', False) kwargs.setdefault('sync', False) kwargs.setdefault('map_async', False) # set default option for number of cached transactions max_spare_txns = multiprocessing.cpu_count() kwargs.setdefault('max_spare_txns', max_spare_txns) # normalize path path = os.path.abspath(path) # open database > self.db = lmdb.open(path, **kwargs) E lmdb.MemoryError: /tmp/tmpy87mjjdm.lmdb: Cannot allocate memory zarr/storage.py:2223: MemoryError ___________ TestArrayWithLMDBStoreNoBuffers.test_nchunks_initialized ___________ self = def test_nchunks_initialized(self): for fill_value in (0, 1.0, np.nan): if isinstance(fill_value, int): dtype = 'int' else: dtype = 'float' > z = self.create_array(shape=100, chunks=10, fill_value=fill_value, dtype=dtype, write_empty_chunks=True) zarr/tests/test_core.py:1019: _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ zarr/tests/test_core.py:2131: in create_array store = LMDBStore(path, buffers=False) _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ self = path = '/tmp/tmpqc41ylos.lmdb', buffers = False, dimension_separator = None kwargs = {'map_async': False, 'map_size': 1099511627776, 'max_spare_txns': 8, 'meminit': False, ...} lmdb = map_size = 1099511627776, writemap = True, max_spare_txns = 8 def __init__(self, path, buffers=True, dimension_separator=None, **kwargs): import lmdb # set default memory map size to something larger than the lmdb default, which is # very likely to be too small for any moderate array (logic copied from zict) map_size = (2**40 if sys.maxsize >= 2**32 else 2**28) kwargs.setdefault('map_size', map_size) # don't initialize buffers to zero by default, shouldn't be necessary kwargs.setdefault('meminit', False) # decide whether to use the writemap option based on the operating system's # support for sparse files - writemap requires sparse file support otherwise # the whole# `map_size` may be reserved up front on disk (logic copied from zict) writemap = sys.platform.startswith('linux') kwargs.setdefault('writemap', writemap) # decide options for when data are flushed to disk - choose to delay syncing # data to filesystem, otherwise pay a large performance penalty (zict also does # this) kwargs.setdefault('metasync', False) kwargs.setdefault('sync', False) kwargs.setdefault('map_async', False) # set default option for number of cached transactions max_spare_txns = multiprocessing.cpu_count() kwargs.setdefault('max_spare_txns', max_spare_txns) # normalize path path = os.path.abspath(path) # open database > self.db = lmdb.open(path, **kwargs) E lmdb.MemoryError: /tmp/tmpqc41ylos.lmdb: Cannot allocate memory zarr/storage.py:2223: MemoryError ________________ TestArrayWithLMDBStoreNoBuffers.test_np_ufuncs ________________ self = def test_np_ufuncs(self): > z = self.create_array(shape=(100, 100), chunks=(10, 10)) zarr/tests/test_core.py:855: _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ zarr/tests/test_core.py:2131: in create_array store = LMDBStore(path, buffers=False) _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ self = path = '/tmp/tmp_s_xgy51.lmdb', buffers = False, dimension_separator = None kwargs = {'map_async': False, 'map_size': 1099511627776, 'max_spare_txns': 8, 'meminit': False, ...} lmdb = map_size = 1099511627776, writemap = True, max_spare_txns = 8 def __init__(self, path, buffers=True, dimension_separator=None, **kwargs): import lmdb # set default memory map size to something larger than the lmdb default, which is # very likely to be too small for any moderate array (logic copied from zict) map_size = (2**40 if sys.maxsize >= 2**32 else 2**28) kwargs.setdefault('map_size', map_size) # don't initialize buffers to zero by default, shouldn't be necessary kwargs.setdefault('meminit', False) # decide whether to use the writemap option based on the operating system's # support for sparse files - writemap requires sparse file support otherwise # the whole# `map_size` may be reserved up front on disk (logic copied from zict) writemap = sys.platform.startswith('linux') kwargs.setdefault('writemap', writemap) # decide options for when data are flushed to disk - choose to delay syncing # data to filesystem, otherwise pay a large performance penalty (zict also does # this) kwargs.setdefault('metasync', False) kwargs.setdefault('sync', False) kwargs.setdefault('map_async', False) # set default option for number of cached transactions max_spare_txns = multiprocessing.cpu_count() kwargs.setdefault('max_spare_txns', max_spare_txns) # normalize path path = os.path.abspath(path) # open database > self.db = lmdb.open(path, **kwargs) E lmdb.MemoryError: /tmp/tmp_s_xgy51.lmdb: Cannot allocate memory zarr/storage.py:2223: MemoryError ______________ TestArrayWithLMDBStoreNoBuffers.test_object_arrays ______________ self = def test_object_arrays(self): # an object_codec is required for object arrays with pytest.raises(ValueError): > self.create_array(shape=10, chunks=3, dtype=object) zarr/tests/test_core.py:1191: _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ zarr/tests/test_core.py:2131: in create_array store = LMDBStore(path, buffers=False) _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ self = path = '/tmp/tmpds7aew15.lmdb', buffers = False, dimension_separator = None kwargs = {'map_async': False, 'map_size': 1099511627776, 'max_spare_txns': 8, 'meminit': False, ...} lmdb = map_size = 1099511627776, writemap = True, max_spare_txns = 8 def __init__(self, path, buffers=True, dimension_separator=None, **kwargs): import lmdb # set default memory map size to something larger than the lmdb default, which is # very likely to be too small for any moderate array (logic copied from zict) map_size = (2**40 if sys.maxsize >= 2**32 else 2**28) kwargs.setdefault('map_size', map_size) # don't initialize buffers to zero by default, shouldn't be necessary kwargs.setdefault('meminit', False) # decide whether to use the writemap option based on the operating system's # support for sparse files - writemap requires sparse file support otherwise # the whole# `map_size` may be reserved up front on disk (logic copied from zict) writemap = sys.platform.startswith('linux') kwargs.setdefault('writemap', writemap) # decide options for when data are flushed to disk - choose to delay syncing # data to filesystem, otherwise pay a large performance penalty (zict also does # this) kwargs.setdefault('metasync', False) kwargs.setdefault('sync', False) kwargs.setdefault('map_async', False) # set default option for number of cached transactions max_spare_txns = multiprocessing.cpu_count() kwargs.setdefault('max_spare_txns', max_spare_txns) # normalize path path = os.path.abspath(path) # open database > self.db = lmdb.open(path, **kwargs) E lmdb.MemoryError: /tmp/tmpds7aew15.lmdb: Cannot allocate memory zarr/storage.py:2223: MemoryError __________ TestArrayWithLMDBStoreNoBuffers.test_object_arrays_danger ___________ self = def test_object_arrays_danger(self): # do something dangerous - manually force an object array with no object codec > z = self.create_array(shape=5, chunks=2, dtype=object, fill_value=0, object_codec=MsgPack()) zarr/tests/test_core.py:1362: _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ zarr/tests/test_core.py:2131: in create_array store = LMDBStore(path, buffers=False) _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ self = path = '/tmp/tmpqjthzhfp.lmdb', buffers = False, dimension_separator = None kwargs = {'map_async': False, 'map_size': 1099511627776, 'max_spare_txns': 8, 'meminit': False, ...} lmdb = map_size = 1099511627776, writemap = True, max_spare_txns = 8 def __init__(self, path, buffers=True, dimension_separator=None, **kwargs): import lmdb # set default memory map size to something larger than the lmdb default, which is # very likely to be too small for any moderate array (logic copied from zict) map_size = (2**40 if sys.maxsize >= 2**32 else 2**28) kwargs.setdefault('map_size', map_size) # don't initialize buffers to zero by default, shouldn't be necessary kwargs.setdefault('meminit', False) # decide whether to use the writemap option based on the operating system's # support for sparse files - writemap requires sparse file support otherwise # the whole# `map_size` may be reserved up front on disk (logic copied from zict) writemap = sys.platform.startswith('linux') kwargs.setdefault('writemap', writemap) # decide options for when data are flushed to disk - choose to delay syncing # data to filesystem, otherwise pay a large performance penalty (zict also does # this) kwargs.setdefault('metasync', False) kwargs.setdefault('sync', False) kwargs.setdefault('map_async', False) # set default option for number of cached transactions max_spare_txns = multiprocessing.cpu_count() kwargs.setdefault('max_spare_txns', max_spare_txns) # normalize path path = os.path.abspath(path) # open database > self.db = lmdb.open(path, **kwargs) E lmdb.MemoryError: /tmp/tmpqjthzhfp.lmdb: Cannot allocate memory zarr/storage.py:2223: MemoryError ________ TestArrayWithLMDBStoreNoBuffers.test_object_arrays_vlen_array _________ self = def test_object_arrays_vlen_array(self): data = np.array([np.array([1, 3, 7]), np.array([5]), np.array([2, 8, 12])] * 1000, dtype=object) def compare_arrays(expected, actual, item_dtype): assert isinstance(actual, np.ndarray) assert actual.dtype == object assert actual.shape == expected.shape for ev, av in zip(expected.flat, actual.flat): assert isinstance(av, np.ndarray) assert_array_equal(ev, av) assert av.dtype == item_dtype codecs = VLenArray(int), VLenArray(' z = self.create_array(shape=data.shape, dtype=object, object_codec=codec) zarr/tests/test_core.py:1340: _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ zarr/tests/test_core.py:2131: in create_array store = LMDBStore(path, buffers=False) _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ self = path = '/tmp/tmpvzt1916a.lmdb', buffers = False, dimension_separator = None kwargs = {'map_async': False, 'map_size': 1099511627776, 'max_spare_txns': 8, 'meminit': False, ...} lmdb = map_size = 1099511627776, writemap = True, max_spare_txns = 8 def __init__(self, path, buffers=True, dimension_separator=None, **kwargs): import lmdb # set default memory map size to something larger than the lmdb default, which is # very likely to be too small for any moderate array (logic copied from zict) map_size = (2**40 if sys.maxsize >= 2**32 else 2**28) kwargs.setdefault('map_size', map_size) # don't initialize buffers to zero by default, shouldn't be necessary kwargs.setdefault('meminit', False) # decide whether to use the writemap option based on the operating system's # support for sparse files - writemap requires sparse file support otherwise # the whole# `map_size` may be reserved up front on disk (logic copied from zict) writemap = sys.platform.startswith('linux') kwargs.setdefault('writemap', writemap) # decide options for when data are flushed to disk - choose to delay syncing # data to filesystem, otherwise pay a large performance penalty (zict also does # this) kwargs.setdefault('metasync', False) kwargs.setdefault('sync', False) kwargs.setdefault('map_async', False) # set default option for number of cached transactions max_spare_txns = multiprocessing.cpu_count() kwargs.setdefault('max_spare_txns', max_spare_txns) # normalize path path = os.path.abspath(path) # open database > self.db = lmdb.open(path, **kwargs) E lmdb.MemoryError: /tmp/tmpvzt1916a.lmdb: Cannot allocate memory zarr/storage.py:2223: MemoryError ________ TestArrayWithLMDBStoreNoBuffers.test_object_arrays_vlen_bytes _________ self = def test_object_arrays_vlen_bytes(self): greetings_bytes = [g.encode('utf8') for g in greetings] data = np.array(greetings_bytes * 1000, dtype=object) > z = self.create_array(shape=data.shape, dtype=object, object_codec=VLenBytes()) zarr/tests/test_core.py:1297: _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ zarr/tests/test_core.py:2131: in create_array store = LMDBStore(path, buffers=False) _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ self = path = '/tmp/tmpbr0fl3kk.lmdb', buffers = False, dimension_separator = None kwargs = {'map_async': False, 'map_size': 1099511627776, 'max_spare_txns': 8, 'meminit': False, ...} lmdb = map_size = 1099511627776, writemap = True, max_spare_txns = 8 def __init__(self, path, buffers=True, dimension_separator=None, **kwargs): import lmdb # set default memory map size to something larger than the lmdb default, which is # very likely to be too small for any moderate array (logic copied from zict) map_size = (2**40 if sys.maxsize >= 2**32 else 2**28) kwargs.setdefault('map_size', map_size) # don't initialize buffers to zero by default, shouldn't be necessary kwargs.setdefault('meminit', False) # decide whether to use the writemap option based on the operating system's # support for sparse files - writemap requires sparse file support otherwise # the whole# `map_size` may be reserved up front on disk (logic copied from zict) writemap = sys.platform.startswith('linux') kwargs.setdefault('writemap', writemap) # decide options for when data are flushed to disk - choose to delay syncing # data to filesystem, otherwise pay a large performance penalty (zict also does # this) kwargs.setdefault('metasync', False) kwargs.setdefault('sync', False) kwargs.setdefault('map_async', False) # set default option for number of cached transactions max_spare_txns = multiprocessing.cpu_count() kwargs.setdefault('max_spare_txns', max_spare_txns) # normalize path path = os.path.abspath(path) # open database > self.db = lmdb.open(path, **kwargs) E lmdb.MemoryError: /tmp/tmpbr0fl3kk.lmdb: Cannot allocate memory zarr/storage.py:2223: MemoryError _________ TestArrayWithLMDBStoreNoBuffers.test_object_arrays_vlen_text _________ self = def test_object_arrays_vlen_text(self): data = np.array(greetings * 1000, dtype=object) > z = self.create_array(shape=data.shape, dtype=object, object_codec=VLenUTF8()) zarr/tests/test_core.py:1250: _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ zarr/tests/test_core.py:2131: in create_array store = LMDBStore(path, buffers=False) _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ self = path = '/tmp/tmphvghwt70.lmdb', buffers = False, dimension_separator = None kwargs = {'map_async': False, 'map_size': 1099511627776, 'max_spare_txns': 8, 'meminit': False, ...} lmdb = map_size = 1099511627776, writemap = True, max_spare_txns = 8 def __init__(self, path, buffers=True, dimension_separator=None, **kwargs): import lmdb # set default memory map size to something larger than the lmdb default, which is # very likely to be too small for any moderate array (logic copied from zict) map_size = (2**40 if sys.maxsize >= 2**32 else 2**28) kwargs.setdefault('map_size', map_size) # don't initialize buffers to zero by default, shouldn't be necessary kwargs.setdefault('meminit', False) # decide whether to use the writemap option based on the operating system's # support for sparse files - writemap requires sparse file support otherwise # the whole# `map_size` may be reserved up front on disk (logic copied from zict) writemap = sys.platform.startswith('linux') kwargs.setdefault('writemap', writemap) # decide options for when data are flushed to disk - choose to delay syncing # data to filesystem, otherwise pay a large performance penalty (zict also does # this) kwargs.setdefault('metasync', False) kwargs.setdefault('sync', False) kwargs.setdefault('map_async', False) # set default option for number of cached transactions max_spare_txns = multiprocessing.cpu_count() kwargs.setdefault('max_spare_txns', max_spare_txns) # normalize path path = os.path.abspath(path) # open database > self.db = lmdb.open(path, **kwargs) E lmdb.MemoryError: /tmp/tmphvghwt70.lmdb: Cannot allocate memory zarr/storage.py:2223: MemoryError __________ TestArrayWithLMDBStoreNoBuffers.test_object_codec_warnings __________ self = def test_object_codec_warnings(self): with pytest.warns(UserWarning): # provide object_codec, but not object dtype > z = self.create_array(shape=10, chunks=5, dtype="i4", object_codec=JSON()) zarr/tests/test_core.py:1389: _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ zarr/tests/test_core.py:2131: in create_array store = LMDBStore(path, buffers=False) _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ self = path = '/tmp/tmpbx38jrhd.lmdb', buffers = False, dimension_separator = None kwargs = {'map_async': False, 'map_size': 1099511627776, 'max_spare_txns': 8, 'meminit': False, ...} lmdb = map_size = 1099511627776, writemap = True, max_spare_txns = 8 def __init__(self, path, buffers=True, dimension_separator=None, **kwargs): import lmdb # set default memory map size to something larger than the lmdb default, which is # very likely to be too small for any moderate array (logic copied from zict) map_size = (2**40 if sys.maxsize >= 2**32 else 2**28) kwargs.setdefault('map_size', map_size) # don't initialize buffers to zero by default, shouldn't be necessary kwargs.setdefault('meminit', False) # decide whether to use the writemap option based on the operating system's # support for sparse files - writemap requires sparse file support otherwise # the whole# `map_size` may be reserved up front on disk (logic copied from zict) writemap = sys.platform.startswith('linux') kwargs.setdefault('writemap', writemap) # decide options for when data are flushed to disk - choose to delay syncing # data to filesystem, otherwise pay a large performance penalty (zict also does # this) kwargs.setdefault('metasync', False) kwargs.setdefault('sync', False) kwargs.setdefault('map_async', False) # set default option for number of cached transactions max_spare_txns = multiprocessing.cpu_count() kwargs.setdefault('max_spare_txns', max_spare_txns) # normalize path path = os.path.abspath(path) # open database > self.db = lmdb.open(path, **kwargs) E lmdb.MemoryError: /tmp/tmpbx38jrhd.lmdb: Cannot allocate memory zarr/storage.py:2223: MemoryError _________________ TestArrayWithLMDBStoreNoBuffers.test_pickle __________________ self = def test_pickle(self): # setup array > z = self.create_array(shape=1000, chunks=100, dtype=int, cache_metadata=False, cache_attrs=False) zarr/tests/test_core.py:821: _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ zarr/tests/test_core.py:2131: in create_array store = LMDBStore(path, buffers=False) _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ self = path = '/tmp/tmpps6dambg.lmdb', buffers = False, dimension_separator = None kwargs = {'map_async': False, 'map_size': 1099511627776, 'max_spare_txns': 8, 'meminit': False, ...} lmdb = map_size = 1099511627776, writemap = True, max_spare_txns = 8 def __init__(self, path, buffers=True, dimension_separator=None, **kwargs): import lmdb # set default memory map size to something larger than the lmdb default, which is # very likely to be too small for any moderate array (logic copied from zict) map_size = (2**40 if sys.maxsize >= 2**32 else 2**28) kwargs.setdefault('map_size', map_size) # don't initialize buffers to zero by default, shouldn't be necessary kwargs.setdefault('meminit', False) # decide whether to use the writemap option based on the operating system's # support for sparse files - writemap requires sparse file support otherwise # the whole# `map_size` may be reserved up front on disk (logic copied from zict) writemap = sys.platform.startswith('linux') kwargs.setdefault('writemap', writemap) # decide options for when data are flushed to disk - choose to delay syncing # data to filesystem, otherwise pay a large performance penalty (zict also does # this) kwargs.setdefault('metasync', False) kwargs.setdefault('sync', False) kwargs.setdefault('map_async', False) # set default option for number of cached transactions max_spare_txns = multiprocessing.cpu_count() kwargs.setdefault('max_spare_txns', max_spare_txns) # normalize path path = os.path.abspath(path) # open database > self.db = lmdb.open(path, **kwargs) E lmdb.MemoryError: /tmp/tmpps6dambg.lmdb: Cannot allocate memory zarr/storage.py:2223: MemoryError ________________ TestArrayWithLMDBStoreNoBuffers.test_read_only ________________ self = def test_read_only(self): > z = self.create_array(shape=1000, chunks=100) zarr/tests/test_core.py:791: _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ zarr/tests/test_core.py:2131: in create_array store = LMDBStore(path, buffers=False) _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ self = path = '/tmp/tmpvhf2caaw.lmdb', buffers = False, dimension_separator = None kwargs = {'map_async': False, 'map_size': 1099511627776, 'max_spare_txns': 8, 'meminit': False, ...} lmdb = map_size = 1099511627776, writemap = True, max_spare_txns = 8 def __init__(self, path, buffers=True, dimension_separator=None, **kwargs): import lmdb # set default memory map size to something larger than the lmdb default, which is # very likely to be too small for any moderate array (logic copied from zict) map_size = (2**40 if sys.maxsize >= 2**32 else 2**28) kwargs.setdefault('map_size', map_size) # don't initialize buffers to zero by default, shouldn't be necessary kwargs.setdefault('meminit', False) # decide whether to use the writemap option based on the operating system's # support for sparse files - writemap requires sparse file support otherwise # the whole# `map_size` may be reserved up front on disk (logic copied from zict) writemap = sys.platform.startswith('linux') kwargs.setdefault('writemap', writemap) # decide options for when data are flushed to disk - choose to delay syncing # data to filesystem, otherwise pay a large performance penalty (zict also does # this) kwargs.setdefault('metasync', False) kwargs.setdefault('sync', False) kwargs.setdefault('map_async', False) # set default option for number of cached transactions max_spare_txns = multiprocessing.cpu_count() kwargs.setdefault('max_spare_txns', max_spare_txns) # normalize path path = os.path.abspath(path) # open database > self.db = lmdb.open(path, **kwargs) E lmdb.MemoryError: /tmp/tmpvhf2caaw.lmdb: Cannot allocate memory zarr/storage.py:2223: MemoryError ________________ TestArrayWithLMDBStoreNoBuffers.test_resize_1d ________________ self = def test_resize_1d(self): > z = self.create_array(shape=105, chunks=10, dtype='i4', fill_value=0) zarr/tests/test_core.py:619: _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ zarr/tests/test_core.py:2131: in create_array store = LMDBStore(path, buffers=False) _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ self = path = '/tmp/tmpji60af12.lmdb', buffers = False, dimension_separator = None kwargs = {'map_async': False, 'map_size': 1099511627776, 'max_spare_txns': 8, 'meminit': False, ...} lmdb = map_size = 1099511627776, writemap = True, max_spare_txns = 8 def __init__(self, path, buffers=True, dimension_separator=None, **kwargs): import lmdb # set default memory map size to something larger than the lmdb default, which is # very likely to be too small for any moderate array (logic copied from zict) map_size = (2**40 if sys.maxsize >= 2**32 else 2**28) kwargs.setdefault('map_size', map_size) # don't initialize buffers to zero by default, shouldn't be necessary kwargs.setdefault('meminit', False) # decide whether to use the writemap option based on the operating system's # support for sparse files - writemap requires sparse file support otherwise # the whole# `map_size` may be reserved up front on disk (logic copied from zict) writemap = sys.platform.startswith('linux') kwargs.setdefault('writemap', writemap) # decide options for when data are flushed to disk - choose to delay syncing # data to filesystem, otherwise pay a large performance penalty (zict also does # this) kwargs.setdefault('metasync', False) kwargs.setdefault('sync', False) kwargs.setdefault('map_async', False) # set default option for number of cached transactions max_spare_txns = multiprocessing.cpu_count() kwargs.setdefault('max_spare_txns', max_spare_txns) # normalize path path = os.path.abspath(path) # open database > self.db = lmdb.open(path, **kwargs) E lmdb.MemoryError: /tmp/tmpji60af12.lmdb: Cannot allocate memory zarr/storage.py:2223: MemoryError ________________ TestArrayWithLMDBStoreNoBuffers.test_resize_2d ________________ self = def test_resize_2d(self): > z = self.create_array(shape=(105, 105), chunks=(10, 10), dtype='i4', fill_value=0) zarr/tests/test_core.py:656: _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ zarr/tests/test_core.py:2131: in create_array store = LMDBStore(path, buffers=False) _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ self = path = '/tmp/tmpoielo7l0.lmdb', buffers = False, dimension_separator = None kwargs = {'map_async': False, 'map_size': 1099511627776, 'max_spare_txns': 8, 'meminit': False, ...} lmdb = map_size = 1099511627776, writemap = True, max_spare_txns = 8 def __init__(self, path, buffers=True, dimension_separator=None, **kwargs): import lmdb # set default memory map size to something larger than the lmdb default, which is # very likely to be too small for any moderate array (logic copied from zict) map_size = (2**40 if sys.maxsize >= 2**32 else 2**28) kwargs.setdefault('map_size', map_size) # don't initialize buffers to zero by default, shouldn't be necessary kwargs.setdefault('meminit', False) # decide whether to use the writemap option based on the operating system's # support for sparse files - writemap requires sparse file support otherwise # the whole# `map_size` may be reserved up front on disk (logic copied from zict) writemap = sys.platform.startswith('linux') kwargs.setdefault('writemap', writemap) # decide options for when data are flushed to disk - choose to delay syncing # data to filesystem, otherwise pay a large performance penalty (zict also does # this) kwargs.setdefault('metasync', False) kwargs.setdefault('sync', False) kwargs.setdefault('map_async', False) # set default option for number of cached transactions max_spare_txns = multiprocessing.cpu_count() kwargs.setdefault('max_spare_txns', max_spare_txns) # normalize path path = os.path.abspath(path) # open database > self.db = lmdb.open(path, **kwargs) E lmdb.MemoryError: /tmp/tmpoielo7l0.lmdb: Cannot allocate memory zarr/storage.py:2223: MemoryError _________ TestArrayWithLMDBStoreNoBuffers.test_setitem_data_not_shared _________ self = def test_setitem_data_not_shared(self): # check that data don't end up being shared with another array # https://github.com/alimanfoo/zarr/issues/79 > z = self.create_array(shape=20, chunks=10, dtype='i4') zarr/tests/test_core.py:566: _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ zarr/tests/test_core.py:2131: in create_array store = LMDBStore(path, buffers=False) _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ self = path = '/tmp/tmpj608ms10.lmdb', buffers = False, dimension_separator = None kwargs = {'map_async': False, 'map_size': 1099511627776, 'max_spare_txns': 8, 'meminit': False, ...} lmdb = map_size = 1099511627776, writemap = True, max_spare_txns = 8 def __init__(self, path, buffers=True, dimension_separator=None, **kwargs): import lmdb # set default memory map size to something larger than the lmdb default, which is # very likely to be too small for any moderate array (logic copied from zict) map_size = (2**40 if sys.maxsize >= 2**32 else 2**28) kwargs.setdefault('map_size', map_size) # don't initialize buffers to zero by default, shouldn't be necessary kwargs.setdefault('meminit', False) # decide whether to use the writemap option based on the operating system's # support for sparse files - writemap requires sparse file support otherwise # the whole# `map_size` may be reserved up front on disk (logic copied from zict) writemap = sys.platform.startswith('linux') kwargs.setdefault('writemap', writemap) # decide options for when data are flushed to disk - choose to delay syncing # data to filesystem, otherwise pay a large performance penalty (zict also does # this) kwargs.setdefault('metasync', False) kwargs.setdefault('sync', False) kwargs.setdefault('map_async', False) # set default option for number of cached transactions max_spare_txns = multiprocessing.cpu_count() kwargs.setdefault('max_spare_txns', max_spare_txns) # normalize path path = os.path.abspath(path) # open database > self.db = lmdb.open(path, **kwargs) E lmdb.MemoryError: /tmp/tmpj608ms10.lmdb: Cannot allocate memory zarr/storage.py:2223: MemoryError _________ TestArrayWithLMDBStoreNoBuffers.test_store_has_binary_values _________ self = def test_store_has_binary_values(self): # Initialize array np.random.seed(42) > z = self.create_array(shape=(1050,), chunks=100, dtype='f8', compressor=[]) zarr/tests/test_core.py:144: _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ zarr/tests/test_core.py:2131: in create_array store = LMDBStore(path, buffers=False) _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ self = path = '/tmp/tmp8bc9swz1.lmdb', buffers = False, dimension_separator = None kwargs = {'map_async': False, 'map_size': 1099511627776, 'max_spare_txns': 8, 'meminit': False, ...} lmdb = map_size = 1099511627776, writemap = True, max_spare_txns = 8 def __init__(self, path, buffers=True, dimension_separator=None, **kwargs): import lmdb # set default memory map size to something larger than the lmdb default, which is # very likely to be too small for any moderate array (logic copied from zict) map_size = (2**40 if sys.maxsize >= 2**32 else 2**28) kwargs.setdefault('map_size', map_size) # don't initialize buffers to zero by default, shouldn't be necessary kwargs.setdefault('meminit', False) # decide whether to use the writemap option based on the operating system's # support for sparse files - writemap requires sparse file support otherwise # the whole# `map_size` may be reserved up front on disk (logic copied from zict) writemap = sys.platform.startswith('linux') kwargs.setdefault('writemap', writemap) # decide options for when data are flushed to disk - choose to delay syncing # data to filesystem, otherwise pay a large performance penalty (zict also does # this) kwargs.setdefault('metasync', False) kwargs.setdefault('sync', False) kwargs.setdefault('map_async', False) # set default option for number of cached transactions max_spare_txns = multiprocessing.cpu_count() kwargs.setdefault('max_spare_txns', max_spare_txns) # normalize path path = os.path.abspath(path) # open database > self.db = lmdb.open(path, **kwargs) E lmdb.MemoryError: /tmp/tmp8bc9swz1.lmdb: Cannot allocate memory zarr/storage.py:2223: MemoryError _________ TestArrayWithLMDBStoreNoBuffers.test_store_has_bytes_values __________ self = def test_store_has_bytes_values(self): # Test that many stores do hold bytes values. # Though this is not a strict requirement. # Should be disabled by any stores that fail this as needed. # Initialize array np.random.seed(42) > z = self.create_array(shape=(1050,), chunks=100, dtype='f8', compressor=[]) zarr/tests/test_core.py:162: _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ zarr/tests/test_core.py:2131: in create_array store = LMDBStore(path, buffers=False) _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ self = path = '/tmp/tmpmc8j3mi9.lmdb', buffers = False, dimension_separator = None kwargs = {'map_async': False, 'map_size': 1099511627776, 'max_spare_txns': 8, 'meminit': False, ...} lmdb = map_size = 1099511627776, writemap = True, max_spare_txns = 8 def __init__(self, path, buffers=True, dimension_separator=None, **kwargs): import lmdb # set default memory map size to something larger than the lmdb default, which is # very likely to be too small for any moderate array (logic copied from zict) map_size = (2**40 if sys.maxsize >= 2**32 else 2**28) kwargs.setdefault('map_size', map_size) # don't initialize buffers to zero by default, shouldn't be necessary kwargs.setdefault('meminit', False) # decide whether to use the writemap option based on the operating system's # support for sparse files - writemap requires sparse file support otherwise # the whole# `map_size` may be reserved up front on disk (logic copied from zict) writemap = sys.platform.startswith('linux') kwargs.setdefault('writemap', writemap) # decide options for when data are flushed to disk - choose to delay syncing # data to filesystem, otherwise pay a large performance penalty (zict also does # this) kwargs.setdefault('metasync', False) kwargs.setdefault('sync', False) kwargs.setdefault('map_async', False) # set default option for number of cached transactions max_spare_txns = multiprocessing.cpu_count() kwargs.setdefault('max_spare_txns', max_spare_txns) # normalize path path = os.path.abspath(path) # open database > self.db = lmdb.open(path, **kwargs) E lmdb.MemoryError: /tmp/tmpmc8j3mi9.lmdb: Cannot allocate memory zarr/storage.py:2223: MemoryError ___________ TestArrayWithLMDBStoreNoBuffers.test_store_has_text_keys ___________ self = def test_store_has_text_keys(self): # Initialize array np.random.seed(42) > z = self.create_array(shape=(1050,), chunks=100, dtype='f8', compressor=[]) zarr/tests/test_core.py:130: _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ zarr/tests/test_core.py:2131: in create_array store = LMDBStore(path, buffers=False) _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ self = path = '/tmp/tmp64_btzzh.lmdb', buffers = False, dimension_separator = None kwargs = {'map_async': False, 'map_size': 1099511627776, 'max_spare_txns': 8, 'meminit': False, ...} lmdb = map_size = 1099511627776, writemap = True, max_spare_txns = 8 def __init__(self, path, buffers=True, dimension_separator=None, **kwargs): import lmdb # set default memory map size to something larger than the lmdb default, which is # very likely to be too small for any moderate array (logic copied from zict) map_size = (2**40 if sys.maxsize >= 2**32 else 2**28) kwargs.setdefault('map_size', map_size) # don't initialize buffers to zero by default, shouldn't be necessary kwargs.setdefault('meminit', False) # decide whether to use the writemap option based on the operating system's # support for sparse files - writemap requires sparse file support otherwise # the whole# `map_size` may be reserved up front on disk (logic copied from zict) writemap = sys.platform.startswith('linux') kwargs.setdefault('writemap', writemap) # decide options for when data are flushed to disk - choose to delay syncing # data to filesystem, otherwise pay a large performance penalty (zict also does # this) kwargs.setdefault('metasync', False) kwargs.setdefault('sync', False) kwargs.setdefault('map_async', False) # set default option for number of cached transactions max_spare_txns = multiprocessing.cpu_count() kwargs.setdefault('max_spare_txns', max_spare_txns) # normalize path path = os.path.abspath(path) # open database > self.db = lmdb.open(path, **kwargs) E lmdb.MemoryError: /tmp/tmp64_btzzh.lmdb: Cannot allocate memory zarr/storage.py:2223: MemoryError ____________ TestArrayWithLMDBStoreNoBuffers.test_structured_array _____________ self = def test_structured_array(self): d = np.array([(b'aaa', 1, 4.2), (b'bbb', 2, 8.4), (b'ccc', 3, 12.6)], dtype=[('foo', 'S3'), ('bar', 'i4'), ('baz', 'f8')]) fill_values = None, b'', (b'zzz', 42, 16.8) > self.check_structured_array(d, fill_values) zarr/tests/test_core.py:1106: _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ zarr/tests/test_core.py:1062: in check_structured_array z = self.create_array(shape=a.shape, chunks=2, dtype=a.dtype, fill_value=fill_value) zarr/tests/test_core.py:2131: in create_array store = LMDBStore(path, buffers=False) _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ self = path = '/tmp/tmppzz9102i.lmdb', buffers = False, dimension_separator = None kwargs = {'map_async': False, 'map_size': 1099511627776, 'max_spare_txns': 8, 'meminit': False, ...} lmdb = map_size = 1099511627776, writemap = True, max_spare_txns = 8 def __init__(self, path, buffers=True, dimension_separator=None, **kwargs): import lmdb # set default memory map size to something larger than the lmdb default, which is # very likely to be too small for any moderate array (logic copied from zict) map_size = (2**40 if sys.maxsize >= 2**32 else 2**28) kwargs.setdefault('map_size', map_size) # don't initialize buffers to zero by default, shouldn't be necessary kwargs.setdefault('meminit', False) # decide whether to use the writemap option based on the operating system's # support for sparse files - writemap requires sparse file support otherwise # the whole# `map_size` may be reserved up front on disk (logic copied from zict) writemap = sys.platform.startswith('linux') kwargs.setdefault('writemap', writemap) # decide options for when data are flushed to disk - choose to delay syncing # data to filesystem, otherwise pay a large performance penalty (zict also does # this) kwargs.setdefault('metasync', False) kwargs.setdefault('sync', False) kwargs.setdefault('map_async', False) # set default option for number of cached transactions max_spare_txns = multiprocessing.cpu_count() kwargs.setdefault('max_spare_txns', max_spare_txns) # normalize path path = os.path.abspath(path) # open database > self.db = lmdb.open(path, **kwargs) E lmdb.MemoryError: /tmp/tmppzz9102i.lmdb: Cannot allocate memory zarr/storage.py:2223: MemoryError _____ TestArrayWithLMDBStoreNoBuffers.test_structured_array_contain_object _____ self = @unittest.skipIf(parse_version(np.__version__) < parse_version('1.14.0'), "unsupported numpy version") def test_structured_array_contain_object(self): if "PartialRead" in self.__class__.__name__: pytest.skip("partial reads of object arrays not supported") # ----------- creation -------------- structured_dtype = [('c_obj', object), ('c_int', int)] a = np.array([(b'aaa', 1), (b'bbb', 2)], dtype=structured_dtype) # zarr-array with structured dtype require object codec with pytest.raises(ValueError): > self.create_array(shape=a.shape, dtype=structured_dtype) zarr/tests/test_core.py:1407: _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ zarr/tests/test_core.py:2131: in create_array store = LMDBStore(path, buffers=False) _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ self = path = '/tmp/tmp83lb3uux.lmdb', buffers = False, dimension_separator = None kwargs = {'map_async': False, 'map_size': 1099511627776, 'max_spare_txns': 8, 'meminit': False, ...} lmdb = map_size = 1099511627776, writemap = True, max_spare_txns = 8 def __init__(self, path, buffers=True, dimension_separator=None, **kwargs): import lmdb # set default memory map size to something larger than the lmdb default, which is # very likely to be too small for any moderate array (logic copied from zict) map_size = (2**40 if sys.maxsize >= 2**32 else 2**28) kwargs.setdefault('map_size', map_size) # don't initialize buffers to zero by default, shouldn't be necessary kwargs.setdefault('meminit', False) # decide whether to use the writemap option based on the operating system's # support for sparse files - writemap requires sparse file support otherwise # the whole# `map_size` may be reserved up front on disk (logic copied from zict) writemap = sys.platform.startswith('linux') kwargs.setdefault('writemap', writemap) # decide options for when data are flushed to disk - choose to delay syncing # data to filesystem, otherwise pay a large performance penalty (zict also does # this) kwargs.setdefault('metasync', False) kwargs.setdefault('sync', False) kwargs.setdefault('map_async', False) # set default option for number of cached transactions max_spare_txns = multiprocessing.cpu_count() kwargs.setdefault('max_spare_txns', max_spare_txns) # normalize path path = os.path.abspath(path) # open database > self.db = lmdb.open(path, **kwargs) E lmdb.MemoryError: /tmp/tmp83lb3uux.lmdb: Cannot allocate memory zarr/storage.py:2223: MemoryError _________ TestArrayWithLMDBStoreNoBuffers.test_structured_array_nested _________ self = def test_structured_array_nested(self): d = np.array([(0, (0, ((0, 1), (1, 2), (2, 3)), 0), b'aaa'), (1, (1, ((1, 2), (2, 3), (3, 4)), 1), b'bbb'), (2, (2, ((2, 3), (3, 4), (4, 5)), 2), b'ccc')], dtype=[('foo', 'i8'), ('bar', [('foo', 'i4'), ('bar', '(3, 2)f4'), ('baz', 'u1')]), ('baz', 'S3')]) fill_values = None, b'', (0, (0, ((0, 0), (1, 1), (2, 2)), 0), b'zzz') > self.check_structured_array(d, fill_values) zarr/tests/test_core.py:1123: _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ zarr/tests/test_core.py:1062: in check_structured_array z = self.create_array(shape=a.shape, chunks=2, dtype=a.dtype, fill_value=fill_value) zarr/tests/test_core.py:2131: in create_array store = LMDBStore(path, buffers=False) _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ self = path = '/tmp/tmptdk3s39g.lmdb', buffers = False, dimension_separator = None kwargs = {'map_async': False, 'map_size': 1099511627776, 'max_spare_txns': 8, 'meminit': False, ...} lmdb = map_size = 1099511627776, writemap = True, max_spare_txns = 8 def __init__(self, path, buffers=True, dimension_separator=None, **kwargs): import lmdb # set default memory map size to something larger than the lmdb default, which is # very likely to be too small for any moderate array (logic copied from zict) map_size = (2**40 if sys.maxsize >= 2**32 else 2**28) kwargs.setdefault('map_size', map_size) # don't initialize buffers to zero by default, shouldn't be necessary kwargs.setdefault('meminit', False) # decide whether to use the writemap option based on the operating system's # support for sparse files - writemap requires sparse file support otherwise # the whole# `map_size` may be reserved up front on disk (logic copied from zict) writemap = sys.platform.startswith('linux') kwargs.setdefault('writemap', writemap) # decide options for when data are flushed to disk - choose to delay syncing # data to filesystem, otherwise pay a large performance penalty (zict also does # this) kwargs.setdefault('metasync', False) kwargs.setdefault('sync', False) kwargs.setdefault('map_async', False) # set default option for number of cached transactions max_spare_txns = multiprocessing.cpu_count() kwargs.setdefault('max_spare_txns', max_spare_txns) # normalize path path = os.path.abspath(path) # open database > self.db = lmdb.open(path, **kwargs) E lmdb.MemoryError: /tmp/tmptdk3s39g.lmdb: Cannot allocate memory zarr/storage.py:2223: MemoryError _______ TestArrayWithLMDBStoreNoBuffers.test_structured_array_subshapes ________ self = def test_structured_array_subshapes(self): d = np.array([(0, ((0, 1, 2), (1, 2, 3)), b'aaa'), (1, ((1, 2, 3), (2, 3, 4)), b'bbb'), (2, ((2, 3, 4), (3, 4, 5)), b'ccc')], dtype=[('foo', 'i8'), ('bar', '(2, 3)f4'), ('baz', 'S3')]) fill_values = None, b'', (0, ((0, 0, 0), (1, 1, 1)), b'zzz') > self.check_structured_array(d, fill_values) zarr/tests/test_core.py:1114: _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ zarr/tests/test_core.py:1062: in check_structured_array z = self.create_array(shape=a.shape, chunks=2, dtype=a.dtype, fill_value=fill_value) zarr/tests/test_core.py:2131: in create_array store = LMDBStore(path, buffers=False) _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ self = path = '/tmp/tmph9l5ha1t.lmdb', buffers = False, dimension_separator = None kwargs = {'map_async': False, 'map_size': 1099511627776, 'max_spare_txns': 8, 'meminit': False, ...} lmdb = map_size = 1099511627776, writemap = True, max_spare_txns = 8 def __init__(self, path, buffers=True, dimension_separator=None, **kwargs): import lmdb # set default memory map size to something larger than the lmdb default, which is # very likely to be too small for any moderate array (logic copied from zict) map_size = (2**40 if sys.maxsize >= 2**32 else 2**28) kwargs.setdefault('map_size', map_size) # don't initialize buffers to zero by default, shouldn't be necessary kwargs.setdefault('meminit', False) # decide whether to use the writemap option based on the operating system's # support for sparse files - writemap requires sparse file support otherwise # the whole# `map_size` may be reserved up front on disk (logic copied from zict) writemap = sys.platform.startswith('linux') kwargs.setdefault('writemap', writemap) # decide options for when data are flushed to disk - choose to delay syncing # data to filesystem, otherwise pay a large performance penalty (zict also does # this) kwargs.setdefault('metasync', False) kwargs.setdefault('sync', False) kwargs.setdefault('map_async', False) # set default option for number of cached transactions max_spare_txns = multiprocessing.cpu_count() kwargs.setdefault('max_spare_txns', max_spare_txns) # normalize path path = os.path.abspath(path) # open database > self.db = lmdb.open(path, **kwargs) E lmdb.MemoryError: /tmp/tmph9l5ha1t.lmdb: Cannot allocate memory zarr/storage.py:2223: MemoryError _________ TestArrayWithLMDBStoreNoBuffers.test_structured_with_object __________ self = def test_structured_with_object(self): > a = self.create_array(fill_value=(0.0, None), shape=10, chunks=10, dtype=[('x', float), ('y', object)], object_codec=Pickle()) zarr/tests/test_core.py:1559: _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ zarr/tests/test_core.py:2131: in create_array store = LMDBStore(path, buffers=False) _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ self = path = '/tmp/tmp0hjho1qs.lmdb', buffers = False, dimension_separator = None kwargs = {'map_async': False, 'map_size': 1099511627776, 'max_spare_txns': 8, 'meminit': False, ...} lmdb = map_size = 1099511627776, writemap = True, max_spare_txns = 8 def __init__(self, path, buffers=True, dimension_separator=None, **kwargs): import lmdb # set default memory map size to something larger than the lmdb default, which is # very likely to be too small for any moderate array (logic copied from zict) map_size = (2**40 if sys.maxsize >= 2**32 else 2**28) kwargs.setdefault('map_size', map_size) # don't initialize buffers to zero by default, shouldn't be necessary kwargs.setdefault('meminit', False) # decide whether to use the writemap option based on the operating system's # support for sparse files - writemap requires sparse file support otherwise # the whole# `map_size` may be reserved up front on disk (logic copied from zict) writemap = sys.platform.startswith('linux') kwargs.setdefault('writemap', writemap) # decide options for when data are flushed to disk - choose to delay syncing # data to filesystem, otherwise pay a large performance penalty (zict also does # this) kwargs.setdefault('metasync', False) kwargs.setdefault('sync', False) kwargs.setdefault('map_async', False) # set default option for number of cached transactions max_spare_txns = multiprocessing.cpu_count() kwargs.setdefault('max_spare_txns', max_spare_txns) # normalize path path = os.path.abspath(path) # open database > self.db = lmdb.open(path, **kwargs) E lmdb.MemoryError: /tmp/tmp0hjho1qs.lmdb: Cannot allocate memory zarr/storage.py:2223: MemoryError __________________ TestGroupWithLMDBStore.test_array_creation __________________ self = def test_array_creation(self): > grp = self.create_group() zarr/tests/test_hierarchy.py:939: _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ zarr/tests/test_hierarchy.py:54: in create_group store, chunk_store = self.create_store() zarr/tests/test_hierarchy.py:1432: in create_store store = LMDBStore(path) _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ self = path = '/tmp/tmpvayn_6_5.lmdb', buffers = True, dimension_separator = None kwargs = {'map_async': False, 'map_size': 1099511627776, 'max_spare_txns': 8, 'meminit': False, ...} lmdb = map_size = 1099511627776, writemap = True, max_spare_txns = 8 def __init__(self, path, buffers=True, dimension_separator=None, **kwargs): import lmdb # set default memory map size to something larger than the lmdb default, which is # very likely to be too small for any moderate array (logic copied from zict) map_size = (2**40 if sys.maxsize >= 2**32 else 2**28) kwargs.setdefault('map_size', map_size) # don't initialize buffers to zero by default, shouldn't be necessary kwargs.setdefault('meminit', False) # decide whether to use the writemap option based on the operating system's # support for sparse files - writemap requires sparse file support otherwise # the whole# `map_size` may be reserved up front on disk (logic copied from zict) writemap = sys.platform.startswith('linux') kwargs.setdefault('writemap', writemap) # decide options for when data are flushed to disk - choose to delay syncing # data to filesystem, otherwise pay a large performance penalty (zict also does # this) kwargs.setdefault('metasync', False) kwargs.setdefault('sync', False) kwargs.setdefault('map_async', False) # set default option for number of cached transactions max_spare_txns = multiprocessing.cpu_count() kwargs.setdefault('max_spare_txns', max_spare_txns) # normalize path path = os.path.abspath(path) # open database > self.db = lmdb.open(path, **kwargs) E lmdb.MemoryError: /tmp/tmpvayn_6_5.lmdb: Cannot allocate memory zarr/storage.py:2223: MemoryError _________________ TestGroupWithLMDBStore.test_context_manager __________________ self = def test_context_manager(self): > with self.create_group() as g: zarr/tests/test_hierarchy.py:1085: _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ zarr/tests/test_hierarchy.py:54: in create_group store, chunk_store = self.create_store() zarr/tests/test_hierarchy.py:1432: in create_store store = LMDBStore(path) _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ self = path = '/tmp/tmptldd4mmq.lmdb', buffers = True, dimension_separator = None kwargs = {'map_async': False, 'map_size': 1099511627776, 'max_spare_txns': 8, 'meminit': False, ...} lmdb = map_size = 1099511627776, writemap = True, max_spare_txns = 8 def __init__(self, path, buffers=True, dimension_separator=None, **kwargs): import lmdb # set default memory map size to something larger than the lmdb default, which is # very likely to be too small for any moderate array (logic copied from zict) map_size = (2**40 if sys.maxsize >= 2**32 else 2**28) kwargs.setdefault('map_size', map_size) # don't initialize buffers to zero by default, shouldn't be necessary kwargs.setdefault('meminit', False) # decide whether to use the writemap option based on the operating system's # support for sparse files - writemap requires sparse file support otherwise # the whole# `map_size` may be reserved up front on disk (logic copied from zict) writemap = sys.platform.startswith('linux') kwargs.setdefault('writemap', writemap) # decide options for when data are flushed to disk - choose to delay syncing # data to filesystem, otherwise pay a large performance penalty (zict also does # this) kwargs.setdefault('metasync', False) kwargs.setdefault('sync', False) kwargs.setdefault('map_async', False) # set default option for number of cached transactions max_spare_txns = multiprocessing.cpu_count() kwargs.setdefault('max_spare_txns', max_spare_txns) # normalize path path = os.path.abspath(path) # open database > self.db = lmdb.open(path, **kwargs) E lmdb.MemoryError: /tmp/tmptldd4mmq.lmdb: Cannot allocate memory zarr/storage.py:2223: MemoryError __________________ TestGroupWithLMDBStore.test_create_dataset __________________ self = def test_create_dataset(self): > g = self.create_group() zarr/tests/test_hierarchy.py:294: _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ zarr/tests/test_hierarchy.py:54: in create_group store, chunk_store = self.create_store() zarr/tests/test_hierarchy.py:1432: in create_store store = LMDBStore(path) _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ self = path = '/tmp/tmpchjy0rc7.lmdb', buffers = True, dimension_separator = None kwargs = {'map_async': False, 'map_size': 1099511627776, 'max_spare_txns': 8, 'meminit': False, ...} lmdb = map_size = 1099511627776, writemap = True, max_spare_txns = 8 def __init__(self, path, buffers=True, dimension_separator=None, **kwargs): import lmdb # set default memory map size to something larger than the lmdb default, which is # very likely to be too small for any moderate array (logic copied from zict) map_size = (2**40 if sys.maxsize >= 2**32 else 2**28) kwargs.setdefault('map_size', map_size) # don't initialize buffers to zero by default, shouldn't be necessary kwargs.setdefault('meminit', False) # decide whether to use the writemap option based on the operating system's # support for sparse files - writemap requires sparse file support otherwise # the whole# `map_size` may be reserved up front on disk (logic copied from zict) writemap = sys.platform.startswith('linux') kwargs.setdefault('writemap', writemap) # decide options for when data are flushed to disk - choose to delay syncing # data to filesystem, otherwise pay a large performance penalty (zict also does # this) kwargs.setdefault('metasync', False) kwargs.setdefault('sync', False) kwargs.setdefault('map_async', False) # set default option for number of cached transactions max_spare_txns = multiprocessing.cpu_count() kwargs.setdefault('max_spare_txns', max_spare_txns) # normalize path path = os.path.abspath(path) # open database > self.db = lmdb.open(path, **kwargs) E lmdb.MemoryError: /tmp/tmpchjy0rc7.lmdb: Cannot allocate memory zarr/storage.py:2223: MemoryError __________________ TestGroupWithLMDBStore.test_create_errors ___________________ self = def test_create_errors(self): > g = self.create_group() zarr/tests/test_hierarchy.py:424: _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ zarr/tests/test_hierarchy.py:54: in create_group store, chunk_store = self.create_store() zarr/tests/test_hierarchy.py:1432: in create_store store = LMDBStore(path) _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ self = path = '/tmp/tmpqdbmya8c.lmdb', buffers = True, dimension_separator = None kwargs = {'map_async': False, 'map_size': 1099511627776, 'max_spare_txns': 8, 'meminit': False, ...} lmdb = map_size = 1099511627776, writemap = True, max_spare_txns = 8 def __init__(self, path, buffers=True, dimension_separator=None, **kwargs): import lmdb # set default memory map size to something larger than the lmdb default, which is # very likely to be too small for any moderate array (logic copied from zict) map_size = (2**40 if sys.maxsize >= 2**32 else 2**28) kwargs.setdefault('map_size', map_size) # don't initialize buffers to zero by default, shouldn't be necessary kwargs.setdefault('meminit', False) # decide whether to use the writemap option based on the operating system's # support for sparse files - writemap requires sparse file support otherwise # the whole# `map_size` may be reserved up front on disk (logic copied from zict) writemap = sys.platform.startswith('linux') kwargs.setdefault('writemap', writemap) # decide options for when data are flushed to disk - choose to delay syncing # data to filesystem, otherwise pay a large performance penalty (zict also does # this) kwargs.setdefault('metasync', False) kwargs.setdefault('sync', False) kwargs.setdefault('map_async', False) # set default option for number of cached transactions max_spare_txns = multiprocessing.cpu_count() kwargs.setdefault('max_spare_txns', max_spare_txns) # normalize path path = os.path.abspath(path) # open database > self.db = lmdb.open(path, **kwargs) E lmdb.MemoryError: /tmp/tmpqdbmya8c.lmdb: Cannot allocate memory zarr/storage.py:2223: MemoryError ___________________ TestGroupWithLMDBStore.test_create_group ___________________ self = def test_create_group(self): > g1 = self.create_group() zarr/tests/test_hierarchy.py:120: _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ zarr/tests/test_hierarchy.py:54: in create_group store, chunk_store = self.create_store() zarr/tests/test_hierarchy.py:1432: in create_store store = LMDBStore(path) _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ self = path = '/tmp/tmpualu883m.lmdb', buffers = True, dimension_separator = None kwargs = {'map_async': False, 'map_size': 1099511627776, 'max_spare_txns': 8, 'meminit': False, ...} lmdb = map_size = 1099511627776, writemap = True, max_spare_txns = 8 def __init__(self, path, buffers=True, dimension_separator=None, **kwargs): import lmdb # set default memory map size to something larger than the lmdb default, which is # very likely to be too small for any moderate array (logic copied from zict) map_size = (2**40 if sys.maxsize >= 2**32 else 2**28) kwargs.setdefault('map_size', map_size) # don't initialize buffers to zero by default, shouldn't be necessary kwargs.setdefault('meminit', False) # decide whether to use the writemap option based on the operating system's # support for sparse files - writemap requires sparse file support otherwise # the whole# `map_size` may be reserved up front on disk (logic copied from zict) writemap = sys.platform.startswith('linux') kwargs.setdefault('writemap', writemap) # decide options for when data are flushed to disk - choose to delay syncing # data to filesystem, otherwise pay a large performance penalty (zict also does # this) kwargs.setdefault('metasync', False) kwargs.setdefault('sync', False) kwargs.setdefault('map_async', False) # set default option for number of cached transactions max_spare_txns = multiprocessing.cpu_count() kwargs.setdefault('max_spare_txns', max_spare_txns) # normalize path path = os.path.abspath(path) # open database > self.db = lmdb.open(path, **kwargs) E lmdb.MemoryError: /tmp/tmpualu883m.lmdb: Cannot allocate memory zarr/storage.py:2223: MemoryError _________________ TestGroupWithLMDBStore.test_create_overwrite _________________ self = def test_create_overwrite(self): try: for method_name in 'create_dataset', 'create', 'empty', 'zeros', \ 'ones': > g = self.create_group() zarr/tests/test_hierarchy.py:482: _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ zarr/tests/test_hierarchy.py:54: in create_group store, chunk_store = self.create_store() zarr/tests/test_hierarchy.py:1432: in create_store store = LMDBStore(path) _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ self = path = '/tmp/tmp_a2szswm.lmdb', buffers = True, dimension_separator = None kwargs = {'map_async': False, 'map_size': 1099511627776, 'max_spare_txns': 8, 'meminit': False, ...} lmdb = map_size = 1099511627776, writemap = True, max_spare_txns = 8 def __init__(self, path, buffers=True, dimension_separator=None, **kwargs): import lmdb # set default memory map size to something larger than the lmdb default, which is # very likely to be too small for any moderate array (logic copied from zict) map_size = (2**40 if sys.maxsize >= 2**32 else 2**28) kwargs.setdefault('map_size', map_size) # don't initialize buffers to zero by default, shouldn't be necessary kwargs.setdefault('meminit', False) # decide whether to use the writemap option based on the operating system's # support for sparse files - writemap requires sparse file support otherwise # the whole# `map_size` may be reserved up front on disk (logic copied from zict) writemap = sys.platform.startswith('linux') kwargs.setdefault('writemap', writemap) # decide options for when data are flushed to disk - choose to delay syncing # data to filesystem, otherwise pay a large performance penalty (zict also does # this) kwargs.setdefault('metasync', False) kwargs.setdefault('sync', False) kwargs.setdefault('map_async', False) # set default option for number of cached transactions max_spare_txns = multiprocessing.cpu_count() kwargs.setdefault('max_spare_txns', max_spare_txns) # normalize path path = os.path.abspath(path) # open database > self.db = lmdb.open(path, **kwargs) E lmdb.MemoryError: /tmp/tmp_a2szswm.lmdb: Cannot allocate memory zarr/storage.py:2223: MemoryError _____________________ TestGroupWithLMDBStore.test_delitem ______________________ self = def test_delitem(self): > g = self.create_group() zarr/tests/test_hierarchy.py:856: _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ zarr/tests/test_hierarchy.py:54: in create_group store, chunk_store = self.create_store() zarr/tests/test_hierarchy.py:1432: in create_store store = LMDBStore(path) _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ self = path = '/tmp/tmplhrau6tr.lmdb', buffers = True, dimension_separator = None kwargs = {'map_async': False, 'map_size': 1099511627776, 'max_spare_txns': 8, 'meminit': False, ...} lmdb = map_size = 1099511627776, writemap = True, max_spare_txns = 8 def __init__(self, path, buffers=True, dimension_separator=None, **kwargs): import lmdb # set default memory map size to something larger than the lmdb default, which is # very likely to be too small for any moderate array (logic copied from zict) map_size = (2**40 if sys.maxsize >= 2**32 else 2**28) kwargs.setdefault('map_size', map_size) # don't initialize buffers to zero by default, shouldn't be necessary kwargs.setdefault('meminit', False) # decide whether to use the writemap option based on the operating system's # support for sparse files - writemap requires sparse file support otherwise # the whole# `map_size` may be reserved up front on disk (logic copied from zict) writemap = sys.platform.startswith('linux') kwargs.setdefault('writemap', writemap) # decide options for when data are flushed to disk - choose to delay syncing # data to filesystem, otherwise pay a large performance penalty (zict also does # this) kwargs.setdefault('metasync', False) kwargs.setdefault('sync', False) kwargs.setdefault('map_async', False) # set default option for number of cached transactions max_spare_txns = multiprocessing.cpu_count() kwargs.setdefault('max_spare_txns', max_spare_txns) # normalize path path = os.path.abspath(path) # open database > self.db = lmdb.open(path, **kwargs) E lmdb.MemoryError: /tmp/tmplhrau6tr.lmdb: Cannot allocate memory zarr/storage.py:2223: MemoryError _____________ TestGroupWithLMDBStore.test_double_counting_group_v3 _____________ self = def test_double_counting_group_v3(self): > root_group = self.create_group() zarr/tests/test_hierarchy.py:775: _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ zarr/tests/test_hierarchy.py:54: in create_group store, chunk_store = self.create_store() zarr/tests/test_hierarchy.py:1432: in create_store store = LMDBStore(path) _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ self = path = '/tmp/tmp0w4ssymi.lmdb', buffers = True, dimension_separator = None kwargs = {'map_async': False, 'map_size': 1099511627776, 'max_spare_txns': 8, 'meminit': False, ...} lmdb = map_size = 1099511627776, writemap = True, max_spare_txns = 8 def __init__(self, path, buffers=True, dimension_separator=None, **kwargs): import lmdb # set default memory map size to something larger than the lmdb default, which is # very likely to be too small for any moderate array (logic copied from zict) map_size = (2**40 if sys.maxsize >= 2**32 else 2**28) kwargs.setdefault('map_size', map_size) # don't initialize buffers to zero by default, shouldn't be necessary kwargs.setdefault('meminit', False) # decide whether to use the writemap option based on the operating system's # support for sparse files - writemap requires sparse file support otherwise # the whole# `map_size` may be reserved up front on disk (logic copied from zict) writemap = sys.platform.startswith('linux') kwargs.setdefault('writemap', writemap) # decide options for when data are flushed to disk - choose to delay syncing # data to filesystem, otherwise pay a large performance penalty (zict also does # this) kwargs.setdefault('metasync', False) kwargs.setdefault('sync', False) kwargs.setdefault('map_async', False) # set default option for number of cached transactions max_spare_txns = multiprocessing.cpu_count() kwargs.setdefault('max_spare_txns', max_spare_txns) # normalize path path = os.path.abspath(path) # open database > self.db = lmdb.open(path, **kwargs) E lmdb.MemoryError: /tmp/tmp0w4ssymi.lmdb: Cannot allocate memory zarr/storage.py:2223: MemoryError _________ TestGroupWithLMDBStore.test_empty_getitem_contains_iterators _________ self = def test_empty_getitem_contains_iterators(self): # setup > g = self.create_group() zarr/tests/test_hierarchy.py:788: _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ zarr/tests/test_hierarchy.py:54: in create_group store, chunk_store = self.create_store() zarr/tests/test_hierarchy.py:1432: in create_store store = LMDBStore(path) _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ self = path = '/tmp/tmpiam88dlf.lmdb', buffers = True, dimension_separator = None kwargs = {'map_async': False, 'map_size': 1099511627776, 'max_spare_txns': 8, 'meminit': False, ...} lmdb = map_size = 1099511627776, writemap = True, max_spare_txns = 8 def __init__(self, path, buffers=True, dimension_separator=None, **kwargs): import lmdb # set default memory map size to something larger than the lmdb default, which is # very likely to be too small for any moderate array (logic copied from zict) map_size = (2**40 if sys.maxsize >= 2**32 else 2**28) kwargs.setdefault('map_size', map_size) # don't initialize buffers to zero by default, shouldn't be necessary kwargs.setdefault('meminit', False) # decide whether to use the writemap option based on the operating system's # support for sparse files - writemap requires sparse file support otherwise # the whole# `map_size` may be reserved up front on disk (logic copied from zict) writemap = sys.platform.startswith('linux') kwargs.setdefault('writemap', writemap) # decide options for when data are flushed to disk - choose to delay syncing # data to filesystem, otherwise pay a large performance penalty (zict also does # this) kwargs.setdefault('metasync', False) kwargs.setdefault('sync', False) kwargs.setdefault('map_async', False) # set default option for number of cached transactions max_spare_txns = multiprocessing.cpu_count() kwargs.setdefault('max_spare_txns', max_spare_txns) # normalize path path = os.path.abspath(path) # open database > self.db = lmdb.open(path, **kwargs) E lmdb.MemoryError: /tmp/tmpiam88dlf.lmdb: Cannot allocate memory zarr/storage.py:2223: MemoryError _____________________ TestGroupWithLMDBStore.test_getattr ______________________ self = def test_getattr(self): # setup > g1 = self.create_group() zarr/tests/test_hierarchy.py:826: _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ zarr/tests/test_hierarchy.py:54: in create_group store, chunk_store = self.create_store() zarr/tests/test_hierarchy.py:1432: in create_store store = LMDBStore(path) _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ self = path = '/tmp/tmp5ug0okxl.lmdb', buffers = True, dimension_separator = None kwargs = {'map_async': False, 'map_size': 1099511627776, 'max_spare_txns': 8, 'meminit': False, ...} lmdb = map_size = 1099511627776, writemap = True, max_spare_txns = 8 def __init__(self, path, buffers=True, dimension_separator=None, **kwargs): import lmdb # set default memory map size to something larger than the lmdb default, which is # very likely to be too small for any moderate array (logic copied from zict) map_size = (2**40 if sys.maxsize >= 2**32 else 2**28) kwargs.setdefault('map_size', map_size) # don't initialize buffers to zero by default, shouldn't be necessary kwargs.setdefault('meminit', False) # decide whether to use the writemap option based on the operating system's # support for sparse files - writemap requires sparse file support otherwise # the whole# `map_size` may be reserved up front on disk (logic copied from zict) writemap = sys.platform.startswith('linux') kwargs.setdefault('writemap', writemap) # decide options for when data are flushed to disk - choose to delay syncing # data to filesystem, otherwise pay a large performance penalty (zict also does # this) kwargs.setdefault('metasync', False) kwargs.setdefault('sync', False) kwargs.setdefault('map_async', False) # set default option for number of cached transactions max_spare_txns = multiprocessing.cpu_count() kwargs.setdefault('max_spare_txns', max_spare_txns) # normalize path path = os.path.abspath(path) # open database > self.db = lmdb.open(path, **kwargs) E lmdb.MemoryError: /tmp/tmp5ug0okxl.lmdb: Cannot allocate memory zarr/storage.py:2223: MemoryError ____________ TestGroupWithLMDBStore.test_getitem_contains_iterators ____________ self = def test_getitem_contains_iterators(self): # setup > g1 = self.create_group() zarr/tests/test_hierarchy.py:508: _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ zarr/tests/test_hierarchy.py:54: in create_group store, chunk_store = self.create_store() zarr/tests/test_hierarchy.py:1432: in create_store store = LMDBStore(path) _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ self = path = '/tmp/tmpp_wcbt7y.lmdb', buffers = True, dimension_separator = None kwargs = {'map_async': False, 'map_size': 1099511627776, 'max_spare_txns': 8, 'meminit': False, ...} lmdb = map_size = 1099511627776, writemap = True, max_spare_txns = 8 def __init__(self, path, buffers=True, dimension_separator=None, **kwargs): import lmdb # set default memory map size to something larger than the lmdb default, which is # very likely to be too small for any moderate array (logic copied from zict) map_size = (2**40 if sys.maxsize >= 2**32 else 2**28) kwargs.setdefault('map_size', map_size) # don't initialize buffers to zero by default, shouldn't be necessary kwargs.setdefault('meminit', False) # decide whether to use the writemap option based on the operating system's # support for sparse files - writemap requires sparse file support otherwise # the whole# `map_size` may be reserved up front on disk (logic copied from zict) writemap = sys.platform.startswith('linux') kwargs.setdefault('writemap', writemap) # decide options for when data are flushed to disk - choose to delay syncing # data to filesystem, otherwise pay a large performance penalty (zict also does # this) kwargs.setdefault('metasync', False) kwargs.setdefault('sync', False) kwargs.setdefault('map_async', False) # set default option for number of cached transactions max_spare_txns = multiprocessing.cpu_count() kwargs.setdefault('max_spare_txns', max_spare_txns) # normalize path path = os.path.abspath(path) # open database > self.db = lmdb.open(path, **kwargs) E lmdb.MemoryError: /tmp/tmpp_wcbt7y.lmdb: Cannot allocate memory zarr/storage.py:2223: MemoryError ___________________ TestGroupWithLMDBStore.test_group_init_1 ___________________ self = def test_group_init_1(self): > store, chunk_store = self.create_store() zarr/tests/test_hierarchy.py:61: _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ zarr/tests/test_hierarchy.py:1432: in create_store store = LMDBStore(path) _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ self = path = '/tmp/tmp_g65s5gs.lmdb', buffers = True, dimension_separator = None kwargs = {'map_async': False, 'map_size': 1099511627776, 'max_spare_txns': 8, 'meminit': False, ...} lmdb = map_size = 1099511627776, writemap = True, max_spare_txns = 8 def __init__(self, path, buffers=True, dimension_separator=None, **kwargs): import lmdb # set default memory map size to something larger than the lmdb default, which is # very likely to be too small for any moderate array (logic copied from zict) map_size = (2**40 if sys.maxsize >= 2**32 else 2**28) kwargs.setdefault('map_size', map_size) # don't initialize buffers to zero by default, shouldn't be necessary kwargs.setdefault('meminit', False) # decide whether to use the writemap option based on the operating system's # support for sparse files - writemap requires sparse file support otherwise # the whole# `map_size` may be reserved up front on disk (logic copied from zict) writemap = sys.platform.startswith('linux') kwargs.setdefault('writemap', writemap) # decide options for when data are flushed to disk - choose to delay syncing # data to filesystem, otherwise pay a large performance penalty (zict also does # this) kwargs.setdefault('metasync', False) kwargs.setdefault('sync', False) kwargs.setdefault('map_async', False) # set default option for number of cached transactions max_spare_txns = multiprocessing.cpu_count() kwargs.setdefault('max_spare_txns', max_spare_txns) # normalize path path = os.path.abspath(path) # open database > self.db = lmdb.open(path, **kwargs) E lmdb.MemoryError: /tmp/tmp_g65s5gs.lmdb: Cannot allocate memory zarr/storage.py:2223: MemoryError ___________________ TestGroupWithLMDBStore.test_group_init_2 ___________________ self = def test_group_init_2(self): > store, chunk_store = self.create_store() zarr/tests/test_hierarchy.py:81: _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ zarr/tests/test_hierarchy.py:1432: in create_store store = LMDBStore(path) _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ self = path = '/tmp/tmpje2wuep8.lmdb', buffers = True, dimension_separator = None kwargs = {'map_async': False, 'map_size': 1099511627776, 'max_spare_txns': 8, 'meminit': False, ...} lmdb = map_size = 1099511627776, writemap = True, max_spare_txns = 8 def __init__(self, path, buffers=True, dimension_separator=None, **kwargs): import lmdb # set default memory map size to something larger than the lmdb default, which is # very likely to be too small for any moderate array (logic copied from zict) map_size = (2**40 if sys.maxsize >= 2**32 else 2**28) kwargs.setdefault('map_size', map_size) # don't initialize buffers to zero by default, shouldn't be necessary kwargs.setdefault('meminit', False) # decide whether to use the writemap option based on the operating system's # support for sparse files - writemap requires sparse file support otherwise # the whole# `map_size` may be reserved up front on disk (logic copied from zict) writemap = sys.platform.startswith('linux') kwargs.setdefault('writemap', writemap) # decide options for when data are flushed to disk - choose to delay syncing # data to filesystem, otherwise pay a large performance penalty (zict also does # this) kwargs.setdefault('metasync', False) kwargs.setdefault('sync', False) kwargs.setdefault('map_async', False) # set default option for number of cached transactions max_spare_txns = multiprocessing.cpu_count() kwargs.setdefault('max_spare_txns', max_spare_txns) # normalize path path = os.path.abspath(path) # open database > self.db = lmdb.open(path, **kwargs) E lmdb.MemoryError: /tmp/tmpje2wuep8.lmdb: Cannot allocate memory zarr/storage.py:2223: MemoryError _______________ TestGroupWithLMDBStore.test_group_init_errors_1 ________________ self = def test_group_init_errors_1(self): > store, chunk_store = self.create_store() zarr/tests/test_hierarchy.py:93: _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ zarr/tests/test_hierarchy.py:1432: in create_store store = LMDBStore(path) _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ self = path = '/tmp/tmphewx74uh.lmdb', buffers = True, dimension_separator = None kwargs = {'map_async': False, 'map_size': 1099511627776, 'max_spare_txns': 8, 'meminit': False, ...} lmdb = map_size = 1099511627776, writemap = True, max_spare_txns = 8 def __init__(self, path, buffers=True, dimension_separator=None, **kwargs): import lmdb # set default memory map size to something larger than the lmdb default, which is # very likely to be too small for any moderate array (logic copied from zict) map_size = (2**40 if sys.maxsize >= 2**32 else 2**28) kwargs.setdefault('map_size', map_size) # don't initialize buffers to zero by default, shouldn't be necessary kwargs.setdefault('meminit', False) # decide whether to use the writemap option based on the operating system's # support for sparse files - writemap requires sparse file support otherwise # the whole# `map_size` may be reserved up front on disk (logic copied from zict) writemap = sys.platform.startswith('linux') kwargs.setdefault('writemap', writemap) # decide options for when data are flushed to disk - choose to delay syncing # data to filesystem, otherwise pay a large performance penalty (zict also does # this) kwargs.setdefault('metasync', False) kwargs.setdefault('sync', False) kwargs.setdefault('map_async', False) # set default option for number of cached transactions max_spare_txns = multiprocessing.cpu_count() kwargs.setdefault('max_spare_txns', max_spare_txns) # normalize path path = os.path.abspath(path) # open database > self.db = lmdb.open(path, **kwargs) E lmdb.MemoryError: /tmp/tmphewx74uh.lmdb: Cannot allocate memory zarr/storage.py:2223: MemoryError _______________ TestGroupWithLMDBStore.test_group_init_errors_2 ________________ self = def test_group_init_errors_2(self): > store, chunk_store = self.create_store() zarr/tests/test_hierarchy.py:105: _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ zarr/tests/test_hierarchy.py:1432: in create_store store = LMDBStore(path) _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ self = path = '/tmp/tmpy3bn1f_t.lmdb', buffers = True, dimension_separator = None kwargs = {'map_async': False, 'map_size': 1099511627776, 'max_spare_txns': 8, 'meminit': False, ...} lmdb = map_size = 1099511627776, writemap = True, max_spare_txns = 8 def __init__(self, path, buffers=True, dimension_separator=None, **kwargs): import lmdb # set default memory map size to something larger than the lmdb default, which is # very likely to be too small for any moderate array (logic copied from zict) map_size = (2**40 if sys.maxsize >= 2**32 else 2**28) kwargs.setdefault('map_size', map_size) # don't initialize buffers to zero by default, shouldn't be necessary kwargs.setdefault('meminit', False) # decide whether to use the writemap option based on the operating system's # support for sparse files - writemap requires sparse file support otherwise # the whole# `map_size` may be reserved up front on disk (logic copied from zict) writemap = sys.platform.startswith('linux') kwargs.setdefault('writemap', writemap) # decide options for when data are flushed to disk - choose to delay syncing # data to filesystem, otherwise pay a large performance penalty (zict also does # this) kwargs.setdefault('metasync', False) kwargs.setdefault('sync', False) kwargs.setdefault('map_async', False) # set default option for number of cached transactions max_spare_txns = multiprocessing.cpu_count() kwargs.setdefault('max_spare_txns', max_spare_txns) # normalize path path = os.path.abspath(path) # open database > self.db = lmdb.open(path, **kwargs) E lmdb.MemoryError: /tmp/tmpy3bn1f_t.lmdb: Cannot allocate memory zarr/storage.py:2223: MemoryError ____________________ TestGroupWithLMDBStore.test_group_repr ____________________ self = def test_group_repr(self): > store, chunk_store = self.create_store() zarr/tests/test_hierarchy.py:100: _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ zarr/tests/test_hierarchy.py:1432: in create_store store = LMDBStore(path) _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ self = path = '/tmp/tmp1jy_do0k.lmdb', buffers = True, dimension_separator = None kwargs = {'map_async': False, 'map_size': 1099511627776, 'max_spare_txns': 8, 'meminit': False, ...} lmdb = map_size = 1099511627776, writemap = True, max_spare_txns = 8 def __init__(self, path, buffers=True, dimension_separator=None, **kwargs): import lmdb # set default memory map size to something larger than the lmdb default, which is # very likely to be too small for any moderate array (logic copied from zict) map_size = (2**40 if sys.maxsize >= 2**32 else 2**28) kwargs.setdefault('map_size', map_size) # don't initialize buffers to zero by default, shouldn't be necessary kwargs.setdefault('meminit', False) # decide whether to use the writemap option based on the operating system's # support for sparse files - writemap requires sparse file support otherwise # the whole# `map_size` may be reserved up front on disk (logic copied from zict) writemap = sys.platform.startswith('linux') kwargs.setdefault('writemap', writemap) # decide options for when data are flushed to disk - choose to delay syncing # data to filesystem, otherwise pay a large performance penalty (zict also does # this) kwargs.setdefault('metasync', False) kwargs.setdefault('sync', False) kwargs.setdefault('map_async', False) # set default option for number of cached transactions max_spare_txns = multiprocessing.cpu_count() kwargs.setdefault('max_spare_txns', max_spare_txns) # normalize path path = os.path.abspath(path) # open database > self.db = lmdb.open(path, **kwargs) E lmdb.MemoryError: /tmp/tmp1jy_do0k.lmdb: Cannot allocate memory zarr/storage.py:2223: MemoryError ________________ TestGroupWithLMDBStore.test_iterators_recurse _________________ self = def test_iterators_recurse(self): # setup > g1 = self.create_group() zarr/tests/test_hierarchy.py:800: _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ zarr/tests/test_hierarchy.py:54: in create_group store, chunk_store = self.create_store() zarr/tests/test_hierarchy.py:1432: in create_store store = LMDBStore(path) _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ self = path = '/tmp/tmpohlxfx_b.lmdb', buffers = True, dimension_separator = None kwargs = {'map_async': False, 'map_size': 1099511627776, 'max_spare_txns': 8, 'meminit': False, ...} lmdb = map_size = 1099511627776, writemap = True, max_spare_txns = 8 def __init__(self, path, buffers=True, dimension_separator=None, **kwargs): import lmdb # set default memory map size to something larger than the lmdb default, which is # very likely to be too small for any moderate array (logic copied from zict) map_size = (2**40 if sys.maxsize >= 2**32 else 2**28) kwargs.setdefault('map_size', map_size) # don't initialize buffers to zero by default, shouldn't be necessary kwargs.setdefault('meminit', False) # decide whether to use the writemap option based on the operating system's # support for sparse files - writemap requires sparse file support otherwise # the whole# `map_size` may be reserved up front on disk (logic copied from zict) writemap = sys.platform.startswith('linux') kwargs.setdefault('writemap', writemap) # decide options for when data are flushed to disk - choose to delay syncing # data to filesystem, otherwise pay a large performance penalty (zict also does # this) kwargs.setdefault('metasync', False) kwargs.setdefault('sync', False) kwargs.setdefault('map_async', False) # set default option for number of cached transactions max_spare_txns = multiprocessing.cpu_count() kwargs.setdefault('max_spare_txns', max_spare_txns) # normalize path path = os.path.abspath(path) # open database > self.db = lmdb.open(path, **kwargs) E lmdb.MemoryError: /tmp/tmpohlxfx_b.lmdb: Cannot allocate memory zarr/storage.py:2223: MemoryError _______________________ TestGroupWithLMDBStore.test_move _______________________ self = def test_move(self): > g = self.create_group() zarr/tests/test_hierarchy.py:875: _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ zarr/tests/test_hierarchy.py:54: in create_group store, chunk_store = self.create_store() zarr/tests/test_hierarchy.py:1432: in create_store store = LMDBStore(path) _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ self = path = '/tmp/tmp6k_6vxj3.lmdb', buffers = True, dimension_separator = None kwargs = {'map_async': False, 'map_size': 1099511627776, 'max_spare_txns': 8, 'meminit': False, ...} lmdb = map_size = 1099511627776, writemap = True, max_spare_txns = 8 def __init__(self, path, buffers=True, dimension_separator=None, **kwargs): import lmdb # set default memory map size to something larger than the lmdb default, which is # very likely to be too small for any moderate array (logic copied from zict) map_size = (2**40 if sys.maxsize >= 2**32 else 2**28) kwargs.setdefault('map_size', map_size) # don't initialize buffers to zero by default, shouldn't be necessary kwargs.setdefault('meminit', False) # decide whether to use the writemap option based on the operating system's # support for sparse files - writemap requires sparse file support otherwise # the whole# `map_size` may be reserved up front on disk (logic copied from zict) writemap = sys.platform.startswith('linux') kwargs.setdefault('writemap', writemap) # decide options for when data are flushed to disk - choose to delay syncing # data to filesystem, otherwise pay a large performance penalty (zict also does # this) kwargs.setdefault('metasync', False) kwargs.setdefault('sync', False) kwargs.setdefault('map_async', False) # set default option for number of cached transactions max_spare_txns = multiprocessing.cpu_count() kwargs.setdefault('max_spare_txns', max_spare_txns) # normalize path path = os.path.abspath(path) # open database > self.db = lmdb.open(path, **kwargs) E lmdb.MemoryError: /tmp/tmp6k_6vxj3.lmdb: Cannot allocate memory zarr/storage.py:2223: MemoryError ______________________ TestGroupWithLMDBStore.test_paths _______________________ self = def test_paths(self): > g1 = self.create_group() zarr/tests/test_hierarchy.py:1002: _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ zarr/tests/test_hierarchy.py:54: in create_group store, chunk_store = self.create_store() zarr/tests/test_hierarchy.py:1432: in create_store store = LMDBStore(path) _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ self = path = '/tmp/tmpgvqj9xz_.lmdb', buffers = True, dimension_separator = None kwargs = {'map_async': False, 'map_size': 1099511627776, 'max_spare_txns': 8, 'meminit': False, ...} lmdb = map_size = 1099511627776, writemap = True, max_spare_txns = 8 def __init__(self, path, buffers=True, dimension_separator=None, **kwargs): import lmdb # set default memory map size to something larger than the lmdb default, which is # very likely to be too small for any moderate array (logic copied from zict) map_size = (2**40 if sys.maxsize >= 2**32 else 2**28) kwargs.setdefault('map_size', map_size) # don't initialize buffers to zero by default, shouldn't be necessary kwargs.setdefault('meminit', False) # decide whether to use the writemap option based on the operating system's # support for sparse files - writemap requires sparse file support otherwise # the whole# `map_size` may be reserved up front on disk (logic copied from zict) writemap = sys.platform.startswith('linux') kwargs.setdefault('writemap', writemap) # decide options for when data are flushed to disk - choose to delay syncing # data to filesystem, otherwise pay a large performance penalty (zict also does # this) kwargs.setdefault('metasync', False) kwargs.setdefault('sync', False) kwargs.setdefault('map_async', False) # set default option for number of cached transactions max_spare_txns = multiprocessing.cpu_count() kwargs.setdefault('max_spare_txns', max_spare_txns) # normalize path path = os.path.abspath(path) # open database > self.db = lmdb.open(path, **kwargs) E lmdb.MemoryError: /tmp/tmpgvqj9xz_.lmdb: Cannot allocate memory zarr/storage.py:2223: MemoryError ______________________ TestGroupWithLMDBStore.test_pickle ______________________ self = def test_pickle(self): # setup group > g = self.create_group() zarr/tests/test_hierarchy.py:1058: _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ zarr/tests/test_hierarchy.py:54: in create_group store, chunk_store = self.create_store() zarr/tests/test_hierarchy.py:1432: in create_store store = LMDBStore(path) _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ self = path = '/tmp/tmp5yk24ho3.lmdb', buffers = True, dimension_separator = None kwargs = {'map_async': False, 'map_size': 1099511627776, 'max_spare_txns': 8, 'meminit': False, ...} lmdb = map_size = 1099511627776, writemap = True, max_spare_txns = 8 def __init__(self, path, buffers=True, dimension_separator=None, **kwargs): import lmdb # set default memory map size to something larger than the lmdb default, which is # very likely to be too small for any moderate array (logic copied from zict) map_size = (2**40 if sys.maxsize >= 2**32 else 2**28) kwargs.setdefault('map_size', map_size) # don't initialize buffers to zero by default, shouldn't be necessary kwargs.setdefault('meminit', False) # decide whether to use the writemap option based on the operating system's # support for sparse files - writemap requires sparse file support otherwise # the whole# `map_size` may be reserved up front on disk (logic copied from zict) writemap = sys.platform.startswith('linux') kwargs.setdefault('writemap', writemap) # decide options for when data are flushed to disk - choose to delay syncing # data to filesystem, otherwise pay a large performance penalty (zict also does # this) kwargs.setdefault('metasync', False) kwargs.setdefault('sync', False) kwargs.setdefault('map_async', False) # set default option for number of cached transactions max_spare_txns = multiprocessing.cpu_count() kwargs.setdefault('max_spare_txns', max_spare_txns) # normalize path path = os.path.abspath(path) # open database > self.db = lmdb.open(path, **kwargs) E lmdb.MemoryError: /tmp/tmp5yk24ho3.lmdb: Cannot allocate memory zarr/storage.py:2223: MemoryError _________________ TestGroupWithLMDBStore.test_require_dataset __________________ self = def test_require_dataset(self): > g = self.create_group() zarr/tests/test_hierarchy.py:376: _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ zarr/tests/test_hierarchy.py:54: in create_group store, chunk_store = self.create_store() zarr/tests/test_hierarchy.py:1432: in create_store store = LMDBStore(path) _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ self = path = '/tmp/tmpu9e2rmf1.lmdb', buffers = True, dimension_separator = None kwargs = {'map_async': False, 'map_size': 1099511627776, 'max_spare_txns': 8, 'meminit': False, ...} lmdb = map_size = 1099511627776, writemap = True, max_spare_txns = 8 def __init__(self, path, buffers=True, dimension_separator=None, **kwargs): import lmdb # set default memory map size to something larger than the lmdb default, which is # very likely to be too small for any moderate array (logic copied from zict) map_size = (2**40 if sys.maxsize >= 2**32 else 2**28) kwargs.setdefault('map_size', map_size) # don't initialize buffers to zero by default, shouldn't be necessary kwargs.setdefault('meminit', False) # decide whether to use the writemap option based on the operating system's # support for sparse files - writemap requires sparse file support otherwise # the whole# `map_size` may be reserved up front on disk (logic copied from zict) writemap = sys.platform.startswith('linux') kwargs.setdefault('writemap', writemap) # decide options for when data are flushed to disk - choose to delay syncing # data to filesystem, otherwise pay a large performance penalty (zict also does # this) kwargs.setdefault('metasync', False) kwargs.setdefault('sync', False) kwargs.setdefault('map_async', False) # set default option for number of cached transactions max_spare_txns = multiprocessing.cpu_count() kwargs.setdefault('max_spare_txns', max_spare_txns) # normalize path path = os.path.abspath(path) # open database > self.db = lmdb.open(path, **kwargs) E lmdb.MemoryError: /tmp/tmpu9e2rmf1.lmdb: Cannot allocate memory zarr/storage.py:2223: MemoryError __________________ TestGroupWithLMDBStore.test_require_group ___________________ self = def test_require_group(self): > g1 = self.create_group() zarr/tests/test_hierarchy.py:201: _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ zarr/tests/test_hierarchy.py:54: in create_group store, chunk_store = self.create_store() zarr/tests/test_hierarchy.py:1432: in create_store store = LMDBStore(path) _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ self = path = '/tmp/tmplwjwl051.lmdb', buffers = True, dimension_separator = None kwargs = {'map_async': False, 'map_size': 1099511627776, 'max_spare_txns': 8, 'meminit': False, ...} lmdb = map_size = 1099511627776, writemap = True, max_spare_txns = 8 def __init__(self, path, buffers=True, dimension_separator=None, **kwargs): import lmdb # set default memory map size to something larger than the lmdb default, which is # very likely to be too small for any moderate array (logic copied from zict) map_size = (2**40 if sys.maxsize >= 2**32 else 2**28) kwargs.setdefault('map_size', map_size) # don't initialize buffers to zero by default, shouldn't be necessary kwargs.setdefault('meminit', False) # decide whether to use the writemap option based on the operating system's # support for sparse files - writemap requires sparse file support otherwise # the whole# `map_size` may be reserved up front on disk (logic copied from zict) writemap = sys.platform.startswith('linux') kwargs.setdefault('writemap', writemap) # decide options for when data are flushed to disk - choose to delay syncing # data to filesystem, otherwise pay a large performance penalty (zict also does # this) kwargs.setdefault('metasync', False) kwargs.setdefault('sync', False) kwargs.setdefault('map_async', False) # set default option for number of cached transactions max_spare_txns = multiprocessing.cpu_count() kwargs.setdefault('max_spare_txns', max_spare_txns) # normalize path path = os.path.abspath(path) # open database > self.db = lmdb.open(path, **kwargs) E lmdb.MemoryError: /tmp/tmplwjwl051.lmdb: Cannot allocate memory zarr/storage.py:2223: MemoryError _______ TestGroupWithLMDBStore.test_rmdir_group_and_array_metadata_files _______ self = def test_rmdir_group_and_array_metadata_files(self): """Test group.store's rmdir method. This test case was added to complete test coverage of `ABSStore.rmdir`. """ > g1 = self.create_group() zarr/tests/test_hierarchy.py:259: _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ zarr/tests/test_hierarchy.py:54: in create_group store, chunk_store = self.create_store() zarr/tests/test_hierarchy.py:1432: in create_store store = LMDBStore(path) _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ self = path = '/tmp/tmpfvwdx83h.lmdb', buffers = True, dimension_separator = None kwargs = {'map_async': False, 'map_size': 1099511627776, 'max_spare_txns': 8, 'meminit': False, ...} lmdb = map_size = 1099511627776, writemap = True, max_spare_txns = 8 def __init__(self, path, buffers=True, dimension_separator=None, **kwargs): import lmdb # set default memory map size to something larger than the lmdb default, which is # very likely to be too small for any moderate array (logic copied from zict) map_size = (2**40 if sys.maxsize >= 2**32 else 2**28) kwargs.setdefault('map_size', map_size) # don't initialize buffers to zero by default, shouldn't be necessary kwargs.setdefault('meminit', False) # decide whether to use the writemap option based on the operating system's # support for sparse files - writemap requires sparse file support otherwise # the whole# `map_size` may be reserved up front on disk (logic copied from zict) writemap = sys.platform.startswith('linux') kwargs.setdefault('writemap', writemap) # decide options for when data are flushed to disk - choose to delay syncing # data to filesystem, otherwise pay a large performance penalty (zict also does # this) kwargs.setdefault('metasync', False) kwargs.setdefault('sync', False) kwargs.setdefault('map_async', False) # set default option for number of cached transactions max_spare_txns = multiprocessing.cpu_count() kwargs.setdefault('max_spare_txns', max_spare_txns) # normalize path path = os.path.abspath(path) # open database > self.db = lmdb.open(path, **kwargs) E lmdb.MemoryError: /tmp/tmpfvwdx83h.lmdb: Cannot allocate memory zarr/storage.py:2223: MemoryError _____________________ TestGroupWithLMDBStore.test_setitem ______________________ self = def test_setitem(self): > g = self.create_group() zarr/tests/test_hierarchy.py:839: _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ zarr/tests/test_hierarchy.py:54: in create_group store, chunk_store = self.create_store() zarr/tests/test_hierarchy.py:1432: in create_store store = LMDBStore(path) _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ self = path = '/tmp/tmp5kcrfqb9.lmdb', buffers = True, dimension_separator = None kwargs = {'map_async': False, 'map_size': 1099511627776, 'max_spare_txns': 8, 'meminit': False, ...} lmdb = map_size = 1099511627776, writemap = True, max_spare_txns = 8 def __init__(self, path, buffers=True, dimension_separator=None, **kwargs): import lmdb # set default memory map size to something larger than the lmdb default, which is # very likely to be too small for any moderate array (logic copied from zict) map_size = (2**40 if sys.maxsize >= 2**32 else 2**28) kwargs.setdefault('map_size', map_size) # don't initialize buffers to zero by default, shouldn't be necessary kwargs.setdefault('meminit', False) # decide whether to use the writemap option based on the operating system's # support for sparse files - writemap requires sparse file support otherwise # the whole# `map_size` may be reserved up front on disk (logic copied from zict) writemap = sys.platform.startswith('linux') kwargs.setdefault('writemap', writemap) # decide options for when data are flushed to disk - choose to delay syncing # data to filesystem, otherwise pay a large performance penalty (zict also does # this) kwargs.setdefault('metasync', False) kwargs.setdefault('sync', False) kwargs.setdefault('map_async', False) # set default option for number of cached transactions max_spare_txns = multiprocessing.cpu_count() kwargs.setdefault('max_spare_txns', max_spare_txns) # normalize path path = os.path.abspath(path) # open database > self.db = lmdb.open(path, **kwargs) E lmdb.MemoryError: /tmp/tmp5kcrfqb9.lmdb: Cannot allocate memory zarr/storage.py:2223: MemoryError ___________________ TestLMDBStore.test_get_set_del_contains ____________________ self = def test_get_set_del_contains(self): > store = self.create_store() zarr/tests/test_storage.py:125: _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ zarr/tests/test_storage.py:1926: in create_store store = LMDBStore(path, buffers=buffers, **kwargs) _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ self = path = '/tmp/tmp7t12ujce.lmdb', buffers = True, dimension_separator = None kwargs = {'map_async': False, 'map_size': 1099511627776, 'max_spare_txns': 8, 'meminit': False, ...} lmdb = map_size = 1099511627776, writemap = True, max_spare_txns = 8 def __init__(self, path, buffers=True, dimension_separator=None, **kwargs): import lmdb # set default memory map size to something larger than the lmdb default, which is # very likely to be too small for any moderate array (logic copied from zict) map_size = (2**40 if sys.maxsize >= 2**32 else 2**28) kwargs.setdefault('map_size', map_size) # don't initialize buffers to zero by default, shouldn't be necessary kwargs.setdefault('meminit', False) # decide whether to use the writemap option based on the operating system's # support for sparse files - writemap requires sparse file support otherwise # the whole# `map_size` may be reserved up front on disk (logic copied from zict) writemap = sys.platform.startswith('linux') kwargs.setdefault('writemap', writemap) # decide options for when data are flushed to disk - choose to delay syncing # data to filesystem, otherwise pay a large performance penalty (zict also does # this) kwargs.setdefault('metasync', False) kwargs.setdefault('sync', False) kwargs.setdefault('map_async', False) # set default option for number of cached transactions max_spare_txns = multiprocessing.cpu_count() kwargs.setdefault('max_spare_txns', max_spare_txns) # normalize path path = os.path.abspath(path) # open database > self.db = lmdb.open(path, **kwargs) E lmdb.MemoryError: /tmp/tmp7t12ujce.lmdb: Cannot allocate memory zarr/storage.py:2223: MemoryError ____________________ TestLMDBStore.test_set_invalid_content ____________________ self = def test_set_invalid_content(self): > store = self.create_store() zarr/tests/test_storage.py:154: _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ zarr/tests/test_storage.py:1926: in create_store store = LMDBStore(path, buffers=buffers, **kwargs) _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ self = path = '/tmp/tmpna9agw_m.lmdb', buffers = True, dimension_separator = None kwargs = {'map_async': False, 'map_size': 1099511627776, 'max_spare_txns': 8, 'meminit': False, ...} lmdb = map_size = 1099511627776, writemap = True, max_spare_txns = 8 def __init__(self, path, buffers=True, dimension_separator=None, **kwargs): import lmdb # set default memory map size to something larger than the lmdb default, which is # very likely to be too small for any moderate array (logic copied from zict) map_size = (2**40 if sys.maxsize >= 2**32 else 2**28) kwargs.setdefault('map_size', map_size) # don't initialize buffers to zero by default, shouldn't be necessary kwargs.setdefault('meminit', False) # decide whether to use the writemap option based on the operating system's # support for sparse files - writemap requires sparse file support otherwise # the whole# `map_size` may be reserved up front on disk (logic copied from zict) writemap = sys.platform.startswith('linux') kwargs.setdefault('writemap', writemap) # decide options for when data are flushed to disk - choose to delay syncing # data to filesystem, otherwise pay a large performance penalty (zict also does # this) kwargs.setdefault('metasync', False) kwargs.setdefault('sync', False) kwargs.setdefault('map_async', False) # set default option for number of cached transactions max_spare_txns = multiprocessing.cpu_count() kwargs.setdefault('max_spare_txns', max_spare_txns) # normalize path path = os.path.abspath(path) # open database > self.db = lmdb.open(path, **kwargs) E lmdb.MemoryError: /tmp/tmpna9agw_m.lmdb: Cannot allocate memory zarr/storage.py:2223: MemoryError ___________________________ TestLMDBStore.test_clear ___________________________ self = def test_clear(self): > store = self.create_store() zarr/tests/test_storage.py:162: _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ zarr/tests/test_storage.py:1926: in create_store store = LMDBStore(path, buffers=buffers, **kwargs) _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ self = path = '/tmp/tmpbyuewb09.lmdb', buffers = True, dimension_separator = None kwargs = {'map_async': False, 'map_size': 1099511627776, 'max_spare_txns': 8, 'meminit': False, ...} lmdb = map_size = 1099511627776, writemap = True, max_spare_txns = 8 def __init__(self, path, buffers=True, dimension_separator=None, **kwargs): import lmdb # set default memory map size to something larger than the lmdb default, which is # very likely to be too small for any moderate array (logic copied from zict) map_size = (2**40 if sys.maxsize >= 2**32 else 2**28) kwargs.setdefault('map_size', map_size) # don't initialize buffers to zero by default, shouldn't be necessary kwargs.setdefault('meminit', False) # decide whether to use the writemap option based on the operating system's # support for sparse files - writemap requires sparse file support otherwise # the whole# `map_size` may be reserved up front on disk (logic copied from zict) writemap = sys.platform.startswith('linux') kwargs.setdefault('writemap', writemap) # decide options for when data are flushed to disk - choose to delay syncing # data to filesystem, otherwise pay a large performance penalty (zict also does # this) kwargs.setdefault('metasync', False) kwargs.setdefault('sync', False) kwargs.setdefault('map_async', False) # set default option for number of cached transactions max_spare_txns = multiprocessing.cpu_count() kwargs.setdefault('max_spare_txns', max_spare_txns) # normalize path path = os.path.abspath(path) # open database > self.db = lmdb.open(path, **kwargs) E lmdb.MemoryError: /tmp/tmpbyuewb09.lmdb: Cannot allocate memory zarr/storage.py:2223: MemoryError ____________________________ TestLMDBStore.test_pop ____________________________ self = def test_pop(self): > store = self.create_store() zarr/tests/test_storage.py:174: _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ zarr/tests/test_storage.py:1926: in create_store store = LMDBStore(path, buffers=buffers, **kwargs) _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ self = path = '/tmp/tmpd0ha9yd1.lmdb', buffers = True, dimension_separator = None kwargs = {'map_async': False, 'map_size': 1099511627776, 'max_spare_txns': 8, 'meminit': False, ...} lmdb = map_size = 1099511627776, writemap = True, max_spare_txns = 8 def __init__(self, path, buffers=True, dimension_separator=None, **kwargs): import lmdb # set default memory map size to something larger than the lmdb default, which is # very likely to be too small for any moderate array (logic copied from zict) map_size = (2**40 if sys.maxsize >= 2**32 else 2**28) kwargs.setdefault('map_size', map_size) # don't initialize buffers to zero by default, shouldn't be necessary kwargs.setdefault('meminit', False) # decide whether to use the writemap option based on the operating system's # support for sparse files - writemap requires sparse file support otherwise # the whole# `map_size` may be reserved up front on disk (logic copied from zict) writemap = sys.platform.startswith('linux') kwargs.setdefault('writemap', writemap) # decide options for when data are flushed to disk - choose to delay syncing # data to filesystem, otherwise pay a large performance penalty (zict also does # this) kwargs.setdefault('metasync', False) kwargs.setdefault('sync', False) kwargs.setdefault('map_async', False) # set default option for number of cached transactions max_spare_txns = multiprocessing.cpu_count() kwargs.setdefault('max_spare_txns', max_spare_txns) # normalize path path = os.path.abspath(path) # open database > self.db = lmdb.open(path, **kwargs) E lmdb.MemoryError: /tmp/tmpd0ha9yd1.lmdb: Cannot allocate memory zarr/storage.py:2223: MemoryError __________________________ TestLMDBStore.test_popitem __________________________ self = def test_popitem(self): > store = self.create_store() zarr/tests/test_storage.py:196: _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ zarr/tests/test_storage.py:1926: in create_store store = LMDBStore(path, buffers=buffers, **kwargs) _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ self = path = '/tmp/tmpu3f0_gvv.lmdb', buffers = True, dimension_separator = None kwargs = {'map_async': False, 'map_size': 1099511627776, 'max_spare_txns': 8, 'meminit': False, ...} lmdb = map_size = 1099511627776, writemap = True, max_spare_txns = 8 def __init__(self, path, buffers=True, dimension_separator=None, **kwargs): import lmdb # set default memory map size to something larger than the lmdb default, which is # very likely to be too small for any moderate array (logic copied from zict) map_size = (2**40 if sys.maxsize >= 2**32 else 2**28) kwargs.setdefault('map_size', map_size) # don't initialize buffers to zero by default, shouldn't be necessary kwargs.setdefault('meminit', False) # decide whether to use the writemap option based on the operating system's # support for sparse files - writemap requires sparse file support otherwise # the whole# `map_size` may be reserved up front on disk (logic copied from zict) writemap = sys.platform.startswith('linux') kwargs.setdefault('writemap', writemap) # decide options for when data are flushed to disk - choose to delay syncing # data to filesystem, otherwise pay a large performance penalty (zict also does # this) kwargs.setdefault('metasync', False) kwargs.setdefault('sync', False) kwargs.setdefault('map_async', False) # set default option for number of cached transactions max_spare_txns = multiprocessing.cpu_count() kwargs.setdefault('max_spare_txns', max_spare_txns) # normalize path path = os.path.abspath(path) # open database > self.db = lmdb.open(path, **kwargs) E lmdb.MemoryError: /tmp/tmpu3f0_gvv.lmdb: Cannot allocate memory zarr/storage.py:2223: MemoryError _____________________ TestLMDBStore.test_writeable_values ______________________ self = def test_writeable_values(self): > store = self.create_store() zarr/tests/test_storage.py:208: _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ zarr/tests/test_storage.py:1926: in create_store store = LMDBStore(path, buffers=buffers, **kwargs) _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ self = path = '/tmp/tmpzxfg__q9.lmdb', buffers = True, dimension_separator = None kwargs = {'map_async': False, 'map_size': 1099511627776, 'max_spare_txns': 8, 'meminit': False, ...} lmdb = map_size = 1099511627776, writemap = True, max_spare_txns = 8 def __init__(self, path, buffers=True, dimension_separator=None, **kwargs): import lmdb # set default memory map size to something larger than the lmdb default, which is # very likely to be too small for any moderate array (logic copied from zict) map_size = (2**40 if sys.maxsize >= 2**32 else 2**28) kwargs.setdefault('map_size', map_size) # don't initialize buffers to zero by default, shouldn't be necessary kwargs.setdefault('meminit', False) # decide whether to use the writemap option based on the operating system's # support for sparse files - writemap requires sparse file support otherwise # the whole# `map_size` may be reserved up front on disk (logic copied from zict) writemap = sys.platform.startswith('linux') kwargs.setdefault('writemap', writemap) # decide options for when data are flushed to disk - choose to delay syncing # data to filesystem, otherwise pay a large performance penalty (zict also does # this) kwargs.setdefault('metasync', False) kwargs.setdefault('sync', False) kwargs.setdefault('map_async', False) # set default option for number of cached transactions max_spare_txns = multiprocessing.cpu_count() kwargs.setdefault('max_spare_txns', max_spare_txns) # normalize path path = os.path.abspath(path) # open database > self.db = lmdb.open(path, **kwargs) E lmdb.MemoryError: /tmp/tmpzxfg__q9.lmdb: Cannot allocate memory zarr/storage.py:2223: MemoryError __________________________ TestLMDBStore.test_update ___________________________ self = def test_update(self): > store = self.create_store() zarr/tests/test_storage.py:219: _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ zarr/tests/test_storage.py:1926: in create_store store = LMDBStore(path, buffers=buffers, **kwargs) _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ self = path = '/tmp/tmpm0o8pou1.lmdb', buffers = True, dimension_separator = None kwargs = {'map_async': False, 'map_size': 1099511627776, 'max_spare_txns': 8, 'meminit': False, ...} lmdb = map_size = 1099511627776, writemap = True, max_spare_txns = 8 def __init__(self, path, buffers=True, dimension_separator=None, **kwargs): import lmdb # set default memory map size to something larger than the lmdb default, which is # very likely to be too small for any moderate array (logic copied from zict) map_size = (2**40 if sys.maxsize >= 2**32 else 2**28) kwargs.setdefault('map_size', map_size) # don't initialize buffers to zero by default, shouldn't be necessary kwargs.setdefault('meminit', False) # decide whether to use the writemap option based on the operating system's # support for sparse files - writemap requires sparse file support otherwise # the whole# `map_size` may be reserved up front on disk (logic copied from zict) writemap = sys.platform.startswith('linux') kwargs.setdefault('writemap', writemap) # decide options for when data are flushed to disk - choose to delay syncing # data to filesystem, otherwise pay a large performance penalty (zict also does # this) kwargs.setdefault('metasync', False) kwargs.setdefault('sync', False) kwargs.setdefault('map_async', False) # set default option for number of cached transactions max_spare_txns = multiprocessing.cpu_count() kwargs.setdefault('max_spare_txns', max_spare_txns) # normalize path path = os.path.abspath(path) # open database > self.db = lmdb.open(path, **kwargs) E lmdb.MemoryError: /tmp/tmpm0o8pou1.lmdb: Cannot allocate memory zarr/storage.py:2223: MemoryError _________________________ TestLMDBStore.test_iterators _________________________ self = def test_iterators(self): > store = self.create_store() zarr/tests/test_storage.py:236: _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ zarr/tests/test_storage.py:1926: in create_store store = LMDBStore(path, buffers=buffers, **kwargs) _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ self = path = '/tmp/tmp3z_klw_w.lmdb', buffers = True, dimension_separator = None kwargs = {'map_async': False, 'map_size': 1099511627776, 'max_spare_txns': 8, 'meminit': False, ...} lmdb = map_size = 1099511627776, writemap = True, max_spare_txns = 8 def __init__(self, path, buffers=True, dimension_separator=None, **kwargs): import lmdb # set default memory map size to something larger than the lmdb default, which is # very likely to be too small for any moderate array (logic copied from zict) map_size = (2**40 if sys.maxsize >= 2**32 else 2**28) kwargs.setdefault('map_size', map_size) # don't initialize buffers to zero by default, shouldn't be necessary kwargs.setdefault('meminit', False) # decide whether to use the writemap option based on the operating system's # support for sparse files - writemap requires sparse file support otherwise # the whole# `map_size` may be reserved up front on disk (logic copied from zict) writemap = sys.platform.startswith('linux') kwargs.setdefault('writemap', writemap) # decide options for when data are flushed to disk - choose to delay syncing # data to filesystem, otherwise pay a large performance penalty (zict also does # this) kwargs.setdefault('metasync', False) kwargs.setdefault('sync', False) kwargs.setdefault('map_async', False) # set default option for number of cached transactions max_spare_txns = multiprocessing.cpu_count() kwargs.setdefault('max_spare_txns', max_spare_txns) # normalize path path = os.path.abspath(path) # open database > self.db = lmdb.open(path, **kwargs) E lmdb.MemoryError: /tmp/tmp3z_klw_w.lmdb: Cannot allocate memory zarr/storage.py:2223: MemoryError __________________________ TestLMDBStore.test_pickle ___________________________ self = def test_pickle(self): # setup store > store = self.create_store() zarr/tests/test_storage.py:269: _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ zarr/tests/test_storage.py:1926: in create_store store = LMDBStore(path, buffers=buffers, **kwargs) _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ self = path = '/tmp/tmpbon007uq.lmdb', buffers = True, dimension_separator = None kwargs = {'map_async': False, 'map_size': 1099511627776, 'max_spare_txns': 8, 'meminit': False, ...} lmdb = map_size = 1099511627776, writemap = True, max_spare_txns = 8 def __init__(self, path, buffers=True, dimension_separator=None, **kwargs): import lmdb # set default memory map size to something larger than the lmdb default, which is # very likely to be too small for any moderate array (logic copied from zict) map_size = (2**40 if sys.maxsize >= 2**32 else 2**28) kwargs.setdefault('map_size', map_size) # don't initialize buffers to zero by default, shouldn't be necessary kwargs.setdefault('meminit', False) # decide whether to use the writemap option based on the operating system's # support for sparse files - writemap requires sparse file support otherwise # the whole# `map_size` may be reserved up front on disk (logic copied from zict) writemap = sys.platform.startswith('linux') kwargs.setdefault('writemap', writemap) # decide options for when data are flushed to disk - choose to delay syncing # data to filesystem, otherwise pay a large performance penalty (zict also does # this) kwargs.setdefault('metasync', False) kwargs.setdefault('sync', False) kwargs.setdefault('map_async', False) # set default option for number of cached transactions max_spare_txns = multiprocessing.cpu_count() kwargs.setdefault('max_spare_txns', max_spare_txns) # normalize path path = os.path.abspath(path) # open database > self.db = lmdb.open(path, **kwargs) E lmdb.MemoryError: /tmp/tmpbon007uq.lmdb: Cannot allocate memory zarr/storage.py:2223: MemoryError __________________________ TestLMDBStore.test_getsize __________________________ self = def test_getsize(self): > store = self.create_store() zarr/tests/test_storage.py:293: _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ zarr/tests/test_storage.py:1926: in create_store store = LMDBStore(path, buffers=buffers, **kwargs) _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ self = path = '/tmp/tmphes5ahii.lmdb', buffers = True, dimension_separator = None kwargs = {'map_async': False, 'map_size': 1099511627776, 'max_spare_txns': 8, 'meminit': False, ...} lmdb = map_size = 1099511627776, writemap = True, max_spare_txns = 8 def __init__(self, path, buffers=True, dimension_separator=None, **kwargs): import lmdb # set default memory map size to something larger than the lmdb default, which is # very likely to be too small for any moderate array (logic copied from zict) map_size = (2**40 if sys.maxsize >= 2**32 else 2**28) kwargs.setdefault('map_size', map_size) # don't initialize buffers to zero by default, shouldn't be necessary kwargs.setdefault('meminit', False) # decide whether to use the writemap option based on the operating system's # support for sparse files - writemap requires sparse file support otherwise # the whole# `map_size` may be reserved up front on disk (logic copied from zict) writemap = sys.platform.startswith('linux') kwargs.setdefault('writemap', writemap) # decide options for when data are flushed to disk - choose to delay syncing # data to filesystem, otherwise pay a large performance penalty (zict also does # this) kwargs.setdefault('metasync', False) kwargs.setdefault('sync', False) kwargs.setdefault('map_async', False) # set default option for number of cached transactions max_spare_txns = multiprocessing.cpu_count() kwargs.setdefault('max_spare_txns', max_spare_txns) # normalize path path = os.path.abspath(path) # open database > self.db = lmdb.open(path, **kwargs) E lmdb.MemoryError: /tmp/tmphes5ahii.lmdb: Cannot allocate memory zarr/storage.py:2223: MemoryError _________________________ TestLMDBStore.test_hierarchy _________________________ self = def test_hierarchy(self): # setup > store = self.create_store() zarr/tests/test_storage.py:317: _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ zarr/tests/test_storage.py:1926: in create_store store = LMDBStore(path, buffers=buffers, **kwargs) _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ self = path = '/tmp/tmpl5yfjo01.lmdb', buffers = True, dimension_separator = None kwargs = {'map_async': False, 'map_size': 1099511627776, 'max_spare_txns': 8, 'meminit': False, ...} lmdb = map_size = 1099511627776, writemap = True, max_spare_txns = 8 def __init__(self, path, buffers=True, dimension_separator=None, **kwargs): import lmdb # set default memory map size to something larger than the lmdb default, which is # very likely to be too small for any moderate array (logic copied from zict) map_size = (2**40 if sys.maxsize >= 2**32 else 2**28) kwargs.setdefault('map_size', map_size) # don't initialize buffers to zero by default, shouldn't be necessary kwargs.setdefault('meminit', False) # decide whether to use the writemap option based on the operating system's # support for sparse files - writemap requires sparse file support otherwise # the whole# `map_size` may be reserved up front on disk (logic copied from zict) writemap = sys.platform.startswith('linux') kwargs.setdefault('writemap', writemap) # decide options for when data are flushed to disk - choose to delay syncing # data to filesystem, otherwise pay a large performance penalty (zict also does # this) kwargs.setdefault('metasync', False) kwargs.setdefault('sync', False) kwargs.setdefault('map_async', False) # set default option for number of cached transactions max_spare_txns = multiprocessing.cpu_count() kwargs.setdefault('max_spare_txns', max_spare_txns) # normalize path path = os.path.abspath(path) # open database > self.db = lmdb.open(path, **kwargs) E lmdb.MemoryError: /tmp/tmpl5yfjo01.lmdb: Cannot allocate memory zarr/storage.py:2223: MemoryError _________ TestLMDBStore.test_init_array[dimension_separator_fixture0] __________ self = dimension_separator_fixture = (None, '.') def test_init_array(self, dimension_separator_fixture): pass_dim_sep, want_dim_sep = dimension_separator_fixture > store = self.create_store(dimension_separator=pass_dim_sep) zarr/tests/test_storage.py:465: _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ zarr/tests/test_storage.py:1926: in create_store store = LMDBStore(path, buffers=buffers, **kwargs) _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ self = path = '/tmp/tmpdrma6758.lmdb', buffers = True, dimension_separator = None kwargs = {'map_async': False, 'map_size': 1099511627776, 'max_spare_txns': 8, 'meminit': False, ...} lmdb = map_size = 1099511627776, writemap = True, max_spare_txns = 8 def __init__(self, path, buffers=True, dimension_separator=None, **kwargs): import lmdb # set default memory map size to something larger than the lmdb default, which is # very likely to be too small for any moderate array (logic copied from zict) map_size = (2**40 if sys.maxsize >= 2**32 else 2**28) kwargs.setdefault('map_size', map_size) # don't initialize buffers to zero by default, shouldn't be necessary kwargs.setdefault('meminit', False) # decide whether to use the writemap option based on the operating system's # support for sparse files - writemap requires sparse file support otherwise # the whole# `map_size` may be reserved up front on disk (logic copied from zict) writemap = sys.platform.startswith('linux') kwargs.setdefault('writemap', writemap) # decide options for when data are flushed to disk - choose to delay syncing # data to filesystem, otherwise pay a large performance penalty (zict also does # this) kwargs.setdefault('metasync', False) kwargs.setdefault('sync', False) kwargs.setdefault('map_async', False) # set default option for number of cached transactions max_spare_txns = multiprocessing.cpu_count() kwargs.setdefault('max_spare_txns', max_spare_txns) # normalize path path = os.path.abspath(path) # open database > self.db = lmdb.open(path, **kwargs) E lmdb.MemoryError: /tmp/tmpdrma6758.lmdb: Cannot allocate memory zarr/storage.py:2223: MemoryError _________ TestLMDBStore.test_init_array[dimension_separator_fixture1] __________ self = dimension_separator_fixture = ('.', '.') def test_init_array(self, dimension_separator_fixture): pass_dim_sep, want_dim_sep = dimension_separator_fixture > store = self.create_store(dimension_separator=pass_dim_sep) zarr/tests/test_storage.py:465: _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ zarr/tests/test_storage.py:1926: in create_store store = LMDBStore(path, buffers=buffers, **kwargs) _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ self = path = '/tmp/tmphftnc9zi.lmdb', buffers = True, dimension_separator = '.' kwargs = {'map_async': False, 'map_size': 1099511627776, 'max_spare_txns': 8, 'meminit': False, ...} lmdb = map_size = 1099511627776, writemap = True, max_spare_txns = 8 def __init__(self, path, buffers=True, dimension_separator=None, **kwargs): import lmdb # set default memory map size to something larger than the lmdb default, which is # very likely to be too small for any moderate array (logic copied from zict) map_size = (2**40 if sys.maxsize >= 2**32 else 2**28) kwargs.setdefault('map_size', map_size) # don't initialize buffers to zero by default, shouldn't be necessary kwargs.setdefault('meminit', False) # decide whether to use the writemap option based on the operating system's # support for sparse files - writemap requires sparse file support otherwise # the whole# `map_size` may be reserved up front on disk (logic copied from zict) writemap = sys.platform.startswith('linux') kwargs.setdefault('writemap', writemap) # decide options for when data are flushed to disk - choose to delay syncing # data to filesystem, otherwise pay a large performance penalty (zict also does # this) kwargs.setdefault('metasync', False) kwargs.setdefault('sync', False) kwargs.setdefault('map_async', False) # set default option for number of cached transactions max_spare_txns = multiprocessing.cpu_count() kwargs.setdefault('max_spare_txns', max_spare_txns) # normalize path path = os.path.abspath(path) # open database > self.db = lmdb.open(path, **kwargs) E lmdb.MemoryError: /tmp/tmphftnc9zi.lmdb: Cannot allocate memory zarr/storage.py:2223: MemoryError _________ TestLMDBStore.test_init_array[dimension_separator_fixture2] __________ self = dimension_separator_fixture = ('/', '/') def test_init_array(self, dimension_separator_fixture): pass_dim_sep, want_dim_sep = dimension_separator_fixture > store = self.create_store(dimension_separator=pass_dim_sep) zarr/tests/test_storage.py:465: _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ zarr/tests/test_storage.py:1926: in create_store store = LMDBStore(path, buffers=buffers, **kwargs) _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ self = path = '/tmp/tmpohm0zfe4.lmdb', buffers = True, dimension_separator = '/' kwargs = {'map_async': False, 'map_size': 1099511627776, 'max_spare_txns': 8, 'meminit': False, ...} lmdb = map_size = 1099511627776, writemap = True, max_spare_txns = 8 def __init__(self, path, buffers=True, dimension_separator=None, **kwargs): import lmdb # set default memory map size to something larger than the lmdb default, which is # very likely to be too small for any moderate array (logic copied from zict) map_size = (2**40 if sys.maxsize >= 2**32 else 2**28) kwargs.setdefault('map_size', map_size) # don't initialize buffers to zero by default, shouldn't be necessary kwargs.setdefault('meminit', False) # decide whether to use the writemap option based on the operating system's # support for sparse files - writemap requires sparse file support otherwise # the whole# `map_size` may be reserved up front on disk (logic copied from zict) writemap = sys.platform.startswith('linux') kwargs.setdefault('writemap', writemap) # decide options for when data are flushed to disk - choose to delay syncing # data to filesystem, otherwise pay a large performance penalty (zict also does # this) kwargs.setdefault('metasync', False) kwargs.setdefault('sync', False) kwargs.setdefault('map_async', False) # set default option for number of cached transactions max_spare_txns = multiprocessing.cpu_count() kwargs.setdefault('max_spare_txns', max_spare_txns) # normalize path path = os.path.abspath(path) # open database > self.db = lmdb.open(path, **kwargs) E lmdb.MemoryError: /tmp/tmpohm0zfe4.lmdb: Cannot allocate memory zarr/storage.py:2223: MemoryError ___________________ TestLMDBStore.test_init_array_overwrite ____________________ self = def test_init_array_overwrite(self): > self._test_init_array_overwrite('F') zarr/tests/test_storage.py:483: _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ zarr/tests/test_storage.py:502: in _test_init_array_overwrite store = self.create_store() zarr/tests/test_storage.py:1926: in create_store store = LMDBStore(path, buffers=buffers, **kwargs) _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ self = path = '/tmp/tmpzltw7c2c.lmdb', buffers = True, dimension_separator = None kwargs = {'map_async': False, 'map_size': 1099511627776, 'max_spare_txns': 8, 'meminit': False, ...} lmdb = map_size = 1099511627776, writemap = True, max_spare_txns = 8 def __init__(self, path, buffers=True, dimension_separator=None, **kwargs): import lmdb # set default memory map size to something larger than the lmdb default, which is # very likely to be too small for any moderate array (logic copied from zict) map_size = (2**40 if sys.maxsize >= 2**32 else 2**28) kwargs.setdefault('map_size', map_size) # don't initialize buffers to zero by default, shouldn't be necessary kwargs.setdefault('meminit', False) # decide whether to use the writemap option based on the operating system's # support for sparse files - writemap requires sparse file support otherwise # the whole# `map_size` may be reserved up front on disk (logic copied from zict) writemap = sys.platform.startswith('linux') kwargs.setdefault('writemap', writemap) # decide options for when data are flushed to disk - choose to delay syncing # data to filesystem, otherwise pay a large performance penalty (zict also does # this) kwargs.setdefault('metasync', False) kwargs.setdefault('sync', False) kwargs.setdefault('map_async', False) # set default option for number of cached transactions max_spare_txns = multiprocessing.cpu_count() kwargs.setdefault('max_spare_txns', max_spare_txns) # normalize path path = os.path.abspath(path) # open database > self.db = lmdb.open(path, **kwargs) E lmdb.MemoryError: /tmp/tmpzltw7c2c.lmdb: Cannot allocate memory zarr/storage.py:2223: MemoryError _________________ TestLMDBStore.test_init_array_overwrite_path _________________ self = def test_init_array_overwrite_path(self): > self._test_init_array_overwrite_path('F') zarr/tests/test_storage.py:486: _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ zarr/tests/test_storage.py:580: in _test_init_array_overwrite_path store = self.create_store() zarr/tests/test_storage.py:1926: in create_store store = LMDBStore(path, buffers=buffers, **kwargs) _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ self = path = '/tmp/tmpsvz_xy_s.lmdb', buffers = True, dimension_separator = None kwargs = {'map_async': False, 'map_size': 1099511627776, 'max_spare_txns': 8, 'meminit': False, ...} lmdb = map_size = 1099511627776, writemap = True, max_spare_txns = 8 def __init__(self, path, buffers=True, dimension_separator=None, **kwargs): import lmdb # set default memory map size to something larger than the lmdb default, which is # very likely to be too small for any moderate array (logic copied from zict) map_size = (2**40 if sys.maxsize >= 2**32 else 2**28) kwargs.setdefault('map_size', map_size) # don't initialize buffers to zero by default, shouldn't be necessary kwargs.setdefault('meminit', False) # decide whether to use the writemap option based on the operating system's # support for sparse files - writemap requires sparse file support otherwise # the whole# `map_size` may be reserved up front on disk (logic copied from zict) writemap = sys.platform.startswith('linux') kwargs.setdefault('writemap', writemap) # decide options for when data are flushed to disk - choose to delay syncing # data to filesystem, otherwise pay a large performance penalty (zict also does # this) kwargs.setdefault('metasync', False) kwargs.setdefault('sync', False) kwargs.setdefault('map_async', False) # set default option for number of cached transactions max_spare_txns = multiprocessing.cpu_count() kwargs.setdefault('max_spare_txns', max_spare_txns) # normalize path path = os.path.abspath(path) # open database > self.db = lmdb.open(path, **kwargs) E lmdb.MemoryError: /tmp/tmpsvz_xy_s.lmdb: Cannot allocate memory zarr/storage.py:2223: MemoryError _____________ TestLMDBStore.test_init_array_overwrite_chunk_store ______________ self = def test_init_array_overwrite_chunk_store(self): > self._test_init_array_overwrite_chunk_store('F') zarr/tests/test_storage.py:489: _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ zarr/tests/test_storage.py:670: in _test_init_array_overwrite_chunk_store store = self.create_store() zarr/tests/test_storage.py:1926: in create_store store = LMDBStore(path, buffers=buffers, **kwargs) _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ self = path = '/tmp/tmpu1ty9258.lmdb', buffers = True, dimension_separator = None kwargs = {'map_async': False, 'map_size': 1099511627776, 'max_spare_txns': 8, 'meminit': False, ...} lmdb = map_size = 1099511627776, writemap = True, max_spare_txns = 8 def __init__(self, path, buffers=True, dimension_separator=None, **kwargs): import lmdb # set default memory map size to something larger than the lmdb default, which is # very likely to be too small for any moderate array (logic copied from zict) map_size = (2**40 if sys.maxsize >= 2**32 else 2**28) kwargs.setdefault('map_size', map_size) # don't initialize buffers to zero by default, shouldn't be necessary kwargs.setdefault('meminit', False) # decide whether to use the writemap option based on the operating system's # support for sparse files - writemap requires sparse file support otherwise # the whole# `map_size` may be reserved up front on disk (logic copied from zict) writemap = sys.platform.startswith('linux') kwargs.setdefault('writemap', writemap) # decide options for when data are flushed to disk - choose to delay syncing # data to filesystem, otherwise pay a large performance penalty (zict also does # this) kwargs.setdefault('metasync', False) kwargs.setdefault('sync', False) kwargs.setdefault('map_async', False) # set default option for number of cached transactions max_spare_txns = multiprocessing.cpu_count() kwargs.setdefault('max_spare_txns', max_spare_txns) # normalize path path = os.path.abspath(path) # open database > self.db = lmdb.open(path, **kwargs) E lmdb.MemoryError: /tmp/tmpu1ty9258.lmdb: Cannot allocate memory zarr/storage.py:2223: MemoryError ___________________ TestLMDBStore.test_init_group_overwrite ____________________ self = def test_init_group_overwrite(self): > self._test_init_group_overwrite('F') zarr/tests/test_storage.py:492: _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ zarr/tests/test_storage.py:772: in _test_init_group_overwrite store = self.create_store() zarr/tests/test_storage.py:1926: in create_store store = LMDBStore(path, buffers=buffers, **kwargs) _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ self = path = '/tmp/tmpb6qgrdly.lmdb', buffers = True, dimension_separator = None kwargs = {'map_async': False, 'map_size': 1099511627776, 'max_spare_txns': 8, 'meminit': False, ...} lmdb = map_size = 1099511627776, writemap = True, max_spare_txns = 8 def __init__(self, path, buffers=True, dimension_separator=None, **kwargs): import lmdb # set default memory map size to something larger than the lmdb default, which is # very likely to be too small for any moderate array (logic copied from zict) map_size = (2**40 if sys.maxsize >= 2**32 else 2**28) kwargs.setdefault('map_size', map_size) # don't initialize buffers to zero by default, shouldn't be necessary kwargs.setdefault('meminit', False) # decide whether to use the writemap option based on the operating system's # support for sparse files - writemap requires sparse file support otherwise # the whole# `map_size` may be reserved up front on disk (logic copied from zict) writemap = sys.platform.startswith('linux') kwargs.setdefault('writemap', writemap) # decide options for when data are flushed to disk - choose to delay syncing # data to filesystem, otherwise pay a large performance penalty (zict also does # this) kwargs.setdefault('metasync', False) kwargs.setdefault('sync', False) kwargs.setdefault('map_async', False) # set default option for number of cached transactions max_spare_txns = multiprocessing.cpu_count() kwargs.setdefault('max_spare_txns', max_spare_txns) # normalize path path = os.path.abspath(path) # open database > self.db = lmdb.open(path, **kwargs) E lmdb.MemoryError: /tmp/tmpb6qgrdly.lmdb: Cannot allocate memory zarr/storage.py:2223: MemoryError _________________ TestLMDBStore.test_init_group_overwrite_path _________________ self = def test_init_group_overwrite_path(self): > self._test_init_group_overwrite_path('F') zarr/tests/test_storage.py:495: _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ zarr/tests/test_storage.py:807: in _test_init_group_overwrite_path store = self.create_store() zarr/tests/test_storage.py:1926: in create_store store = LMDBStore(path, buffers=buffers, **kwargs) _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ self = path = '/tmp/tmp3jf00c5b.lmdb', buffers = True, dimension_separator = None kwargs = {'map_async': False, 'map_size': 1099511627776, 'max_spare_txns': 8, 'meminit': False, ...} lmdb = map_size = 1099511627776, writemap = True, max_spare_txns = 8 def __init__(self, path, buffers=True, dimension_separator=None, **kwargs): import lmdb # set default memory map size to something larger than the lmdb default, which is # very likely to be too small for any moderate array (logic copied from zict) map_size = (2**40 if sys.maxsize >= 2**32 else 2**28) kwargs.setdefault('map_size', map_size) # don't initialize buffers to zero by default, shouldn't be necessary kwargs.setdefault('meminit', False) # decide whether to use the writemap option based on the operating system's # support for sparse files - writemap requires sparse file support otherwise # the whole# `map_size` may be reserved up front on disk (logic copied from zict) writemap = sys.platform.startswith('linux') kwargs.setdefault('writemap', writemap) # decide options for when data are flushed to disk - choose to delay syncing # data to filesystem, otherwise pay a large performance penalty (zict also does # this) kwargs.setdefault('metasync', False) kwargs.setdefault('sync', False) kwargs.setdefault('map_async', False) # set default option for number of cached transactions max_spare_txns = multiprocessing.cpu_count() kwargs.setdefault('max_spare_txns', max_spare_txns) # normalize path path = os.path.abspath(path) # open database > self.db = lmdb.open(path, **kwargs) E lmdb.MemoryError: /tmp/tmp3jf00c5b.lmdb: Cannot allocate memory zarr/storage.py:2223: MemoryError _____________ TestLMDBStore.test_init_group_overwrite_chunk_store ______________ self = def test_init_group_overwrite_chunk_store(self): > self._test_init_group_overwrite_chunk_store('F') zarr/tests/test_storage.py:498: _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ zarr/tests/test_storage.py:864: in _test_init_group_overwrite_chunk_store store = self.create_store() zarr/tests/test_storage.py:1926: in create_store store = LMDBStore(path, buffers=buffers, **kwargs) _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ self = path = '/tmp/tmpmt7r5a9f.lmdb', buffers = True, dimension_separator = None kwargs = {'map_async': False, 'map_size': 1099511627776, 'max_spare_txns': 8, 'meminit': False, ...} lmdb = map_size = 1099511627776, writemap = True, max_spare_txns = 8 def __init__(self, path, buffers=True, dimension_separator=None, **kwargs): import lmdb # set default memory map size to something larger than the lmdb default, which is # very likely to be too small for any moderate array (logic copied from zict) map_size = (2**40 if sys.maxsize >= 2**32 else 2**28) kwargs.setdefault('map_size', map_size) # don't initialize buffers to zero by default, shouldn't be necessary kwargs.setdefault('meminit', False) # decide whether to use the writemap option based on the operating system's # support for sparse files - writemap requires sparse file support otherwise # the whole# `map_size` may be reserved up front on disk (logic copied from zict) writemap = sys.platform.startswith('linux') kwargs.setdefault('writemap', writemap) # decide options for when data are flushed to disk - choose to delay syncing # data to filesystem, otherwise pay a large performance penalty (zict also does # this) kwargs.setdefault('metasync', False) kwargs.setdefault('sync', False) kwargs.setdefault('map_async', False) # set default option for number of cached transactions max_spare_txns = multiprocessing.cpu_count() kwargs.setdefault('max_spare_txns', max_spare_txns) # normalize path path = os.path.abspath(path) # open database > self.db = lmdb.open(path, **kwargs) E lmdb.MemoryError: /tmp/tmpmt7r5a9f.lmdb: Cannot allocate memory zarr/storage.py:2223: MemoryError ______________________ TestLMDBStore.test_init_array_path ______________________ self = def test_init_array_path(self): path = 'foo/bar' > store = self.create_store() zarr/tests/test_storage.py:553: _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ zarr/tests/test_storage.py:1926: in create_store store = LMDBStore(path, buffers=buffers, **kwargs) _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ self = path = '/tmp/tmp9huxm14e.lmdb', buffers = True, dimension_separator = None kwargs = {'map_async': False, 'map_size': 1099511627776, 'max_spare_txns': 8, 'meminit': False, ...} lmdb = map_size = 1099511627776, writemap = True, max_spare_txns = 8 def __init__(self, path, buffers=True, dimension_separator=None, **kwargs): import lmdb # set default memory map size to something larger than the lmdb default, which is # very likely to be too small for any moderate array (logic copied from zict) map_size = (2**40 if sys.maxsize >= 2**32 else 2**28) kwargs.setdefault('map_size', map_size) # don't initialize buffers to zero by default, shouldn't be necessary kwargs.setdefault('meminit', False) # decide whether to use the writemap option based on the operating system's # support for sparse files - writemap requires sparse file support otherwise # the whole# `map_size` may be reserved up front on disk (logic copied from zict) writemap = sys.platform.startswith('linux') kwargs.setdefault('writemap', writemap) # decide options for when data are flushed to disk - choose to delay syncing # data to filesystem, otherwise pay a large performance penalty (zict also does # this) kwargs.setdefault('metasync', False) kwargs.setdefault('sync', False) kwargs.setdefault('map_async', False) # set default option for number of cached transactions max_spare_txns = multiprocessing.cpu_count() kwargs.setdefault('max_spare_txns', max_spare_txns) # normalize path path = os.path.abspath(path) # open database > self.db = lmdb.open(path, **kwargs) E lmdb.MemoryError: /tmp/tmp9huxm14e.lmdb: Cannot allocate memory zarr/storage.py:2223: MemoryError ________________ TestLMDBStore.test_init_array_overwrite_group _________________ self = def test_init_array_overwrite_group(self): # setup path = 'foo/bar' > store = self.create_store() zarr/tests/test_storage.py:634: _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ zarr/tests/test_storage.py:1926: in create_store store = LMDBStore(path, buffers=buffers, **kwargs) _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ self = path = '/tmp/tmp7frj27gv.lmdb', buffers = True, dimension_separator = None kwargs = {'map_async': False, 'map_size': 1099511627776, 'max_spare_txns': 8, 'meminit': False, ...} lmdb = map_size = 1099511627776, writemap = True, max_spare_txns = 8 def __init__(self, path, buffers=True, dimension_separator=None, **kwargs): import lmdb # set default memory map size to something larger than the lmdb default, which is # very likely to be too small for any moderate array (logic copied from zict) map_size = (2**40 if sys.maxsize >= 2**32 else 2**28) kwargs.setdefault('map_size', map_size) # don't initialize buffers to zero by default, shouldn't be necessary kwargs.setdefault('meminit', False) # decide whether to use the writemap option based on the operating system's # support for sparse files - writemap requires sparse file support otherwise # the whole# `map_size` may be reserved up front on disk (logic copied from zict) writemap = sys.platform.startswith('linux') kwargs.setdefault('writemap', writemap) # decide options for when data are flushed to disk - choose to delay syncing # data to filesystem, otherwise pay a large performance penalty (zict also does # this) kwargs.setdefault('metasync', False) kwargs.setdefault('sync', False) kwargs.setdefault('map_async', False) # set default option for number of cached transactions max_spare_txns = multiprocessing.cpu_count() kwargs.setdefault('max_spare_txns', max_spare_txns) # normalize path path = os.path.abspath(path) # open database > self.db = lmdb.open(path, **kwargs) E lmdb.MemoryError: /tmp/tmp7frj27gv.lmdb: Cannot allocate memory zarr/storage.py:2223: MemoryError _____________________ TestLMDBStore.test_init_array_compat _____________________ self = def test_init_array_compat(self): > store = self.create_store() zarr/tests/test_storage.py:731: _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ zarr/tests/test_storage.py:1926: in create_store store = LMDBStore(path, buffers=buffers, **kwargs) _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ self = path = '/tmp/tmp1c93w8ek.lmdb', buffers = True, dimension_separator = None kwargs = {'map_async': False, 'map_size': 1099511627776, 'max_spare_txns': 8, 'meminit': False, ...} lmdb = map_size = 1099511627776, writemap = True, max_spare_txns = 8 def __init__(self, path, buffers=True, dimension_separator=None, **kwargs): import lmdb # set default memory map size to something larger than the lmdb default, which is # very likely to be too small for any moderate array (logic copied from zict) map_size = (2**40 if sys.maxsize >= 2**32 else 2**28) kwargs.setdefault('map_size', map_size) # don't initialize buffers to zero by default, shouldn't be necessary kwargs.setdefault('meminit', False) # decide whether to use the writemap option based on the operating system's # support for sparse files - writemap requires sparse file support otherwise # the whole# `map_size` may be reserved up front on disk (logic copied from zict) writemap = sys.platform.startswith('linux') kwargs.setdefault('writemap', writemap) # decide options for when data are flushed to disk - choose to delay syncing # data to filesystem, otherwise pay a large performance penalty (zict also does # this) kwargs.setdefault('metasync', False) kwargs.setdefault('sync', False) kwargs.setdefault('map_async', False) # set default option for number of cached transactions max_spare_txns = multiprocessing.cpu_count() kwargs.setdefault('max_spare_txns', max_spare_txns) # normalize path path = os.path.abspath(path) # open database > self.db = lmdb.open(path, **kwargs) E lmdb.MemoryError: /tmp/tmp1c93w8ek.lmdb: Cannot allocate memory zarr/storage.py:2223: MemoryError ________________________ TestLMDBStore.test_init_group _________________________ self = def test_init_group(self): > store = self.create_store() zarr/tests/test_storage.py:747: _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ zarr/tests/test_storage.py:1926: in create_store store = LMDBStore(path, buffers=buffers, **kwargs) _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ self = path = '/tmp/tmp8z8gdls8.lmdb', buffers = True, dimension_separator = None kwargs = {'map_async': False, 'map_size': 1099511627776, 'max_spare_txns': 8, 'meminit': False, ...} lmdb = map_size = 1099511627776, writemap = True, max_spare_txns = 8 def __init__(self, path, buffers=True, dimension_separator=None, **kwargs): import lmdb # set default memory map size to something larger than the lmdb default, which is # very likely to be too small for any moderate array (logic copied from zict) map_size = (2**40 if sys.maxsize >= 2**32 else 2**28) kwargs.setdefault('map_size', map_size) # don't initialize buffers to zero by default, shouldn't be necessary kwargs.setdefault('meminit', False) # decide whether to use the writemap option based on the operating system's # support for sparse files - writemap requires sparse file support otherwise # the whole# `map_size` may be reserved up front on disk (logic copied from zict) writemap = sys.platform.startswith('linux') kwargs.setdefault('writemap', writemap) # decide options for when data are flushed to disk - choose to delay syncing # data to filesystem, otherwise pay a large performance penalty (zict also does # this) kwargs.setdefault('metasync', False) kwargs.setdefault('sync', False) kwargs.setdefault('map_async', False) # set default option for number of cached transactions max_spare_txns = multiprocessing.cpu_count() kwargs.setdefault('max_spare_txns', max_spare_txns) # normalize path path = os.path.abspath(path) # open database > self.db = lmdb.open(path, **kwargs) E lmdb.MemoryError: /tmp/tmp8z8gdls8.lmdb: Cannot allocate memory zarr/storage.py:2223: MemoryError ______________________ TestLMDBStore.test_context_manager ______________________ self = def test_context_manager(self): > with self.create_store() as store: zarr/tests/test_storage.py:1930: _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ zarr/tests/test_storage.py:1926: in create_store store = LMDBStore(path, buffers=buffers, **kwargs) _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ self = path = '/tmp/tmpu91a37hh.lmdb', buffers = True, dimension_separator = None kwargs = {'map_async': False, 'map_size': 1099511627776, 'max_spare_txns': 8, 'meminit': False, ...} lmdb = map_size = 1099511627776, writemap = True, max_spare_txns = 8 def __init__(self, path, buffers=True, dimension_separator=None, **kwargs): import lmdb # set default memory map size to something larger than the lmdb default, which is # very likely to be too small for any moderate array (logic copied from zict) map_size = (2**40 if sys.maxsize >= 2**32 else 2**28) kwargs.setdefault('map_size', map_size) # don't initialize buffers to zero by default, shouldn't be necessary kwargs.setdefault('meminit', False) # decide whether to use the writemap option based on the operating system's # support for sparse files - writemap requires sparse file support otherwise # the whole# `map_size` may be reserved up front on disk (logic copied from zict) writemap = sys.platform.startswith('linux') kwargs.setdefault('writemap', writemap) # decide options for when data are flushed to disk - choose to delay syncing # data to filesystem, otherwise pay a large performance penalty (zict also does # this) kwargs.setdefault('metasync', False) kwargs.setdefault('sync', False) kwargs.setdefault('map_async', False) # set default option for number of cached transactions max_spare_txns = multiprocessing.cpu_count() kwargs.setdefault('max_spare_txns', max_spare_txns) # normalize path path = os.path.abspath(path) # open database > self.db = lmdb.open(path, **kwargs) E lmdb.MemoryError: /tmp/tmpu91a37hh.lmdb: Cannot allocate memory zarr/storage.py:2223: MemoryError =============================== warnings summary =============================== zarr/tests/test_storage.py::TestZipStore::test_store_and_retrieve_ndarray /usr/lib64/python3.11/zipfile.py:1547: UserWarning: Duplicate name: 'foo' return self._open_to_write(zinfo, force_zip64=force_zip64) -- Docs: https://docs.pytest.org/en/stable/how-to/capture-warnings.html ============================= slowest 10 durations ============================= 207.34s call zarr/tests/test_indexing.py::test_set_orthogonal_selection_3d 33.79s call zarr/tests/test_indexing.py::test_set_orthogonal_selection_2d 32.07s call zarr/tests/test_core.py::TestArrayWithFSStorePartialRead::test_non_cont 30.77s call zarr/tests/test_storage.py::test_format_compatibility 29.92s call zarr/tests/test_core.py::TestArrayWithFSStorePartialRead::test_array_2d 19.08s call zarr/tests/test_core.py::TestArrayWithStoreCache::test_object_arrays_vlen_array 18.54s call zarr/tests/test_core.py::TestArrayWithFSStoreNestedPartialRead::test_non_cont 18.01s call zarr/tests/test_core.py::TestArrayWithFSStoreNestedPartialRead::test_array_2d 16.38s call zarr/tests/test_core.py::TestArrayWithFSStoreNested::test_object_arrays_vlen_array 15.94s call zarr/tests/test_sync.py::TestArrayWithProcessSynchronizer::test_object_arrays_vlen_array =========================== short test summary info ============================ SKIPPED [1] zarr/tests/test_convenience.py:125: V3 is disabled SKIPPED [1] zarr/tests/test_convenience.py:139: V3 is disabled SKIPPED [1] zarr/tests/test_convenience.py:428: V3 is disabled SKIPPED [1] zarr/tests/test_convenience.py:490: V3 is disabled SKIPPED [1] zarr/tests/test_convenience.py:459: V3 is disabled SKIPPED [1] zarr/tests/test_convenience.py:496: V3 is disabled SKIPPED [1] zarr/tests/test_convenience.py:544: V3 is disabled SKIPPED [1] zarr/tests/test_convenience.py:406: V3 is disabled SKIPPED [1] zarr/tests/test_convenience.py:442: V3 is disabled SKIPPED [1] zarr/tests/test_convenience.py:414: V3 is disabled SKIPPED [1] zarr/tests/test_convenience.py:665: V3 is disabled SKIPPED [12] zarr/tests/test_convenience.py:722: V3 is disabled SKIPPED [12] zarr/tests/test_convenience.py:729: V3 is disabled SKIPPED [12] zarr/tests/test_convenience.py:735: V3 is disabled SKIPPED [12] zarr/tests/test_convenience.py:757: V3 is disabled SKIPPED [12] zarr/tests/test_convenience.py:782: V3 is disabled SKIPPED [12] zarr/tests/test_convenience.py:802: V3 is disabled SKIPPED [12] zarr/tests/test_convenience.py:824: V3 is disabled SKIPPED [12] zarr/tests/test_convenience.py:837: V3 is disabled SKIPPED [12] zarr/tests/test_convenience.py:843: V3 is disabled SKIPPED [12] zarr/tests/test_convenience.py:848: V3 is disabled SKIPPED [12] zarr/tests/test_convenience.py:854: V3 is disabled SKIPPED [12] zarr/tests/test_convenience.py:874: V3 is disabled SKIPPED [12] zarr/tests/test_convenience.py:914: V3 is disabled SKIPPED [12] zarr/tests/test_convenience.py:988: V3 is disabled SKIPPED [12] zarr/tests/test_convenience.py:1004: V3 is disabled SKIPPED [1] zarr/tests/test_core.py:889: Tests not enabled via environment variable SKIPPED [1] zarr/tests/test_core.py:924: Tests not enabled via environment variable SKIPPED [1] zarr/tests/test_core.py:709: Tests not enabled via environment variable SKIPPED [1] zarr/tests/test_core.py:738: Tests not enabled via environment variable SKIPPED [1] zarr/tests/test_core.py:760: Tests not enabled via environment variable SKIPPED [1] zarr/tests/test_core.py:780: Tests not enabled via environment variable SKIPPED [1] zarr/tests/test_core.py:963: Tests not enabled via environment variable SKIPPED [1] zarr/tests/test_core.py:203: Tests not enabled via environment variable SKIPPED [1] zarr/tests/test_core.py:285: Tests not enabled via environment variable SKIPPED [1] zarr/tests/test_core.py:320: Tests not enabled via environment variable SKIPPED [1] zarr/tests/test_core.py:301: Tests not enabled via environment variable SKIPPED [1] zarr/tests/test_core.py:365: Tests not enabled via environment variable SKIPPED [1] zarr/tests/test_core.py:474: Tests not enabled via environment variable SKIPPED [1] zarr/tests/test_core.py:488: Tests not enabled via environment variable SKIPPED [1] zarr/tests/test_core.py:1037: Tests not enabled via environment variable SKIPPED [1] zarr/tests/test_core.py:71: Tests not enabled via environment variable SKIPPED [1] zarr/tests/test_core.py:530: Tests not enabled via environment variable SKIPPED [1] zarr/tests/test_core.py:1538: Tests not enabled via environment variable SKIPPED [1] zarr/tests/test_core.py:1512: Tests not enabled via environment variable SKIPPED [1] zarr/tests/test_core.py:1125: Tests not enabled via environment variable SKIPPED [1] zarr/tests/test_core.py:1526: Tests not enabled via environment variable SKIPPED [1] zarr/tests/test_core.py:585: Tests not enabled via environment variable SKIPPED [1] zarr/tests/test_core.py:1488: Tests not enabled via environment variable SKIPPED [1] zarr/tests/test_core.py:1464: Tests not enabled via environment variable SKIPPED [1] zarr/tests/test_core.py:1430: Tests not enabled via environment variable SKIPPED [1] zarr/tests/test_core.py:1709: Tests not enabled via environment variable SKIPPED [1] zarr/tests/test_core.py:1013: Tests not enabled via environment variable SKIPPED [1] zarr/tests/test_core.py:854: Tests not enabled via environment variable SKIPPED [1] zarr/tests/test_core.py:1187: Tests not enabled via environment variable SKIPPED [1] zarr/tests/test_core.py:1359: Tests not enabled via environment variable SKIPPED [1] zarr/tests/test_core.py:1323: Tests not enabled via environment variable SKIPPED [1] zarr/tests/test_core.py:1292: Tests not enabled via environment variable SKIPPED [1] zarr/tests/test_core.py:1247: Tests not enabled via environment variable SKIPPED [1] zarr/tests/test_core.py:1385: Tests not enabled via environment variable SKIPPED [1] zarr/tests/test_core.py:1713: Tests not enabled via environment variable SKIPPED [1] zarr/tests/test_core.py:789: Tests not enabled via environment variable SKIPPED [1] zarr/tests/test_core.py:617: Tests not enabled via environment variable SKIPPED [1] zarr/tests/test_core.py:654: Tests not enabled via environment variable SKIPPED [1] zarr/tests/test_core.py:563: Tests not enabled via environment variable SKIPPED [1] zarr/tests/test_core.py:141: Tests not enabled via environment variable SKIPPED [1] zarr/tests/test_core.py:155: Tests not enabled via environment variable SKIPPED [1] zarr/tests/test_core.py:127: Tests not enabled via environment variable SKIPPED [1] zarr/tests/test_core.py:1100: Tests not enabled via environment variable SKIPPED [1] zarr/tests/test_core.py:1392: Tests not enabled via environment variable SKIPPED [1] zarr/tests/test_core.py:1116: Tests not enabled via environment variable SKIPPED [1] zarr/tests/test_core.py:1108: Tests not enabled via environment variable SKIPPED [1] zarr/tests/test_core.py:1558: Tests not enabled via environment variable SKIPPED [2] zarr/tests/test_core.py:1397: partial reads of object arrays not supported SKIPPED [19] zarr/tests/test_core.py:889: V3 is disabled SKIPPED [19] zarr/tests/test_core.py:924: V3 is disabled SKIPPED [19] zarr/tests/test_core.py:709: V3 is disabled SKIPPED [19] zarr/tests/test_core.py:738: V3 is disabled SKIPPED [19] zarr/tests/test_core.py:760: V3 is disabled SKIPPED [19] zarr/tests/test_core.py:780: V3 is disabled SKIPPED [19] zarr/tests/test_core.py:963: V3 is disabled SKIPPED [19] zarr/tests/test_core.py:203: V3 is disabled SKIPPED [19] zarr/tests/test_core.py:285: V3 is disabled SKIPPED [19] zarr/tests/test_core.py:320: V3 is disabled SKIPPED [19] zarr/tests/test_core.py:301: V3 is disabled SKIPPED [19] zarr/tests/test_core.py:365: V3 is disabled SKIPPED [19] zarr/tests/test_core.py:474: V3 is disabled SKIPPED [19] zarr/tests/test_core.py:488: V3 is disabled SKIPPED [19] zarr/tests/test_core.py:1037: V3 is disabled SKIPPED [1] zarr/tests/test_core.py:71: V3 is disabled SKIPPED [19] zarr/tests/test_core.py:530: V3 is disabled SKIPPED [19] zarr/tests/test_core.py:1538: V3 is disabled SKIPPED [19] zarr/tests/test_core.py:1512: V3 is disabled SKIPPED [19] zarr/tests/test_core.py:1125: V3 is disabled SKIPPED [19] zarr/tests/test_core.py:1526: V3 is disabled SKIPPED [19] zarr/tests/test_core.py:585: V3 is disabled SKIPPED [19] zarr/tests/test_core.py:1488: V3 is disabled SKIPPED [19] zarr/tests/test_core.py:1464: V3 is disabled SKIPPED [19] zarr/tests/test_core.py:1430: V3 is disabled SKIPPED [1] zarr/tests/test_core.py:170: V3 is disabled SKIPPED [1] zarr/tests/test_core.py:1013: V3 is disabled SKIPPED [19] zarr/tests/test_core.py:854: V3 is disabled SKIPPED [19] zarr/tests/test_core.py:1187: V3 is disabled SKIPPED [18] zarr/tests/test_core.py:1359: V3 is disabled SKIPPED [19] zarr/tests/test_core.py:1323: V3 is disabled SKIPPED [19] zarr/tests/test_core.py:1292: V3 is disabled SKIPPED [19] zarr/tests/test_core.py:1247: V3 is disabled SKIPPED [19] zarr/tests/test_core.py:1385: V3 is disabled SKIPPED [18] zarr/tests/test_core.py:818: V3 is disabled SKIPPED [19] zarr/tests/test_core.py:789: V3 is disabled SKIPPED [19] zarr/tests/test_core.py:617: V3 is disabled SKIPPED [19] zarr/tests/test_core.py:654: V3 is disabled SKIPPED [19] zarr/tests/test_core.py:563: V3 is disabled SKIPPED [19] zarr/tests/test_core.py:141: V3 is disabled SKIPPED [17] zarr/tests/test_core.py:155: V3 is disabled SKIPPED [19] zarr/tests/test_core.py:127: V3 is disabled SKIPPED [19] zarr/tests/test_core.py:1100: V3 is disabled SKIPPED [19] zarr/tests/test_core.py:1392: V3 is disabled SKIPPED [19] zarr/tests/test_core.py:1116: V3 is disabled SKIPPED [19] zarr/tests/test_core.py:1108: V3 is disabled SKIPPED [19] zarr/tests/test_core.py:1558: V3 is disabled SKIPPED [18] zarr/tests/test_core.py:2762: V3 is disabled SKIPPED [8] zarr/tests/test_core.py:2825: V3 is disabled SKIPPED [18] zarr/tests/test_core.py:2869: V3 is disabled SKIPPED [18] zarr/tests/test_core.py:2843: V3 is disabled SKIPPED [2] zarr/tests/test_core.py:2909: V3 is disabled SKIPPED [1] zarr/tests/test_core.py:2944: V3 is disabled SKIPPED [1] zarr/tests/test_core.py:1709: V3 is disabled SKIPPED [1] zarr/tests/test_core.py:1713: V3 is disabled SKIPPED [1] zarr/tests/test_core.py:2996: V3 is disabled SKIPPED [1] zarr/tests/test_core.py:3017: V3 is disabled SKIPPED [1] zarr/tests/test_core.py:3041: V3 is disabled SKIPPED [1] zarr/tests/test_core.py:3038: V3 is disabled SKIPPED [1] zarr/tests/test_core.py:3062: V3 is disabled SKIPPED [1] zarr/tests/test_core.py:3083: V3 is disabled SKIPPED [1] zarr/tests/test_core.py:3159: V3 is disabled SKIPPED [1] zarr/tests/test_core.py:3151: V3 is disabled SKIPPED [1] zarr/tests/test_core.py:2478: V3 is disabled SKIPPED [1] zarr/tests/test_core.py:2437: V3 is disabled SKIPPED [1] zarr/tests/test_core.py:3180: V3 is disabled SKIPPED [1] zarr/tests/test_core.py:3199: V3 is disabled SKIPPED [1] zarr/tests/test_core.py:2608: V3 is disabled SKIPPED [1] zarr/tests/test_core.py:2624: V3 is disabled SKIPPED [1] zarr/tests/test_core.py:2614: V3 is disabled SKIPPED [2] zarr/tests/test_core.py:889: sharding is disabled SKIPPED [2] zarr/tests/test_core.py:924: sharding is disabled SKIPPED [2] zarr/tests/test_core.py:709: sharding is disabled SKIPPED [2] zarr/tests/test_core.py:738: sharding is disabled SKIPPED [2] zarr/tests/test_core.py:760: sharding is disabled SKIPPED [2] zarr/tests/test_core.py:780: sharding is disabled SKIPPED [2] zarr/tests/test_core.py:963: sharding is disabled SKIPPED [2] zarr/tests/test_core.py:203: sharding is disabled SKIPPED [2] zarr/tests/test_core.py:285: sharding is disabled SKIPPED [2] zarr/tests/test_core.py:320: sharding is disabled SKIPPED [2] zarr/tests/test_core.py:301: sharding is disabled SKIPPED [2] zarr/tests/test_core.py:365: sharding is disabled SKIPPED [2] zarr/tests/test_core.py:474: sharding is disabled SKIPPED [2] zarr/tests/test_core.py:488: sharding is disabled SKIPPED [2] zarr/tests/test_core.py:1037: sharding is disabled SKIPPED [2] zarr/tests/test_core.py:2762: sharding is disabled SKIPPED [2] zarr/tests/test_core.py:530: sharding is disabled SKIPPED [2] zarr/tests/test_core.py:1538: sharding is disabled SKIPPED [2] zarr/tests/test_core.py:1512: sharding is disabled SKIPPED [2] zarr/tests/test_core.py:1125: sharding is disabled SKIPPED [2] zarr/tests/test_core.py:1526: sharding is disabled SKIPPED [2] zarr/tests/test_core.py:585: sharding is disabled SKIPPED [2] zarr/tests/test_core.py:1488: sharding is disabled SKIPPED [2] zarr/tests/test_core.py:1464: sharding is disabled SKIPPED [2] zarr/tests/test_core.py:1430: sharding is disabled SKIPPED [1] zarr/tests/test_core.py:3330: sharding is disabled SKIPPED [2] zarr/tests/test_core.py:2869: sharding is disabled SKIPPED [1] zarr/tests/test_core.py:2608: sharding is disabled SKIPPED [2] zarr/tests/test_core.py:854: sharding is disabled SKIPPED [2] zarr/tests/test_core.py:1187: sharding is disabled SKIPPED [2] zarr/tests/test_core.py:1359: sharding is disabled SKIPPED [2] zarr/tests/test_core.py:1323: sharding is disabled SKIPPED [2] zarr/tests/test_core.py:1292: sharding is disabled SKIPPED [2] zarr/tests/test_core.py:1247: sharding is disabled SKIPPED [2] zarr/tests/test_core.py:1385: sharding is disabled SKIPPED [2] zarr/tests/test_core.py:818: sharding is disabled SKIPPED [1] zarr/tests/test_core.py:2624: sharding is disabled SKIPPED [1] zarr/tests/test_core.py:2614: sharding is disabled SKIPPED [2] zarr/tests/test_core.py:789: sharding is disabled SKIPPED [2] zarr/tests/test_core.py:617: sharding is disabled SKIPPED [2] zarr/tests/test_core.py:654: sharding is disabled SKIPPED [2] zarr/tests/test_core.py:563: sharding is disabled SKIPPED [2] zarr/tests/test_core.py:141: sharding is disabled SKIPPED [2] zarr/tests/test_core.py:155: sharding is disabled SKIPPED [2] zarr/tests/test_core.py:127: sharding is disabled SKIPPED [2] zarr/tests/test_core.py:1100: sharding is disabled SKIPPED [2] zarr/tests/test_core.py:1392: sharding is disabled SKIPPED [2] zarr/tests/test_core.py:1116: sharding is disabled SKIPPED [2] zarr/tests/test_core.py:1108: sharding is disabled SKIPPED [2] zarr/tests/test_core.py:1558: sharding is disabled SKIPPED [1] zarr/tests/test_core.py:3338: sharding is disabled SKIPPED [2] zarr/tests/test_core.py:2843: sharding is disabled SKIPPED [1] zarr/tests/test_core.py:2693: V3 is disabled SKIPPED [1] zarr/tests/test_core.py:2709: V3 is disabled SKIPPED [1] zarr/tests/test_core.py:2699: V3 is disabled SKIPPED [1] zarr/tests/test_core.py:3478: sharding is disabled SKIPPED [1] zarr/tests/test_core.py:3466: sharding is disabled SKIPPED [1] zarr/tests/test_core.py:3488: sharding is disabled SKIPPED [1] zarr/tests/test_core.py:3503: V3 is disabled SKIPPED [2] zarr/tests/test_creation.py:754: V3 is disabled SKIPPED [13] zarr/tests/test_hierarchy.py:938: V3 is disabled SKIPPED [12] zarr/tests/test_hierarchy.py:1083: V3 is disabled SKIPPED [13] zarr/tests/test_hierarchy.py:293: V3 is disabled SKIPPED [13] zarr/tests/test_hierarchy.py:423: V3 is disabled SKIPPED [13] zarr/tests/test_hierarchy.py:119: V3 is disabled SKIPPED [13] zarr/tests/test_hierarchy.py:478: V3 is disabled SKIPPED [13] zarr/tests/test_hierarchy.py:855: V3 is disabled SKIPPED [13] zarr/tests/test_hierarchy.py:774: V3 is disabled SKIPPED [13] zarr/tests/test_hierarchy.py:786: V3 is disabled SKIPPED [13] zarr/tests/test_hierarchy.py:824: V3 is disabled SKIPPED [13] zarr/tests/test_hierarchy.py:506: V3 is disabled SKIPPED [13] zarr/tests/test_hierarchy.py:1125: V3 is disabled SKIPPED [13] zarr/tests/test_hierarchy.py:80: V3 is disabled SKIPPED [13] zarr/tests/test_hierarchy.py:92: V3 is disabled SKIPPED [13] zarr/tests/test_hierarchy.py:1148: V3 is disabled SKIPPED [13] zarr/tests/test_hierarchy.py:99: V3 is disabled SKIPPED [13] zarr/tests/test_hierarchy.py:798: V3 is disabled SKIPPED [12] zarr/tests/test_hierarchy.py:874: V3 is disabled SKIPPED [13] zarr/tests/test_hierarchy.py:1001: V3 is disabled SKIPPED [12] zarr/tests/test_hierarchy.py:1055: V3 is disabled SKIPPED [13] zarr/tests/test_hierarchy.py:375: V3 is disabled SKIPPED [13] zarr/tests/test_hierarchy.py:200: V3 is disabled SKIPPED [13] zarr/tests/test_hierarchy.py:254: V3 is disabled SKIPPED [13] zarr/tests/test_hierarchy.py:838: V3 is disabled SKIPPED [1] zarr/tests/test_hierarchy.py:938: Tests not enabled via environment variable SKIPPED [1] zarr/tests/test_hierarchy.py:1083: Tests not enabled via environment variable SKIPPED [1] zarr/tests/test_hierarchy.py:293: Tests not enabled via environment variable SKIPPED [1] zarr/tests/test_hierarchy.py:423: Tests not enabled via environment variable SKIPPED [1] zarr/tests/test_hierarchy.py:119: Tests not enabled via environment variable SKIPPED [1] zarr/tests/test_hierarchy.py:478: Tests not enabled via environment variable SKIPPED [1] zarr/tests/test_hierarchy.py:855: Tests not enabled via environment variable SKIPPED [1] zarr/tests/test_hierarchy.py:774: Tests not enabled via environment variable SKIPPED [1] zarr/tests/test_hierarchy.py:786: Tests not enabled via environment variable SKIPPED [1] zarr/tests/test_hierarchy.py:824: Tests not enabled via environment variable SKIPPED [1] zarr/tests/test_hierarchy.py:506: Tests not enabled via environment variable SKIPPED [1] zarr/tests/test_hierarchy.py:60: Tests not enabled via environment variable SKIPPED [1] zarr/tests/test_hierarchy.py:80: Tests not enabled via environment variable SKIPPED [1] zarr/tests/test_hierarchy.py:92: Tests not enabled via environment variable SKIPPED [1] zarr/tests/test_hierarchy.py:104: Tests not enabled via environment variable SKIPPED [1] zarr/tests/test_hierarchy.py:99: Tests not enabled via environment variable SKIPPED [1] zarr/tests/test_hierarchy.py:798: Tests not enabled via environment variable SKIPPED [1] zarr/tests/test_hierarchy.py:874: Tests not enabled via environment variable SKIPPED [1] zarr/tests/test_hierarchy.py:1001: Tests not enabled via environment variable SKIPPED [1] zarr/tests/test_hierarchy.py:1205: Tests not enabled via environment variable SKIPPED [1] zarr/tests/test_hierarchy.py:375: Tests not enabled via environment variable SKIPPED [1] zarr/tests/test_hierarchy.py:200: Tests not enabled via environment variable SKIPPED [1] zarr/tests/test_hierarchy.py:254: Tests not enabled via environment variable SKIPPED [1] zarr/tests/test_hierarchy.py:838: Tests not enabled via environment variable SKIPPED [1] zarr/tests/test_hierarchy.py:1222: V3 is disabled SKIPPED [2] zarr/tests/test_hierarchy.py:1272: V3 is disabled SKIPPED [1] zarr/tests/test_hierarchy.py:1330: V3 is disabled SKIPPED [1] zarr/tests/test_hierarchy.py:1352: V3 is disabled SKIPPED [1] zarr/tests/test_hierarchy.py:1364: V3 is disabled SKIPPED [1] zarr/tests/test_hierarchy.py:1507: V3 is disabled SKIPPED [1] zarr/tests/test_hierarchy.py:1860: V3 is disabled SKIPPED [36] zarr/tests/test_meta_array.py:109: could not import 'cupy': No module named 'cupy' SKIPPED [6] zarr/tests/test_storage.py:59: nested chunks are unsupported SKIPPED [2] zarr/tests/test_storage.py:1260: could not import 's3fs': No module named 's3fs' SKIPPED [2] zarr/tests/test_storage.py:1278: could not import 's3fs': No module named 's3fs' SKIPPED [6] zarr/tests/test_storage.py:120: Tests not enabled via environment variable SKIPPED [6] zarr/tests/test_storage.py:124: Tests not enabled via environment variable SKIPPED [6] zarr/tests/test_storage.py:153: Tests not enabled via environment variable SKIPPED [6] zarr/tests/test_storage.py:161: Tests not enabled via environment variable SKIPPED [6] zarr/tests/test_storage.py:173: Tests not enabled via environment variable SKIPPED [6] zarr/tests/test_storage.py:195: Tests not enabled via environment variable SKIPPED [6] zarr/tests/test_storage.py:207: Tests not enabled via environment variable SKIPPED [6] zarr/tests/test_storage.py:218: Tests not enabled via environment variable SKIPPED [6] zarr/tests/test_storage.py:235: Tests not enabled via environment variable SKIPPED [4] zarr/tests/test_storage.py:266: Tests not enabled via environment variable SKIPPED [2] zarr/tests/test_storage.py:292: Tests not enabled via environment variable SKIPPED [4] zarr/tests/test_storage.py:315: Tests not enabled via environment variable SKIPPED [9] zarr/tests/test_storage.py:461: Tests not enabled via environment variable SKIPPED [6] zarr/tests/test_storage.py:482: Tests not enabled via environment variable SKIPPED [6] zarr/tests/test_storage.py:485: Tests not enabled via environment variable SKIPPED [6] zarr/tests/test_storage.py:488: Tests not enabled via environment variable SKIPPED [6] zarr/tests/test_storage.py:491: Tests not enabled via environment variable SKIPPED [6] zarr/tests/test_storage.py:494: Tests not enabled via environment variable SKIPPED [6] zarr/tests/test_storage.py:497: Tests not enabled via environment variable SKIPPED [6] zarr/tests/test_storage.py:551: Tests not enabled via environment variable SKIPPED [6] zarr/tests/test_storage.py:631: Tests not enabled via environment variable SKIPPED [6] zarr/tests/test_storage.py:730: Tests not enabled via environment variable SKIPPED [6] zarr/tests/test_storage.py:746: Tests not enabled via environment variable SKIPPED [2] zarr/tests/test_storage.py:2421: Tests not enabled via environment variable SKIPPED [2] zarr/tests/test_storage.py:2432: Tests not enabled via environment variable SKIPPED [2] zarr/tests/test_storage.py:2466: Tests not enabled via environment variable SKIPPED [2] zarr/tests/test_storage.py:2469: Tests not enabled via environment variable SKIPPED [2] zarr/tests/test_storage.py:2472: Tests not enabled via environment variable SKIPPED [1] zarr/tests/test_storage_v3.py:106: v3 api is not available SKIPPED [1] zarr/tests/test_storage_v3.py:127: v3 api is not available SKIPPED [1] zarr/tests/test_storage_v3.py:143: v3 api is not available SKIPPED [8] zarr/tests/test_storage.py:120: v3 api is not available SKIPPED [15] zarr/tests/test_storage.py:124: v3 api is not available SKIPPED [15] zarr/tests/test_storage.py:161: v3 api is not available SKIPPED [14] zarr/tests/test_storage.py:173: v3 api is not available SKIPPED [14] zarr/tests/test_storage.py:195: v3 api is not available SKIPPED [15] zarr/tests/test_storage.py:207: v3 api is not available SKIPPED [15] zarr/tests/test_storage.py:218: v3 api is not available SKIPPED [15] zarr/tests/test_storage.py:235: v3 api is not available SKIPPED [14] zarr/tests/test_storage.py:266: v3 api is not available SKIPPED [15] zarr/tests/test_storage.py:315: v3 api is not available SKIPPED [15] zarr/tests/test_storage.py:482: v3 api is not available SKIPPED [15] zarr/tests/test_storage.py:485: v3 api is not available SKIPPED [15] zarr/tests/test_storage.py:488: v3 api is not available SKIPPED [15] zarr/tests/test_storage.py:491: v3 api is not available SKIPPED [15] zarr/tests/test_storage.py:494: v3 api is not available SKIPPED [15] zarr/tests/test_storage.py:497: v3 api is not available SKIPPED [15] zarr/tests/test_storage.py:551: v3 api is not available SKIPPED [15] zarr/tests/test_storage.py:631: v3 api is not available SKIPPED [15] zarr/tests/test_storage.py:730: v3 api is not available SKIPPED [15] zarr/tests/test_storage.py:746: v3 api is not available SKIPPED [15] zarr/tests/test_storage_v3.py:171: v3 api is not available SKIPPED [42] zarr/tests/test_storage_v3.py:202: v3 api is not available SKIPPED [15] zarr/tests/test_storage_v3.py:230: v3 api is not available SKIPPED [15] zarr/tests/test_storage_v3.py:246: v3 api is not available SKIPPED [14] zarr/tests/test_storage_v3.py:250: v3 api is not available SKIPPED [15] zarr/tests/test_storage_v3.py:259: v3 api is not available SKIPPED [15] zarr/tests/test_storage_v3.py:284: v3 api is not available SKIPPED [1] zarr/tests/test_storage_v3.py:326: v3 api is not available SKIPPED [14] zarr/tests/test_storage.py:153: v3 api is not available SKIPPED [1] zarr/tests/test_storage.py:957: v3 api is not available SKIPPED [1] zarr/tests/test_storage.py:962: v3 api is not available SKIPPED [1] zarr/tests/test_storage.py:999: v3 api is not available SKIPPED [1] zarr/tests/test_storage.py:1024: v3 api is not available SKIPPED [1] zarr/tests/test_storage.py:1029: v3 api is not available SKIPPED [1] zarr/tests/test_storage.py:1041: v3 api is not available SKIPPED [1] zarr/tests/test_storage.py:1045: v3 api is not available SKIPPED [1] zarr/tests/test_storage.py:1051: v3 api is not available SKIPPED [1] zarr/tests/test_storage.py:1064: v3 api is not available SKIPPED [1] zarr/tests/test_storage_v3.py:349: v3 api is not available SKIPPED [1] zarr/tests/test_storage.py:1112: v3 api is not available SKIPPED [1] zarr/tests/test_storage.py:1124: v3 api is not available SKIPPED [1] zarr/tests/test_storage.py:1140: v3 api is not available SKIPPED [1] zarr/tests/test_storage.py:1160: v3 api is not available SKIPPED [1] zarr/tests/test_storage.py:1169: v3 api is not available SKIPPED [2] zarr/tests/test_storage.py:1191: v3 api is not available SKIPPED [2] zarr/tests/test_storage.py:1212: v3 api is not available SKIPPED [1] zarr/tests/test_storage.py:1227: v3 api is not available SKIPPED [1] zarr/tests/test_storage.py:1255: v3 api is not available SKIPPED [1] zarr/tests/test_storage.py:1260: v3 api is not available SKIPPED [1] zarr/tests/test_storage.py:1278: v3 api is not available SKIPPED [1] zarr/tests/test_storage_v3.py:374: v3 api is not available SKIPPED [1] zarr/tests/test_storage.py:1787: v3 api is not available SKIPPED [1] zarr/tests/test_storage.py:1796: v3 api is not available SKIPPED [1] zarr/tests/test_storage.py:1806: v3 api is not available SKIPPED [1] zarr/tests/test_storage.py:1812: v3 api is not available SKIPPED [1] zarr/tests/test_storage.py:1819: v3 api is not available SKIPPED [1] zarr/tests/test_storage.py:1826: v3 api is not available SKIPPED [1] zarr/tests/test_storage.py:1852: v3 api is not available SKIPPED [5] zarr/tests/test_storage.py:1869: v3 api is not available SKIPPED [1] zarr/tests/test_storage.py:1929: v3 api is not available SKIPPED [2] zarr/tests/test_storage.py:1945: v3 api is not available SKIPPED [1] zarr/tests/test_storage.py:1962: v3 api is not available SKIPPED [2] zarr/tests/test_storage_v3.py:171: Tests not enabled via environment variable SKIPPED [9] zarr/tests/test_storage_v3.py:202: Tests not enabled via environment variable SKIPPED [3] zarr/tests/test_storage_v3.py:230: Tests not enabled via environment variable SKIPPED [3] zarr/tests/test_storage_v3.py:246: Tests not enabled via environment variable SKIPPED [3] zarr/tests/test_storage_v3.py:250: Tests not enabled via environment variable SKIPPED [3] zarr/tests/test_storage_v3.py:259: Tests not enabled via environment variable SKIPPED [3] zarr/tests/test_storage_v3.py:284: Tests not enabled via environment variable SKIPPED [1] zarr/tests/test_storage.py:120: sharding is disabled SKIPPED [1] zarr/tests/test_storage.py:124: sharding is disabled SKIPPED [1] zarr/tests/test_storage.py:161: sharding is disabled SKIPPED [1] zarr/tests/test_storage.py:173: sharding is disabled SKIPPED [1] zarr/tests/test_storage.py:195: sharding is disabled SKIPPED [1] zarr/tests/test_storage.py:207: sharding is disabled SKIPPED [1] zarr/tests/test_storage.py:218: sharding is disabled SKIPPED [1] zarr/tests/test_storage.py:235: sharding is disabled SKIPPED [1] zarr/tests/test_storage.py:266: sharding is disabled SKIPPED [1] zarr/tests/test_storage.py:315: sharding is disabled SKIPPED [1] zarr/tests/test_storage.py:482: sharding is disabled SKIPPED [1] zarr/tests/test_storage.py:485: sharding is disabled SKIPPED [1] zarr/tests/test_storage.py:488: sharding is disabled SKIPPED [1] zarr/tests/test_storage.py:491: sharding is disabled SKIPPED [1] zarr/tests/test_storage.py:494: sharding is disabled SKIPPED [1] zarr/tests/test_storage.py:497: sharding is disabled SKIPPED [1] zarr/tests/test_storage.py:551: sharding is disabled SKIPPED [1] zarr/tests/test_storage.py:631: sharding is disabled SKIPPED [1] zarr/tests/test_storage.py:730: sharding is disabled SKIPPED [1] zarr/tests/test_storage.py:746: sharding is disabled SKIPPED [1] zarr/tests/test_storage_v3.py:171: sharding is disabled SKIPPED [3] zarr/tests/test_storage_v3.py:202: sharding is disabled SKIPPED [1] zarr/tests/test_storage_v3.py:230: sharding is disabled SKIPPED [1] zarr/tests/test_storage_v3.py:246: sharding is disabled SKIPPED [1] zarr/tests/test_storage_v3.py:250: sharding is disabled SKIPPED [1] zarr/tests/test_storage_v3.py:259: sharding is disabled SKIPPED [1] zarr/tests/test_storage_v3.py:284: sharding is disabled SKIPPED [1] zarr/tests/test_storage_v3.py:326: sharding is disabled SKIPPED [1] zarr/tests/test_storage_v3.py:548: sharding is disabled SKIPPED [1] zarr/tests/test_storage.py:2009: v3 api is not available SKIPPED [1] zarr/tests/test_storage.py:2075: v3 api is not available SKIPPED [1] zarr/tests/test_storage.py:2173: v3 api is not available SKIPPED [1] zarr/tests/test_storage_v3.py:577: v3 api is not available SKIPPED [1] zarr/tests/test_storage.py:2487: v3 api is not available SKIPPED [1] zarr/tests/test_storage.py:2505: v3 api is not available SKIPPED [1] zarr/tests/test_storage_v3.py:621: v3 api is not available SKIPPED [1] zarr/tests/test_storage_v3.py:626: v3 api is not available SKIPPED [1] zarr/tests/test_storage_v3.py:649: v3 api is not available SKIPPED [1] zarr/tests/test_storage_v3.py:666: v3 api is not available XFAIL zarr/tests/test_dim_separator.py::test_open[static_nested_legacy] - reason: XFAIL zarr/tests/test_dim_separator.py::test_fsstore[static_nested_legacy] - reason: XFAIL zarr/tests/test_dim_separator.py::test_directory[static_nested_legacy] - reason: XFAIL zarr/tests/test_dim_separator.py::test_nested[static_flat_legacy] - reason: XFAIL zarr/tests/test_dim_separator.py::test_nested[directory_default] - reason: XFAIL zarr/tests/test_dim_separator.py::test_nested[fs_default] - reason: XFAIL zarr/tests/test_indexing.py::test_PartialChunkIterator[selection8-arr8-None] - slice 2 is out of range XFAIL zarr/tests/test_indexing.py::test_PartialChunkIterator[selection9-arr9-None] - slice 2 is out of range FAILED zarr/tests/test_core.py::TestArrayWithLMDBStore::test_0len_dim_1d - lm... FAILED zarr/tests/test_core.py::TestArrayWithLMDBStore::test_0len_dim_2d - lm... FAILED zarr/tests/test_core.py::TestArrayWithLMDBStore::test_append_1d - lmdb... FAILED zarr/tests/test_core.py::TestArrayWithLMDBStore::test_append_2d - lmdb... FAILED zarr/tests/test_core.py::TestArrayWithLMDBStore::test_append_2d_axis FAILED zarr/tests/test_core.py::TestArrayWithLMDBStore::test_append_bad_shape FAILED zarr/tests/test_core.py::TestArrayWithLMDBStore::test_array_0d - lmdb.... FAILED zarr/tests/test_core.py::TestArrayWithLMDBStore::test_array_1d - lmdb.... FAILED zarr/tests/test_core.py::TestArrayWithLMDBStore::test_array_1d_fill_value FAILED zarr/tests/test_core.py::TestArrayWithLMDBStore::test_array_1d_selections FAILED zarr/tests/test_core.py::TestArrayWithLMDBStore::test_array_1d_set_scalar FAILED zarr/tests/test_core.py::TestArrayWithLMDBStore::test_array_2d - lmdb.... FAILED zarr/tests/test_core.py::TestArrayWithLMDBStore::test_array_2d_edge_case FAILED zarr/tests/test_core.py::TestArrayWithLMDBStore::test_array_2d_partial FAILED zarr/tests/test_core.py::TestArrayWithLMDBStore::test_array_dtype_shape FAILED zarr/tests/test_core.py::TestArrayWithLMDBStore::test_array_order - lm... FAILED zarr/tests/test_core.py::TestArrayWithLMDBStore::test_attributes - lmd... FAILED zarr/tests/test_core.py::TestArrayWithLMDBStore::test_compressors - lm... FAILED zarr/tests/test_core.py::TestArrayWithLMDBStore::test_dtypes - lmdb.Me... FAILED zarr/tests/test_core.py::TestArrayWithLMDBStore::test_endian - lmdb.Me... FAILED zarr/tests/test_core.py::TestArrayWithLMDBStore::test_hexdigest - lmdb... FAILED zarr/tests/test_core.py::TestArrayWithLMDBStore::test_islice - lmdb.Me... FAILED zarr/tests/test_core.py::TestArrayWithLMDBStore::test_iter - lmdb.Memo... FAILED zarr/tests/test_core.py::TestArrayWithLMDBStore::test_iteration_exceptions FAILED zarr/tests/test_core.py::TestArrayWithLMDBStore::test_nchunks_initialized FAILED zarr/tests/test_core.py::TestArrayWithLMDBStore::test_np_ufuncs - lmdb... FAILED zarr/tests/test_core.py::TestArrayWithLMDBStore::test_object_arrays - ... FAILED zarr/tests/test_core.py::TestArrayWithLMDBStore::test_object_arrays_danger FAILED zarr/tests/test_core.py::TestArrayWithLMDBStore::test_object_arrays_vlen_array FAILED zarr/tests/test_core.py::TestArrayWithLMDBStore::test_object_arrays_vlen_bytes FAILED zarr/tests/test_core.py::TestArrayWithLMDBStore::test_object_arrays_vlen_text FAILED zarr/tests/test_core.py::TestArrayWithLMDBStore::test_object_codec_warnings FAILED zarr/tests/test_core.py::TestArrayWithLMDBStore::test_pickle - lmdb.Me... FAILED zarr/tests/test_core.py::TestArrayWithLMDBStore::test_read_only - lmdb... FAILED zarr/tests/test_core.py::TestArrayWithLMDBStore::test_resize_1d - lmdb... FAILED zarr/tests/test_core.py::TestArrayWithLMDBStore::test_resize_2d - lmdb... FAILED zarr/tests/test_core.py::TestArrayWithLMDBStore::test_setitem_data_not_shared FAILED zarr/tests/test_core.py::TestArrayWithLMDBStore::test_store_has_binary_values FAILED zarr/tests/test_core.py::TestArrayWithLMDBStore::test_store_has_text_keys FAILED zarr/tests/test_core.py::TestArrayWithLMDBStore::test_structured_array FAILED zarr/tests/test_core.py::TestArrayWithLMDBStore::test_structured_array_contain_object FAILED zarr/tests/test_core.py::TestArrayWithLMDBStore::test_structured_array_nested FAILED zarr/tests/test_core.py::TestArrayWithLMDBStore::test_structured_array_subshapes FAILED zarr/tests/test_core.py::TestArrayWithLMDBStore::test_structured_with_object FAILED zarr/tests/test_core.py::TestArrayWithLMDBStoreNoBuffers::test_0len_dim_1d FAILED zarr/tests/test_core.py::TestArrayWithLMDBStoreNoBuffers::test_0len_dim_2d FAILED zarr/tests/test_core.py::TestArrayWithLMDBStoreNoBuffers::test_append_1d FAILED zarr/tests/test_core.py::TestArrayWithLMDBStoreNoBuffers::test_append_2d FAILED zarr/tests/test_core.py::TestArrayWithLMDBStoreNoBuffers::test_append_2d_axis FAILED zarr/tests/test_core.py::TestArrayWithLMDBStoreNoBuffers::test_append_bad_shape FAILED zarr/tests/test_core.py::TestArrayWithLMDBStoreNoBuffers::test_array_0d FAILED zarr/tests/test_core.py::TestArrayWithLMDBStoreNoBuffers::test_array_1d FAILED zarr/tests/test_core.py::TestArrayWithLMDBStoreNoBuffers::test_array_1d_fill_value FAILED zarr/tests/test_core.py::TestArrayWithLMDBStoreNoBuffers::test_array_1d_selections FAILED zarr/tests/test_core.py::TestArrayWithLMDBStoreNoBuffers::test_array_1d_set_scalar FAILED zarr/tests/test_core.py::TestArrayWithLMDBStoreNoBuffers::test_array_2d FAILED zarr/tests/test_core.py::TestArrayWithLMDBStoreNoBuffers::test_array_2d_edge_case FAILED zarr/tests/test_core.py::TestArrayWithLMDBStoreNoBuffers::test_array_2d_partial FAILED zarr/tests/test_core.py::TestArrayWithLMDBStoreNoBuffers::test_array_dtype_shape FAILED zarr/tests/test_core.py::TestArrayWithLMDBStoreNoBuffers::test_array_order FAILED zarr/tests/test_core.py::TestArrayWithLMDBStoreNoBuffers::test_attributes FAILED zarr/tests/test_core.py::TestArrayWithLMDBStoreNoBuffers::test_compressors FAILED zarr/tests/test_core.py::TestArrayWithLMDBStoreNoBuffers::test_dtypes FAILED zarr/tests/test_core.py::TestArrayWithLMDBStoreNoBuffers::test_endian FAILED zarr/tests/test_core.py::TestArrayWithLMDBStoreNoBuffers::test_hexdigest FAILED zarr/tests/test_core.py::TestArrayWithLMDBStoreNoBuffers::test_islice FAILED zarr/tests/test_core.py::TestArrayWithLMDBStoreNoBuffers::test_iter - ... FAILED zarr/tests/test_core.py::TestArrayWithLMDBStoreNoBuffers::test_iteration_exceptions FAILED zarr/tests/test_core.py::TestArrayWithLMDBStoreNoBuffers::test_nchunks_initialized FAILED zarr/tests/test_core.py::TestArrayWithLMDBStoreNoBuffers::test_np_ufuncs FAILED zarr/tests/test_core.py::TestArrayWithLMDBStoreNoBuffers::test_object_arrays FAILED zarr/tests/test_core.py::TestArrayWithLMDBStoreNoBuffers::test_object_arrays_danger FAILED zarr/tests/test_core.py::TestArrayWithLMDBStoreNoBuffers::test_object_arrays_vlen_array FAILED zarr/tests/test_core.py::TestArrayWithLMDBStoreNoBuffers::test_object_arrays_vlen_bytes FAILED zarr/tests/test_core.py::TestArrayWithLMDBStoreNoBuffers::test_object_arrays_vlen_text FAILED zarr/tests/test_core.py::TestArrayWithLMDBStoreNoBuffers::test_object_codec_warnings FAILED zarr/tests/test_core.py::TestArrayWithLMDBStoreNoBuffers::test_pickle FAILED zarr/tests/test_core.py::TestArrayWithLMDBStoreNoBuffers::test_read_only FAILED zarr/tests/test_core.py::TestArrayWithLMDBStoreNoBuffers::test_resize_1d FAILED zarr/tests/test_core.py::TestArrayWithLMDBStoreNoBuffers::test_resize_2d FAILED zarr/tests/test_core.py::TestArrayWithLMDBStoreNoBuffers::test_setitem_data_not_shared FAILED zarr/tests/test_core.py::TestArrayWithLMDBStoreNoBuffers::test_store_has_binary_values FAILED zarr/tests/test_core.py::TestArrayWithLMDBStoreNoBuffers::test_store_has_bytes_values FAILED zarr/tests/test_core.py::TestArrayWithLMDBStoreNoBuffers::test_store_has_text_keys FAILED zarr/tests/test_core.py::TestArrayWithLMDBStoreNoBuffers::test_structured_array FAILED zarr/tests/test_core.py::TestArrayWithLMDBStoreNoBuffers::test_structured_array_contain_object FAILED zarr/tests/test_core.py::TestArrayWithLMDBStoreNoBuffers::test_structured_array_nested FAILED zarr/tests/test_core.py::TestArrayWithLMDBStoreNoBuffers::test_structured_array_subshapes FAILED zarr/tests/test_core.py::TestArrayWithLMDBStoreNoBuffers::test_structured_with_object FAILED zarr/tests/test_hierarchy.py::TestGroupWithLMDBStore::test_array_creation FAILED zarr/tests/test_hierarchy.py::TestGroupWithLMDBStore::test_context_manager FAILED zarr/tests/test_hierarchy.py::TestGroupWithLMDBStore::test_create_dataset FAILED zarr/tests/test_hierarchy.py::TestGroupWithLMDBStore::test_create_errors FAILED zarr/tests/test_hierarchy.py::TestGroupWithLMDBStore::test_create_group FAILED zarr/tests/test_hierarchy.py::TestGroupWithLMDBStore::test_create_overwrite FAILED zarr/tests/test_hierarchy.py::TestGroupWithLMDBStore::test_delitem - l... FAILED zarr/tests/test_hierarchy.py::TestGroupWithLMDBStore::test_double_counting_group_v3 FAILED zarr/tests/test_hierarchy.py::TestGroupWithLMDBStore::test_empty_getitem_contains_iterators FAILED zarr/tests/test_hierarchy.py::TestGroupWithLMDBStore::test_getattr - l... FAILED zarr/tests/test_hierarchy.py::TestGroupWithLMDBStore::test_getitem_contains_iterators FAILED zarr/tests/test_hierarchy.py::TestGroupWithLMDBStore::test_group_init_1 FAILED zarr/tests/test_hierarchy.py::TestGroupWithLMDBStore::test_group_init_2 FAILED zarr/tests/test_hierarchy.py::TestGroupWithLMDBStore::test_group_init_errors_1 FAILED zarr/tests/test_hierarchy.py::TestGroupWithLMDBStore::test_group_init_errors_2 FAILED zarr/tests/test_hierarchy.py::TestGroupWithLMDBStore::test_group_repr FAILED zarr/tests/test_hierarchy.py::TestGroupWithLMDBStore::test_iterators_recurse FAILED zarr/tests/test_hierarchy.py::TestGroupWithLMDBStore::test_move - lmdb... FAILED zarr/tests/test_hierarchy.py::TestGroupWithLMDBStore::test_paths - lmd... FAILED zarr/tests/test_hierarchy.py::TestGroupWithLMDBStore::test_pickle - lm... FAILED zarr/tests/test_hierarchy.py::TestGroupWithLMDBStore::test_require_dataset FAILED zarr/tests/test_hierarchy.py::TestGroupWithLMDBStore::test_require_group FAILED zarr/tests/test_hierarchy.py::TestGroupWithLMDBStore::test_rmdir_group_and_array_metadata_files FAILED zarr/tests/test_hierarchy.py::TestGroupWithLMDBStore::test_setitem - l... FAILED zarr/tests/test_storage.py::TestLMDBStore::test_get_set_del_contains FAILED zarr/tests/test_storage.py::TestLMDBStore::test_set_invalid_content - ... FAILED zarr/tests/test_storage.py::TestLMDBStore::test_clear - lmdb.MemoryErr... FAILED zarr/tests/test_storage.py::TestLMDBStore::test_pop - lmdb.MemoryError... FAILED zarr/tests/test_storage.py::TestLMDBStore::test_popitem - lmdb.MemoryE... FAILED zarr/tests/test_storage.py::TestLMDBStore::test_writeable_values - lmd... FAILED zarr/tests/test_storage.py::TestLMDBStore::test_update - lmdb.MemoryEr... FAILED zarr/tests/test_storage.py::TestLMDBStore::test_iterators - lmdb.Memor... FAILED zarr/tests/test_storage.py::TestLMDBStore::test_pickle - lmdb.MemoryEr... FAILED zarr/tests/test_storage.py::TestLMDBStore::test_getsize - lmdb.MemoryE... FAILED zarr/tests/test_storage.py::TestLMDBStore::test_hierarchy - lmdb.Memor... FAILED zarr/tests/test_storage.py::TestLMDBStore::test_init_array[dimension_separator_fixture0] FAILED zarr/tests/test_storage.py::TestLMDBStore::test_init_array[dimension_separator_fixture1] FAILED zarr/tests/test_storage.py::TestLMDBStore::test_init_array[dimension_separator_fixture2] FAILED zarr/tests/test_storage.py::TestLMDBStore::test_init_array_overwrite FAILED zarr/tests/test_storage.py::TestLMDBStore::test_init_array_overwrite_path FAILED zarr/tests/test_storage.py::TestLMDBStore::test_init_array_overwrite_chunk_store FAILED zarr/tests/test_storage.py::TestLMDBStore::test_init_group_overwrite FAILED zarr/tests/test_storage.py::TestLMDBStore::test_init_group_overwrite_path FAILED zarr/tests/test_storage.py::TestLMDBStore::test_init_group_overwrite_chunk_store FAILED zarr/tests/test_storage.py::TestLMDBStore::test_init_array_path - lmdb... FAILED zarr/tests/test_storage.py::TestLMDBStore::test_init_array_overwrite_group FAILED zarr/tests/test_storage.py::TestLMDBStore::test_init_array_compat - lm... FAILED zarr/tests/test_storage.py::TestLMDBStore::test_init_group - lmdb.Memo... FAILED zarr/tests/test_storage.py::TestLMDBStore::test_context_manager - lmdb... = 138 failed, 2627 passed, 2341 skipped, 8 xfailed, 1 warning in 2182.51s (0:36:22) = error: Bad exit status from /var/tmp/rpm-tmp.CV7YdI (%check) RPM build errors: Bad exit status from /var/tmp/rpm-tmp.CV7YdI (%check) Child return code was: 1 EXCEPTION: [Error()] Traceback (most recent call last): File "/usr/lib/python3.10/site-packages/mockbuild/trace_decorator.py", line 93, in trace result = func(*args, **kw) File "/usr/lib/python3.10/site-packages/mockbuild/util.py", line 598, in do_with_status raise exception.Error("Command failed: \n # %s\n%s" % (command, output), child.returncode) mockbuild.exception.Error: Command failed: # bash --login -c /usr/bin/rpmbuild -ba --noprep --target noarch --nodeps /builddir/build/SPECS/python-zarr.spec