From 6b2d0481fe17d93bb292286dfe060fedbe234f49 Mon Sep 17 00:00:00 2001
From: GbotemiB
Date: Mon, 10 Oct 2022 20:53:41 +0100
Subject: [PATCH 01/11] tutorial file in ipynb format

---
 docs/conf.py | 8 +-
 docs/data/example.n5/attributes.json | 3 +
 docs/data/example.sqldb | Bin 0 -> 40960 bytes
 docs/data/example.zarr/.zgroup | 3 +
 docs/data/example.zip | Bin 0 -> 34105 bytes
 docs/data/group.zarr/.zgroup | 3 +
 docs/data/group.zarr/foo/.zgroup | 3 +
 docs/data/group.zarr/foo/bar/.zgroup | 3 +
 docs/data/group.zarr/foo/bar/baz/.zarray | 22 +
 docs/index.rst | 1 +
 docs/tutorial_nb.ipynb | 3379 ++++++++++++++++++++++
 11 files changed, 3423 insertions(+), 2 deletions(-)
 create mode 100644 docs/data/example.n5/attributes.json
 create mode 100644 docs/data/example.sqldb
 create mode 100644 docs/data/example.zarr/.zgroup
 create mode 100644 docs/data/example.zip
 create mode 100644 docs/data/group.zarr/.zgroup
 create mode 100644 docs/data/group.zarr/foo/.zgroup
 create mode 100644 docs/data/group.zarr/foo/bar/.zgroup
 create mode 100644 docs/data/group.zarr/foo/bar/baz/.zarray
 create mode 100644 docs/tutorial_nb.ipynb

diff --git a/docs/conf.py b/docs/conf.py
index 733ac60801..2a47bf2d04 100644
--- a/docs/conf.py
+++ b/docs/conf.py
@@ -38,11 +38,12 @@
 # extensions coming with Sphinx (named 'sphinx.ext.*') or your custom
 # ones.
 extensions = [
+    'nbsphinx',
     'sphinx.ext.autodoc',
     'sphinx.ext.autosummary',
     'sphinx.ext.viewcode',
     'sphinx.ext.intersphinx',
-    'numpydoc',
+    #'numpydoc',
     'sphinx_issues',
     "sphinx_copybutton",
 ]
@@ -51,6 +52,9 @@
 numpydoc_class_members_toctree = False
 issues_github_path = 'zarr-developers/zarr-python'
 
+#Handling errors with rendering notebook
+nbsphinx_allow_errors = True
+
 # Add any paths that contain templates here, relative to this directory.
 templates_path = ['_templates']
 
@@ -79,7 +83,7 @@
 #
 # This is also used if you do content translation via gettext catalogs.
 # Usually you set "language" from the command line for these cases.
-language = None
+language = 'en'
 
 # There are two options for replacing |today|: either, you set today to some
 # non-false value, then it is used:
diff --git a/docs/data/example.n5/attributes.json b/docs/data/example.n5/attributes.json
new file mode 100644
index 0000000000..a659b3e01f
--- /dev/null
+++ b/docs/data/example.n5/attributes.json
@@ -0,0 +1,3 @@
+{
+    "n5": "2.0.0"
+}
\ No newline at end of file
diff --git a/docs/data/example.sqldb b/docs/data/example.sqldb
new file mode 100644
index 0000000000000000000000000000000000000000..f6b120d57b90eb28a1a982e49279b82e1de42500
GIT binary patch
literal 40960
[...]
+    "## Storage Alternatives\n",
+    "\n",
+    "Zarr can use any object that implements the ```MutableMapping``` interface from the [```collections.abc```](https://docs.python.org/3/library/collections.abc.html#module-collections.abc) module in the Python standard library as the store for a group or an array.\n",
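+    "\n",
+    "For example, here is a minimal sketch using a plain Python ```dict``` as the store (the keys shown in the comment are those written by the Zarr v2 format):\n",
+    "\n",
+    "```\n",
+    "import zarr\n",
+    "\n",
+    "store = dict()  # any MutableMapping can hold the chunks and metadata\n",
+    "z = zarr.create(store=store, shape=(100,), chunks=(10,), dtype='i4')\n",
+    "z[:] = 42\n",
+    "sorted(store)[:3]  # array metadata plus one key per chunk: ['.zarray', '0', '1']\n",
+    "```\n",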
+    "\n",
+    "Some pre-defined storage classes are provided in the [```zarr.storage```](https://zarr.readthedocs.io/en/stable/api/storage.html#module-zarr.storage) module. For example, the [```zarr.storage.DirectoryStore```](https://zarr.readthedocs.io/en/stable/api/storage.html#zarr.storage.DirectoryStore) class provides a ```MutableMapping``` interface to a directory on the local file system. This is used under the hood by the [```zarr.convenience.open()```](https://zarr.readthedocs.io/en/stable/api/convenience.html#zarr.convenience.open) function. In other words, the following code:\n",
+    "\n"
+   ]
+  },
+  {
+   "cell_type": "code",
+   "execution_count": null,
+   "metadata": {
+    "id": "IN2y5o6xQRZR"
+   },
+   "outputs": [],
+   "source": [
+    "z = zarr.open('data/example.zarr', mode='w', shape=1000000, dtype='i4')"
+   ]
+  },
+  {
+   "cell_type": "markdown",
+   "metadata": {
+    "id": "RX7a25OASMFA"
+   },
+   "source": [
+    "...is short-hand for:"
+   ]
+  },
+  {
+   "cell_type": "code",
+   "execution_count": null,
+   "metadata": {
+    "id": "Tb-geDRdSK8d"
+   },
+   "outputs": [],
+   "source": [
+    "store = zarr.DirectoryStore('data/example.zarr')\n",
+    "z = zarr.create(store=store, overwrite=True, shape=1000000, dtype='i4')"
+   ]
+  },
+  {
+   "cell_type": "markdown",
+   "metadata": {
+    "id": "TGRwYVzTSVSD"
+   },
+   "source": [
+    "...and the following code:"
+   ]
+  },
+  {
+   "cell_type": "code",
+   "execution_count": null,
+   "metadata": {
+    "id": "Y8I-6LaoSSzM"
+   },
+   "outputs": [],
+   "source": [
+    "root = zarr.open('data/example.zarr', mode='w')"
+   ]
+  },
+  {
+   "cell_type": "markdown",
+   "metadata": {
+    "id": "BIhLQVErSa93"
+   },
+   "source": [
+    "...is short-hand for:"
+   ]
+  },
+  {
+   "cell_type": "code",
+   "execution_count": null,
+   "metadata": {
+    "id": "NzoiM3J7SZ_j"
+   },
+   "outputs": [],
+   "source": [
+    "store = zarr.DirectoryStore('data/example.zarr')\n",
+    "root = zarr.group(store=store, overwrite=True)"
+   ]
+  },
+  {
+   "cell_type": "markdown",
+   "metadata": {
+    "id": "1GYws83JShUU"
+   },
+   "source": [
+    "Any other compatible storage class could be used in place of [```zarr.storage.DirectoryStore```](https://zarr.readthedocs.io/en/stable/api/storage.html#zarr.storage.DirectoryStore) in the code examples above. For example, here is an array stored directly into a Zip file, via the [```zarr.storage.ZipStore```](https://zarr.readthedocs.io/en/stable/api/storage.html#zarr.storage.ZipStore) class:"
+   ]
+  },
+  {
+   "cell_type": "code",
+   "execution_count": null,
+   "metadata": {
+    "id": "ciRq3JCVSftZ"
+   },
+   "outputs": [],
+   "source": [
+    "store = zarr.ZipStore('data/example.zip', mode='w')\n",
+    "root = zarr.group(store=store)\n",
+    "z = root.zeros('foo/bar', shape=(1000, 1000), chunks=(100, 100), dtype='i4')\n",
+    "z[:] = 42\n",
+    "store.close()"
+   ]
+  },
+  {
+   "cell_type": "markdown",
+   "metadata": {
+    "id": "Kkku2WrxSynN"
+   },
+   "source": [
+    "Re-open and check that data have been written:"
+   ]
+  },
+  {
+   "cell_type": "code",
+   "execution_count": null,
+   "metadata": {
+    "id": "eWBjXkPPSv92"
+   },
+   "outputs": [],
+   "source": [
+    "store = zarr.ZipStore('data/example.zip', mode='r')\n",
+    "root = zarr.group(store=store)\n",
+    "z = root['foo/bar']\n",
+    "z[:]"
+   ]
+  },
+  {
+   "cell_type": "code",
+   "execution_count": null,
+   "metadata": {
+    "id": "G_ondBMeS4DG"
+   },
+   "outputs": [],
+   "source": [
+    "store.close()"
+   ]
+  },
+  {
+   "cell_type": "markdown",
+   "metadata": {
+    "id": "0cQQMoFZS-dR"
+   },
+   "source": [
+    "Note that there are some limitations on how Zip files can be used, because items within a Zip file cannot be updated in place. This means that data in the array should only be written once and write operations should be aligned with chunk boundaries. Note also that the ```close()``` method must be called after writing any data to the store, otherwise essential records will not be written to the underlying zip file.\n",
+    "\n",
+    "Another storage alternative is the [```zarr.storage.DBMStore```](https://zarr.readthedocs.io/en/stable/api/storage.html#zarr.storage.DBMStore) class, added in Zarr version 2.2. This class allows any DBM-style database to be used for storing an array or group. Here is an example using a Berkeley DB B-tree database for storage (requires [bsddb3](https://www.jcea.es/programacion/pybsddb.htm) to be installed):\n",
+    "\n"
+   ]
+  },
+  {
+   "cell_type": "markdown",
+   "metadata": {
+    "id": "GvwII6XoT_eZ"
+   },
+   "source": [
+    "```\n",
+    "import bsddb3\n",
+    "store = zarr.DBMStore('data/example.bdb', open=bsddb3.btopen)\n",
+    "root = zarr.group(store=store, overwrite=True)\n",
+    "z = root.zeros('foo/bar', shape=(1000, 1000), chunks=(100, 100), dtype='i4')\n",
+    "z[:] = 42\n",
+    "store.close()\n",
+    "```\n"
+   ]
+  },
+  {
+   "cell_type": "markdown",
+   "metadata": {
+    "id": "LYx7PDVzUNy8"
+   },
+   "source": [
+    "Also added in Zarr version 2.2 is the [```zarr.storage.LMDBStore```](https://zarr.readthedocs.io/en/stable/api/storage.html#zarr.storage.LMDBStore) class, which enables the lightning memory-mapped database (LMDB) to be used for storing an array or group (requires lmdb to be installed):\n",
+    "\n"
+   ]
+  },
+  {
+   "cell_type": "code",
+   "execution_count": null,
+   "metadata": {
+    "id": "Q7StQhu8bisP"
+   },
+   "outputs": [],
+   "source": [
+    "store = zarr.LMDBStore('data/example.lmdb')\n",
+    "root = zarr.group(store=store, overwrite=True)\n",
+    "z = root.zeros('foo/bar', shape=(1000, 1000), chunks=(100, 100), dtype='i4')\n",
+    "z[:] = 42\n",
+    "store.close()"
+   ]
+  },
+  {
+   "cell_type": "markdown",
+   "metadata": {
+    "id": "EWAldISLZGxT"
+   },
+   "source": [
+    "Added in Zarr version 2.3 is the [```zarr.storage.SQLiteStore```](https://zarr.readthedocs.io/en/stable/api/storage.html#zarr.storage.SQLiteStore) class, which enables a SQLite database to be used for storing an array or group (requires that Python is built with SQLite support):\n",
+    "\n"
+   ]
+  },
+  {
+   "cell_type": "code",
+   "execution_count": null,
+   "metadata": {
+    "id": "0BXoPwjLbVXJ"
+   },
+   "outputs": [],
+   "source": [
+    "store = zarr.SQLiteStore('data/example.sqldb')\n",
+    "root = zarr.group(store=store, overwrite=True)\n",
+    "z = root.zeros('foo/bar', shape=(1000, 1000), chunks=(100, 100), dtype='i4')\n",
+    "z[:] = 42\n",
+    "store.close()"
+   ]
+  },
+  {
+   "cell_type": "markdown",
+   "metadata": {
+    "id": "miduiodpZvl6"
+   },
+   "source": [
+    "Also added in Zarr version 2.3 are two storage classes for interfacing with server-client databases. The [```zarr.storage.RedisStore```](https://zarr.readthedocs.io/en/stable/api/storage.html#zarr.storage.RedisStore) class interfaces with [Redis](https://redis.io/) (an in-memory data structure store), and the ```zarr.storage.MongoDBStore``` class interfaces with [MongoDB](https://www.mongodb.com/) (a document-oriented NoSQL database). These stores respectively require the [redis-py](https://redis-py.readthedocs.io/en/stable/) and [pymongo](https://api.mongodb.com/python/current/) packages to be installed.\n",
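+    "\n",
+    "For instance, here is a minimal sketch using the Redis store (this assumes a Redis server is running; the host and port shown are illustrative defaults):\n",
+    "\n",
+    "```\n",
+    "# illustrative connection settings; adapt to your Redis server\n",
+    "store = zarr.RedisStore(host='localhost', port=6379)\n",
+    "root = zarr.group(store=store, overwrite=True)\n",
+    "z = root.zeros('foo/bar', shape=(1000, 1000), chunks=(100, 100), dtype='i4')\n",
+    "z[:] = 42\n",
+    "```\n",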
+    "\n",
+    "For compatibility with the [N5](https://github.com/saalfeldlab/n5) data format, Zarr also provides an N5 backend (this is currently an experimental feature). Similar to the zip storage class, a [```zarr.n5.N5Store```](https://zarr.readthedocs.io/en/stable/api/n5.html#zarr.n5.N5Store) can be instantiated directly:"
+   ]
+  },
+  {
+   "cell_type": "code",
+   "execution_count": null,
+   "metadata": {
+    "id": "uieVvPVXbZ6N"
+   },
+   "outputs": [],
+   "source": [
+    "store = zarr.N5Store('data/example.n5')\n",
+    "root = zarr.group(store=store)\n",
+    "z = root.zeros('foo/bar', shape=(1000, 1000), chunks=(100, 100), dtype='i4')\n",
+    "z[:] = 42"
+   ]
+  },
+  {
+   "cell_type": "markdown",
+   "metadata": {
+    "id": "GUwszCEXbAci"
+   },
+   "source": [
+    "For convenience, the N5 backend will automatically be chosen when the filename ends with *.n5*:"
+   ]
+  },
+  {
+   "cell_type": "code",
+   "execution_count": null,
+   "metadata": {
+    "id": "PTRe2x-EZuLT"
+   },
+   "outputs": [],
+   "source": [
+    "root = zarr.open('data/example.n5', mode='w')"
+   ]
+  },
+  {
+   "cell_type": "markdown",
+   "metadata": {
+    "id": "o9nezRjYbvo6"
+   },
+   "source": [
+    "### Distributed/Cloud Storage"
+   ]
+  },
+  {
+   "cell_type": "markdown",
+   "metadata": {
+    "id": "Ue1DwmYfUTo1"
+   },
+   "source": [
+    "It is also possible to use distributed storage systems. The Dask project has implementations of the `MutableMapping` interface for Amazon S3 ([S3Map](https://s3fs.readthedocs.io/en/latest/api.html#s3fs.mapping.S3Map)), the Hadoop Distributed File System ([HDFSMap](https://hdfs3.readthedocs.io/en/latest/api.html#hdfs3.mapping.HDFSMap)) and Google Cloud Storage ([GCSMap](https://gcsfs.readthedocs.io/en/latest/api.html#gcsfs.mapping.GCSMap)), which can be used with Zarr.\n",
+    "\n",
+    "Here is an example using S3Map to read an array created previously:"
+   ]
+  },
+  {
+   "cell_type": "code",
+   "source": [
+    "%pip install s3fs"
+   ],
+   "metadata": {
+    "id": "KxMAnxN85U7-"
+   },
+   "execution_count": null,
+   "outputs": []
+  },
+  {
+   "cell_type": "code",
+   "source": [
+    "import s3fs\n",
+    "import zarr\n",
+    "s3 = s3fs.S3FileSystem(anon=True, client_kwargs=dict(region_name='eu-west-2'))\n",
+    "store = s3fs.S3Map(root='zarr-demo/store', s3=s3, check=False)\n",
+    "root = zarr.group(store=store)\n",
+    "z = root['foo/bar/baz']\n",
+    "z"
+   ],
+   "metadata": {
+    "id": "g72KoI8rLKur"
+   },
+   "execution_count": null,
+   "outputs": []
+  },
+  {
+   "cell_type": "code",
+   "source": [
+    "z.info"
+   ],
+   "metadata": {
+    "id": "9Ky3c5rPLgcJ"
+   },
+   "execution_count": null,
+   "outputs": []
+  },
+  {
+   "cell_type": "code",
+   "source": [
+    "z[:]"
+   ],
+   "metadata": {
+    "id": "rbYmDc7OLmQt"
+   },
+   "execution_count": null,
+   "outputs": []
+  },
+  {
+   "cell_type": "code",
+   "source": [
+    "z[:].tobytes()"
+   ],
+   "metadata": {
+    "id": "hCK8I6yALyWw"
+   },
+   "execution_count": null,
+   "outputs": []
+  },
+  {
+   "cell_type": "markdown",
+   "source": [
+    "Zarr now also has a built-in storage backend for Azure Blob Storage. The class is [```zarr.storage.ABSStore```](https://zarr.readthedocs.io/en/stable/api/storage.html#zarr.storage.ABSStore) (requires [azure-storage-blob](https://docs.microsoft.com/en-us/azure/storage/blobs/storage-quickstart-blobs-python?tabs=environment-variable-windows) to be installed):"
+   ],
+   "metadata": {
+    "id": "A6dbWR4WL4tX"
+   }
+  },
+  {
+   "cell_type": "markdown",
+   "source": [
+    "\n",
+    "```\n",
+    "import azure.storage.blob\n",
+    "container_client = azure.storage.blob.ContainerClient(...)\n",
\n", + "store = zarr.ABSStore(client=container_client, prefix='zarr-testing') \n", + "root = zarr.group(store=store, overwrite=True) \n", + "z = root.zeros('foo/bar', shape=(1000, 1000), chunks=(100, 100), dtype='i4') \n", + "z[:] = 42 \n", + "```" + ], + "metadata": { + "id": "LhLw9qqSNaI1" + } + }, + { + "cell_type": "markdown", + "source": [ + "When using an actual storage account, provide ```account_name``` and ```account_key``` arguments to [```zarr.storage.ABSStore```](https://zarr.readthedocs.io/en/stable/api/storage.html#zarr.storage.ABSStore), the above client is just testing against the emulator. Please also note that this is an experimental feature.\n", + "\n", + "Note that retrieving data from a remote service via the network can be significantly slower than retrieving data from a local file system, and will depend on network latency and bandwidth between the client and server systems. If you are experiencing poor performance, there are several things you can try. One option is to increase the array chunk size, which will reduce the number of chunks and thus reduce the number of network round-trips required to retrieve data for an array (and thus reduce the impact of network latency). Another option is to try to increase the compression ratio by changing compression options or trying a different compressor (which will reduce the impact of limited network bandwidth).\n", + "\n", + "As of version 2.2, Zarr also provides the [```zarr.storage.LRUStoreCache```](https://zarr.readthedocs.io/en/stable/api/storage.html#zarr.storage.LRUStoreCache) which can be used to implement a local in-memory cache layer over a remote store. E.g.:" + ], + "metadata": { + "id": "U-WqeNmbNjup" + } + }, + { + "cell_type": "code", + "source": [ + "s3 = s3fs.S3FileSystem(anon=True, client_kwargs=dict(region_name='eu-west-2'))\n", + "store = s3fs.S3Map(root='zarr-demo/store', s3=s3, check=False)\n", + "cache = zarr.LRUStoreCache(store, max_size=2**28)\n", + "root = zarr.group(store=cache)\n", + "z = root['foo/bar/baz']\n", + "\n", + "from timeit import timeit\n", + "# First data access is relatively slow, retrieved from store\n", + "timeit('print(z[:].tobytes())', number=1, globals=globals())" + ], + "metadata": { + "id": "nXhc_UdsNPOn" + }, + "execution_count": null, + "outputs": [] + }, + { + "cell_type": "code", + "source": [ + "# Second data access is faster, uses cache\n", + "timeit('print(z[:].tobytes())', number=1, globals=globals())" + ], + "metadata": { + "id": "SDLYWK0sOUbW" + }, + "execution_count": null, + "outputs": [] + }, + { + "cell_type": "markdown", + "source": [ + "If you are still experiencing poor performance with distributed/cloud storage, please raise an issue on the GitHub issue tracker with any profiling data you can provide, as there may be opportunities to optimise further either within Zarr or within the mapping interface to the storage." + ], + "metadata": { + "id": "boHOqg5LO4Y4" + } + }, + { + "cell_type": "markdown", + "source": [ + "###IO with ```fsspec```\n", + "\n", + "As of version 2.5, zarr supports passing URLs directly to [fsspec](https://filesystem-spec.readthedocs.io/en/latest/), and having it create the “mapping” instance automatically. This means, that for all of the backend storage implementations [supported by fsspec](https://filesystem-spec.readthedocs.io/en/latest/api.html#built-in-implementations), you can skip importing and configuring the storage explicitly. 
+  {
+   "cell_type": "code",
+   "source": [
+    "g = zarr.open_group(\"s3://zarr-demo/store\", storage_options={'anon': True})\n",
+    "g['foo/bar/baz'][:].tobytes()"
+   ],
+   "metadata": {
+    "id": "tDKp3B4FO1it"
+   },
+   "execution_count": null,
+   "outputs": []
+  },
+  {
+   "cell_type": "markdown",
+   "source": [
+    "The provision of the protocol specifier “s3://” will select the correct backend. Notice the ```storage_options``` keyword argument, used to pass parameters to that backend.\n",
+    "\n",
+    "As of version 2.6, write mode and complex URLs are also supported, such as:"
+   ],
+   "metadata": {
+    "id": "CmuZHo8PRl5D"
+   }
+  },
+  {
+   "cell_type": "code",
+   "source": [
+    "g = zarr.open_group(\"simplecache::s3://zarr-demo/store\",\n",
+    "                    storage_options={\"s3\": {'anon': True}})"
+   ],
+   "metadata": {
+    "id": "1r9raC-kRAU8"
+   },
+   "execution_count": null,
+   "outputs": []
+  },
+  {
+   "cell_type": "code",
+   "source": [
+    "# Downloads the target file\n",
+    "g['foo/bar/baz'][:].tobytes()"
+   ],
+   "metadata": {
+    "id": "TuBM3BotR55K"
+   },
+   "execution_count": null,
+   "outputs": []
+  },
+  {
+   "cell_type": "code",
+   "source": [
+    "# Uses the cached file\n",
+    "g['foo/bar/baz'][:].tobytes()"
+   ],
+   "metadata": {
+    "id": "_ppDor9SSCsi"
+   },
+   "execution_count": null,
+   "outputs": []
+  },
+  {
+   "cell_type": "markdown",
+   "source": [
+    "The second invocation here will be much faster. Note that the ```storage_options``` have become more complex here, to account for the two parts of the supplied URL."
+   ],
+   "metadata": {
+    "id": "AFG4kwY1SOv8"
+   }
+  },
+  {
+   "cell_type": "markdown",
+   "source": [
+    "### Consolidating Metadata\n",
+    "\n",
+    "Since there is a significant overhead for every connection to a cloud object store such as S3, the pattern described in the previous section may incur significant latency while scanning the metadata of the array hierarchy, even though each individual metadata object is small. For cases such as these, once the data are static and can be regarded as read-only, at least for the metadata/structure of the array hierarchy, the many metadata objects can be consolidated into a single one via [```zarr.convenience.consolidate_metadata()```](https://zarr.readthedocs.io/en/stable/api/convenience.html#zarr.convenience.consolidate_metadata). Doing this can greatly increase the speed of reading the array metadata, e.g.:"
+   ],
+   "metadata": {
+    "id": "-HpqJgMyVoBR"
+   }
+  },
+  {
+   "cell_type": "code",
+   "source": [
+    "zarr.consolidate_metadata(store)"
+   ],
+   "metadata": {
+    "id": "vdfg41lgSLyA"
+   },
+   "execution_count": null,
+   "outputs": []
+  },
+  {
+   "cell_type": "markdown",
+   "source": [
+    "This creates a special key with a copy of all of the metadata from all of the metadata objects in the store.\n",
+    "\n",
+    "Later, to open a Zarr store with consolidated metadata, use [```zarr.convenience.open_consolidated()```](https://zarr.readthedocs.io/en/stable/api/convenience.html#zarr.convenience.open_consolidated), e.g.:"
+   ],
+   "metadata": {
+    "id": "CT6QiZsJWYHO"
+   }
+  },
+  {
+   "cell_type": "code",
+   "source": [
+    "root = zarr.open_consolidated(store)"
+   ],
+   "metadata": {
+    "id": "hGCR1WwnWIbf"
+   },
+   "execution_count": null,
+   "outputs": []
+  },
+  {
+   "cell_type": "markdown",
+   "source": [
+    "This uses the special key to read all of the metadata in a single call to the backend storage.\n",
+    "\n",
+    "Note that the hierarchy could still be opened in the normal way and altered, causing the consolidated metadata to become out of sync with the real state of the array hierarchy. In this case, [```zarr.convenience.consolidate_metadata()```](https://zarr.readthedocs.io/en/stable/api/convenience.html#zarr.convenience.consolidate_metadata) would need to be called again.\n",
+    "\n",
+    "To protect against consolidated metadata accidentally getting out of sync, the root group returned by [```zarr.convenience.open_consolidated()```](https://zarr.readthedocs.io/en/stable/api/convenience.html#zarr.convenience.open_consolidated) is read-only for the metadata, meaning that no new groups or arrays can be created, and arrays cannot be resized. However, data values within arrays can still be updated."
+   ],
+   "metadata": {
+    "id": "3f4QOII6XCyB"
+   }
+  },
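+  {
+   "cell_type": "markdown",
+   "source": [
+    "As a minimal sketch of the distinction (re-using the consolidated ```store``` and the demo group path from above):\n",
+    "\n",
+    "```\n",
+    "root = zarr.open_consolidated(store)\n",
+    "z = root['foo/bar/baz']\n",
+    "z[0] = 1                      # updating data values is allowed\n",
+    "# root.create_group('quux')   # would raise an error: metadata is read-only\n",
+    "```"
+   ],
+   "metadata": {}
+  },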
+  {
+   "cell_type": "markdown",
+   "source": [
+    "## Copying/Migrating Data\n",
+    "\n",
+    "If you have some data in an HDF5 file and would like to copy some or all of it into a Zarr group, or vice-versa, the [```zarr.convenience.copy()```](https://zarr.readthedocs.io/en/stable/api/convenience.html#zarr.convenience.copy) and [```zarr.convenience.copy_all()```](https://zarr.readthedocs.io/en/stable/api/convenience.html#zarr.convenience.copy_all) functions can be used. Here’s an example copying a group named ‘foo’ from an HDF5 file to a Zarr group:"
+   ],
+   "metadata": {
+    "id": "2kR2ue-MXVxx"
+   }
+  },
+  {
+   "cell_type": "markdown",
+   "source": [
+    "Input\n",
+    "```\n",
+    "import h5py\n",
+    "import zarr\n",
+    "import numpy as np\n",
+    "source = h5py.File('data/example.h5', mode='w')\n",
+    "foo = source.create_group('foo')\n",
+    "baz = foo.create_dataset('bar/baz', data=np.arange(100), chunks=(50,))\n",
+    "spam = source.create_dataset('spam', data=np.arange(100, 200), chunks=(30,))\n",
+    "zarr.tree(source)\n",
+    "```\n",
+    "\n",
+    "Output\n",
+    "```\n",
+    "/\n",
+    " ├── foo\n",
+    " │   └── bar\n",
+    " │       └── baz (100,) int64\n",
+    " └── spam (100,) int64\n",
+    "```"
+   ],
+   "metadata": {
+    "id": "CcnmamX_Z1qx"
+   }
+  },
+  {
+   "cell_type": "markdown",
+   "source": [
+    "Input\n",
+    "```\n",
+    "dest = zarr.open_group('data/example.zarr', mode='w')\n",
+    "from sys import stdout\n",
+    "zarr.copy(source['foo'], dest, log=stdout)\n",
+    "```\n",
+    "\n",
+    "Output\n",
+    "```\n",
+    "copy /foo\n",
+    "copy /foo/bar\n",
+    "copy /foo/bar/baz (100,) int64\n",
+    "all done: 3 copied, 0 skipped, 800 bytes copied\n",
+    "(3, 0, 800)\n",
+    "```"
+   ],
+   "metadata": {
+    "id": "T-IFelMyaObO"
+   }
+  },
+  {
+   "cell_type": "markdown",
+   "source": [
+    "Input\n",
+    "```\n",
+    "# N.B., no spam\n",
+    "dest.tree()\n",
+    "```\n",
+    "\n",
+    "Output\n",
+    "```\n",
+    "/\n",
+    " └── foo\n",
+    "     └── bar\n",
+    "         └── baz (100,) int64\n",
+    "```"
+   ],
+   "metadata": {
+    "id": "w9tc4osOa1h3"
+   }
+  },
+  {
+   "cell_type": "markdown",
+   "source": [
+    "Input\n",
+    "```\n",
+    "source.close()\n",
+    "```\n",
+    "\n"
+   ],
+   "metadata": {
+    "id": "GBESHf1QbGlD"
+   }
+  },
+  {
+   "cell_type": "markdown",
+   "source": [
+    "If rather than copying a single group or array you would like to copy all groups and arrays, use [```zarr.convenience.copy_all()```](https://zarr.readthedocs.io/en/stable/api/convenience.html#zarr.convenience.copy_all), e.g.:"
+   ],
+   "metadata": {
+    "id": "ttbwjeTbbW58"
+   }
+  },
+  {
+   "cell_type": "markdown",
+   "source": [
+    "Input\n",
+    "```\n",
+    "source = h5py.File('data/example.h5', mode='r')\n",
+    "dest = zarr.open_group('data/example2.zarr', mode='w')\n",
+    "zarr.copy_all(source, dest, log=stdout)\n",
+    "```\n",
+    "\n",
+    "Output\n",
+    "```\n",
+    "copy /foo\n",
+    "copy /foo/bar\n",
+    "copy /foo/bar/baz (100,) int64\n",
+    "copy /spam (100,) int64\n",
+    "all done: 4 copied, 0 skipped, 1,600 bytes copied\n",
+    "(4, 0, 1600)\n",
+    "```"
+   ],
+   "metadata": {
+    "id": "f9ZEn7ZBcH9N"
+   }
+  },
+  {
+   "cell_type": "markdown",
+   "source": [
+    "Input\n",
+    "```\n",
+    "dest.tree()\n",
+    "```\n",
+    "\n",
+    "Output\n",
+    "```\n",
+    "/\n",
+    " ├── foo\n",
+    " │   └── bar\n",
+    " │       └── baz (100,) int64\n",
+    " └── spam (100,) int64\n",
+    "```"
+   ],
+   "metadata": {
+    "id": "72RBjYK0cku8"
+   }
+  },
+  {
+   "cell_type": "markdown",
+   "source": [
+    "If you need to copy data between two Zarr groups, the [```zarr.convenience.copy()```](https://zarr.readthedocs.io/en/stable/api/convenience.html#zarr.convenience.copy) and [```zarr.convenience.copy_all()```](https://zarr.readthedocs.io/en/stable/api/convenience.html#zarr.convenience.copy_all) functions can be used and provide the most flexibility. However, if you want to copy data in the most efficient way possible, without changing any configuration options, the [```zarr.convenience.copy_store()```](https://zarr.readthedocs.io/en/stable/api/convenience.html#zarr.convenience.copy_store) function can be used. This function copies data directly between the underlying stores, without any decompression or re-compression, and so should be faster. E.g.:"
+   ],
+   "metadata": {
+    "id": "J61LvFlNcz1O"
+   }
+  },
+  {
+   "cell_type": "markdown",
+   "source": [
+    "Input\n",
+    "```\n",
+    "import zarr\n",
+    "import numpy as np\n",
+    "store1 = zarr.DirectoryStore('data/example.zarr')\n",
+    "root = zarr.group(store1, overwrite=True)\n",
+    "baz = root.create_dataset('foo/bar/baz', data=np.arange(100), chunks=(50,))\n",
+    "spam = root.create_dataset('spam', data=np.arange(100, 200), chunks=(30,))\n",
+    "root.tree()\n",
+    "```\n",
+    "\n",
+    "Output\n",
+    "```\n",
+    "/\n",
+    " ├── foo\n",
+    " │   └── bar\n",
+    " │       └── baz (100,) int64\n",
+    " └── spam (100,) int64\n",
+    "```"
+   ],
+   "metadata": {
+    "id": "VRQ0Pa4jfIG8"
+   }
+  },
+  {
+   "cell_type": "markdown",
+   "source": [
+    "Input\n",
+    "```\n",
+    "from sys import stdout\n",
+    "store2 = zarr.ZipStore('data/example.zip', mode='w')\n",
+    "zarr.copy_store(store1, store2, log=stdout)\n",
+    "```\n",
+    "\n",
+    "Output\n",
+    "```\n",
+    "copy .zgroup\n",
+    "copy foo/.zgroup\n",
+    "copy foo/bar/.zgroup\n",
+    "copy foo/bar/baz/.zarray\n",
+    "copy foo/bar/baz/0\n",
+    "copy foo/bar/baz/1\n",
+    "copy spam/.zarray\n",
+    "copy spam/0\n",
+    "copy spam/1\n",
+    "copy spam/2\n",
+    "copy spam/3\n",
+    "all done: 11 copied, 0 skipped, 1,138 bytes copied\n",
+    "(11, 0, 1138)\n",
+    "```\n"
+   ],
+   "metadata": {
+    "id": "sAYruuJcfgMU"
+   }
+  },
+  {
+   "cell_type": "markdown",
+   "source": [
+    "Input\n",
+    "```\n",
+    "new_root = zarr.group(store2)\n",
+    "new_root.tree()\n",
+    "```\n",
+    "\n",
+    "Output\n",
+    "```\n",
+    "/\n",
+    " ├── foo\n",
+    " │   └── bar\n",
+    " │       └── baz (100,) int64\n",
+    " └── spam (100,) int64\n",
+    "```\n"
+   ],
+   "metadata": {
+    "id": "iigpR9lwf1o7"
+   }
+  },
+  {
+   "cell_type": "markdown",
+   "source": [
+    "Input\n",
+    "```\n",
+    "new_root['foo/bar/baz'][:]\n",
+    "```\n",
+    "\n",
+    "Output\n",
+    "```\n",
+    "array([ 0,  1,  2, ..., 97, 98, 99])\n",
+    "```\n"
+   ],
+   "metadata": {
+    "id": "jHHUevzjf1w3"
+   }
+  },
+  {
+   "cell_type": "markdown",
+   "source": [
+    "Input\n",
+    "```\n",
+    "# zip stores need to be closed\n",
+    "store2.close()\n",
+    "```"
+   ],
+   "metadata": {
+    "id": "giKK0ZqKgk2q"
+   }
+  },
+  {
+   "cell_type": "markdown",
+   "source": [
+    "## String Arrays\n",
+    "\n",
+    "There are several options for storing arrays of strings.\n",
+    "\n",
+    "If your strings are all ASCII strings, and you know the maximum length of the string in your array, then you can use an array with a fixed-length bytes dtype. E.g.:"
+   ],
+   "metadata": {
+    "id": "hZ344AKaHgmM"
+   }
+  },
+  {
+   "cell_type": "code",
+   "source": [
+    "z = zarr.zeros(10, dtype='S6')\n",
+    "z"
+   ],
+   "metadata": {
+    "id": "-GVHB_BMHmfm"
+   },
+   "execution_count": null,
+   "outputs": []
+  },
+  {
+   "cell_type": "code",
+   "source": [
+    "z[0] = b'Hello'\n",
+    "z[1] = b'world!'\n",
+    "z[:]"
+   ],
+   "metadata": {
+    "id": "yM-UTditHqFO"
+   },
+   "execution_count": null,
+   "outputs": []
+  },
+  {
+   "cell_type": "markdown",
+   "source": [
+    "A fixed-length unicode dtype is also available, e.g.:"
+   ],
+   "metadata": {
+    "id": "1QS15HvmIY6d"
+   }
+  },
+  {
+   "cell_type": "code",
+   "source": [
+    "greetings = ['¡Hola mundo!', 'Hej Världen!', 'Servus Woid!', 'Hei maailma!',\n",
+    "             'Xin chào thế giới', 'Njatjeta Botë!', 'Γεια σου κόσμε!',\n",
+    "             'こんにちは世界', '世界,你好!', 'Helló, világ!', 'Zdravo svete!',\n",
+    "             'เฮลโลเวิลด์']\n",
+    "text_data = greetings * 10000\n",
+    "z = zarr.array(text_data, dtype='U20')\n",
+    "z"
+   ],
+   "metadata": {
+    "id": "K7zehuUFISkl"
+   },
+   "execution_count": null,
+   "outputs": []
+  },
+  {
+   "cell_type": "code",
+   "source": [
+    "z[:]"
+   ],
+   "metadata": {
+    "id": "KDQOeo5KIpjH"
+   },
+   "execution_count": null,
+   "outputs": []
+  },
+  {
+   "cell_type": "markdown",
+   "source": [
+    "For variable-length strings, the ```object``` dtype can be used, but a codec must be provided to encode the data (see also [Object Arrays](#object-arrays) below). At the time of writing there are four codecs available that can encode variable-length string objects: ```numcodecs.VLenUTF8```, ```numcodecs.JSON```, ```numcodecs.MsgPack```, and ```numcodecs.Pickle```. E.g. using ```VLenUTF8```:"
+   ],
+   "metadata": {
+    "id": "E0xKyXTAI5Tc"
+   }
+  },
+  {
+   "cell_type": "code",
+   "source": [
+    "import numcodecs\n",
+    "z = zarr.array(text_data, dtype=object, object_codec=numcodecs.VLenUTF8())\n",
+    "z"
+   ],
+   "metadata": {
+    "id": "BC8vMTeFIsrh"
+   },
+   "execution_count": null,
+   "outputs": []
+  },
+  {
+   "cell_type": "code",
+   "source": [
+    "z.filters"
+   ],
+   "metadata": {
+    "id": "XSxrxvkUJ21e"
+   },
+   "execution_count": null,
+   "outputs": []
+  },
+  {
+   "cell_type": "code",
+   "source": [
+    "z[:]"
+   ],
+   "metadata": {
+    "id": "H-8U-ezSJ6Aj"
+   },
+   "execution_count": null,
+   "outputs": []
+  },
+  {
+   "cell_type": "markdown",
+   "source": [
+    "As a convenience, ```dtype=str``` (or ```dtype=unicode``` on Python 2.7) can be used, which is a short-hand for ```dtype=object, object_codec=numcodecs.VLenUTF8()```, e.g.:"
+   ],
+   "metadata": {
+    "id": "KcDs9ELKKBFF"
+   }
+  },
+  {
+   "cell_type": "code",
+   "source": [
+    "z = zarr.array(text_data, dtype=str)\n",
+    "z"
+   ],
+   "metadata": {
+    "id": "kzD30Ju_J8eL"
+   },
+   "execution_count": null,
+   "outputs": []
+  },
+  {
+   "cell_type": "code",
+   "source": [
+    "z.filters"
+   ],
+   "metadata": {
+    "id": "hJR2-qRCK7m1"
+   },
+   "execution_count": null,
+   "outputs": []
+  },
+  {
+   "cell_type": "code",
+   "source": [
+    "z[:]"
+   ],
+   "metadata": {
+    "id": "nOX6EvwMK-vX"
+   },
+   "execution_count": null,
+   "outputs": []
+  },
+  {
+   "cell_type": "markdown",
+   "source": [
+    "Variable-length byte strings are also supported via ```dtype=object```. Again an ```object_codec``` is required, which can be one of ```numcodecs.VLenBytes``` or ```numcodecs.Pickle```. For convenience, ```dtype=bytes``` (or ```dtype=str``` on Python 2.7) can be used as a short-hand for ```dtype=object, object_codec=numcodecs.VLenBytes()```, e.g.:"
+   ],
+   "metadata": {
+    "id": "i-FWDASWLD1q"
+   }
+  },
+  {
+   "cell_type": "code",
+   "source": [
+    "bytes_data = [g.encode('utf-8') for g in greetings] * 10000\n",
+    "z = zarr.array(bytes_data, dtype=bytes)\n",
+    "z"
+   ],
+   "metadata": {
+    "id": "9wWNRmTXLBIU"
+   },
+   "execution_count": null,
+   "outputs": []
+  },
+  {
+   "cell_type": "code",
+   "source": [
+    "z.filters"
+   ],
+   "metadata": {
+    "id": "8me8bHY3L2RJ"
+   },
+   "execution_count": null,
+   "outputs": []
+  },
+  {
+   "cell_type": "code",
+   "source": [
+    "z[:]"
+   ],
+   "metadata": {
+    "id": "8L65bqgsL3Zf"
+   },
+   "execution_count": null,
+   "outputs": []
+  },
+  {
+   "cell_type": "markdown",
+   "source": [
+    "If you know ahead of time all the possible string values that can occur, you could also use the ```numcodecs.Categorize``` codec to encode each unique string value as an integer. E.g.:"
+   ],
+   "metadata": {
+    "id": "0kvGrdrzL9PU"
+   }
+  },
+  {
+   "cell_type": "code",
+   "source": [
+    "categorize = numcodecs.Categorize(greetings, dtype=object)\n",
+    "z = zarr.array(text_data, dtype=object, object_codec=categorize)\n",
+    "z"
+   ],
+   "metadata": {
+    "id": "85G6VRPhL6iG"
+   },
+   "execution_count": null,
+   "outputs": []
+  },
+  {
+   "cell_type": "code",
+   "source": [
+    "z.filters"
+   ],
+   "metadata": {
+    "id": "6acl41GvMJI_"
+   },
+   "execution_count": null,
+   "outputs": []
+  },
+  {
+   "cell_type": "code",
+   "source": [
+    "z[:]"
+   ],
+   "metadata": {
+    "id": "wvD-hB_0MRA_"
+   },
+   "execution_count": null,
+   "outputs": []
+  },
+  {
+   "cell_type": "markdown",
+   "source": [
+    "\n",
+    "## Object Arrays\n",
+    "\n",
+    "Zarr supports arrays with an “object” dtype. This allows arrays to contain any type of object, such as variable-length unicode strings or variable-length arrays of numbers, among other possibilities. When creating an object array, a codec must be provided via the ```object_codec``` argument. This codec handles encoding (serialization) of Python objects. The best codec to use will depend on what type of objects are present in the array.\n",
+    "\n",
+    "At the time of writing there are three codecs available that can serve as a general-purpose object codec and support encoding of a mixture of object types: ```numcodecs.JSON```, ```numcodecs.MsgPack```, and ```numcodecs.Pickle```.\n",
+    "\n",
+    "For example, using the JSON codec:"
+   ],
+   "metadata": {
+    "id": "7IWmKR-TNwaJ"
+   }
+  },
+  {
+   "cell_type": "code",
+   "source": [
+    "z = zarr.empty(5, dtype=object, object_codec=numcodecs.JSON())\n",
+    "z[0] = 42\n",
+    "z[1] = 'foo'\n",
+    "z[2] = ['bar', 'baz', 'qux']\n",
+    "z[3] = {'a': 1, 'b': 2.2}\n",
+    "z[:]"
+   ],
+   "metadata": {
+    "id": "MLTurd8sMTJe"
+   },
+   "execution_count": null,
+   "outputs": []
+  },
+  {
+   "cell_type": "markdown",
+   "source": [
+    "Not all codecs support encoding of all object types. The ```numcodecs.Pickle``` codec is the most flexible, supporting encoding of any type of Python object. However, if you are sharing data with anyone other than yourself, then Pickle is not recommended as it is a potential security risk, because malicious code can be embedded within pickled data. The JSON and MsgPack codecs do not have any security issues and support encoding of unicode strings, lists and dictionaries. MsgPack is usually faster for both encoding and decoding.\n",
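+    "\n",
+    "For instance, a minimal sketch using the MsgPack codec instead (assuming the msgpack package is installed):\n",
+    "\n",
+    "```\n",
+    "z = zarr.empty(5, dtype=object, object_codec=numcodecs.MsgPack())\n",
+    "z[0] = 42\n",
+    "z[1] = ['foo', {'bar': 'baz'}]\n",
+    "z[:]\n",
+    "```"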
+   ],
+   "metadata": {
+    "id": "3FikhuoHO2CD"
+   }
+  },
+  {
+   "cell_type": "markdown",
+   "source": [
+    "### Ragged Arrays\n",
+    "\n",
+    "If you need to store an array of arrays, where each member array can be of any length and stores the same primitive type (a.k.a. a ragged array), the ```numcodecs.VLenArray``` codec can be used, e.g.:"
+   ],
+   "metadata": {
+    "id": "PhJ4A1z2O4VF"
+   }
+  },
+  {
+   "cell_type": "code",
+   "source": [
+    "z = zarr.empty(4, dtype=object, object_codec=numcodecs.VLenArray(int))\n",
+    "z"
+   ],
+   "metadata": {
+    "id": "vUnQvnhoOzn4"
+   },
+   "execution_count": null,
+   "outputs": []
+  },
+  {
+   "cell_type": "code",
+   "source": [
+    "z.filters"
+   ],
+   "metadata": {
+    "id": "gDyICbXcPLfb"
+   },
+   "execution_count": null,
+   "outputs": []
+  },
+  {
+   "cell_type": "code",
+   "source": [
+    "z[0] = np.array([1, 3, 5])\n",
+    "z[1] = np.array([4])\n",
+    "z[2] = np.array([7, 9, 14])\n",
+    "z[:]"
+   ],
+   "metadata": {
+    "id": "3N6R7ZhsPNxL"
+   },
+   "execution_count": null,
+   "outputs": []
+  },
+  {
+   "cell_type": "markdown",
+   "source": [
+    "As a convenience, ```dtype='array:T'``` can be used as a short-hand for ```dtype=object, object_codec=numcodecs.VLenArray('T')```, where ‘T’ can be any NumPy primitive dtype such as ‘i4’ or ‘f8’. E.g.:"
+   ],
+   "metadata": {
+    "id": "NpvHUL9sPT-g"
+   }
+  },
+  {
+   "cell_type": "code",
+   "source": [
+    "z = zarr.empty(4, dtype='array:i8')\n",
+    "z"
+   ],
+   "metadata": {
+    "id": "8H2jJDRAPRSL"
+   },
+   "execution_count": null,
+   "outputs": []
+  },
+  {
+   "cell_type": "code",
+   "source": [
+    "z.filters"
+   ],
+   "metadata": {
+    "id": "OcHSVwGwPfdh"
+   },
+   "execution_count": null,
+   "outputs": []
+  },
+  {
+   "cell_type": "code",
+   "source": [
+    "z[0] = np.array([1, 3, 5])\n",
+    "z[1] = np.array([4])\n",
+    "z[2] = np.array([7, 9, 14])\n",
+    "z[:]"
+   ],
+   "metadata": {
+    "id": "REN-2v9ZPlKK"
+   },
+   "execution_count": null,
+   "outputs": []
+  },
+  {
+   "cell_type": "markdown",
+   "source": [
+    "## Chunk Optimisations"
+   ],
+   "metadata": {
+    "id": "rEtgRl4xeoOI"
+   }
+  },
+  {
+   "cell_type": "markdown",
+   "source": [
+    "### Chunk Size and Shape\n",
+    "\n",
+    "In general, chunks of at least 1 megabyte (1M) uncompressed size seem to provide better performance, at least when using the Blosc compression library.\n",
+    "\n",
+    "The optimal chunk shape will depend on how you want to access the data. E.g., for a 2-dimensional array, if you only ever take slices along the first dimension, then chunk across the second dimension. If you know you want to chunk across an entire dimension you can use ```None``` or ```-1``` within the chunks argument, e.g.:"
+   ],
+   "metadata": {
+    "id": "JuuGDVbCPsYz"
+   }
+  },
+  {
+   "cell_type": "code",
+   "source": [
+    "z1 = zarr.zeros((10000, 10000), chunks=(100, None), dtype='i4')\n",
+    "z1.chunks"
+   ],
+   "metadata": {
+    "id": "tUORtisnPojh"
+   },
+   "execution_count": null,
+   "outputs": []
+  },
+  {
+   "cell_type": "markdown",
+   "source": [
+    "Alternatively, if you only ever take slices along the second dimension, then chunk across the first dimension, e.g.:"
+   ],
+   "metadata": {
+    "id": "2Iwy4I7WRIO0"
+   }
+  },
+  {
+   "cell_type": "code",
+   "source": [
+    "z2 = zarr.zeros((10000, 10000), chunks=(None, 100), dtype='i4')\n",
+    "z2.chunks"
+   ],
+   "metadata": {
+    "id": "YJnljjafRDdi"
+   },
+   "execution_count": null,
+   "outputs": []
+  },
+  {
+   "cell_type": "markdown",
+   "source": [
+    "If you require reasonable performance for both access patterns then you need to find a compromise, e.g.:"
+   ],
+   "metadata": {
+    "id": "UkT-P2qUROys"
+   }
+  },
+  {
+   "cell_type": "code",
+   "source": [
+    "z3 = zarr.zeros((10000, 10000), chunks=(1000, 1000), dtype='i4')\n",
+    "z3.chunks"
+   ],
+   "metadata": {
+    "id": "aoqVgUTqRNNf"
+   },
+   "execution_count": null,
+   "outputs": []
+  },
+  {
+   "cell_type": "markdown",
+   "source": [
+    "If you are feeling lazy, you can let Zarr guess a chunk shape for your data by providing ```chunks=True```, although please note that the algorithm for guessing a chunk shape is based on simple heuristics and may be far from optimal. E.g.:"
+   ],
+   "metadata": {
+    "id": "7HmrTCRgRVaH"
+   }
+  },
+  {
+   "cell_type": "code",
+   "source": [
+    "z4 = zarr.zeros((10000, 10000), chunks=True, dtype='i4')\n",
+    "z4.chunks"
+   ],
+   "metadata": {
+    "id": "4lYaMhfORSTJ"
+   },
+   "execution_count": null,
+   "outputs": []
+  },
+  {
+   "cell_type": "markdown",
+   "source": [
+    "If you know you are always going to be loading the entire array into memory, you can turn off chunks by providing ```chunks=False```, in which case there will be one single chunk for the array:"
+   ],
+   "metadata": {
+    "id": "GYouDAvMRhw-"
+   }
+  },
+  {
+   "cell_type": "code",
+   "source": [
+    "z5 = zarr.zeros((10000, 10000), chunks=False, dtype='i4')\n",
+    "z5.chunks"
+   ],
+   "metadata": {
+    "id": "ahiY5kezRfid"
+   },
+   "execution_count": null,
+   "outputs": []
+  },
+  {
+   "cell_type": "markdown",
+   "source": [
+    "### Chunk Memory Layout\n",
+    "\n",
+    "The order of bytes **within each chunk** of an array can be changed via the ```order``` keyword argument, to use either C or Fortran layout. For multi-dimensional arrays, these two layouts may provide different compression ratios, depending on the correlation structure within the data. E.g.:"
+   ],
+   "metadata": {
+    "id": "HCKZ9GP1RtlF"
+   }
+  },
+  {
+   "cell_type": "code",
+   "source": [
+    "a = np.arange(100000000, dtype='i4').reshape(10000, 10000).T\n",
+    "c = zarr.array(a, chunks=(1000, 1000))\n",
+    "c.info"
+   ],
+   "metadata": {
+    "id": "IRGMXvmJRsNI"
+   },
+   "execution_count": null,
+   "outputs": []
+  },
+  {
+   "cell_type": "code",
+   "source": [
+    "f = zarr.array(a, chunks=(1000, 1000), order='F')\n",
+    "f.info"
+   ],
+   "metadata": {
+    "id": "D9p8GjSvR-Y8"
+   },
+   "execution_count": null,
+   "outputs": []
+  },
+  {
+   "cell_type": "markdown",
+   "source": [
+    "In the above example, Fortran order gives a better compression ratio. This is an artificial example, but it illustrates the general point that changing the order of bytes within chunks of an array may improve the compression ratio, depending on the structure of the data, the compression algorithm used, and which compression filters (e.g., byte-shuffle) have been applied."
+   ],
+   "metadata": {
+    "id": "UGJqatqMSN72"
+   }
+  },
+  {
+   "cell_type": "markdown",
+   "source": [
+    "### Empty Chunks\n",
+    "\n",
+    "As of version 2.11, it is possible to configure how Zarr handles the storage of chunks that are “empty” (i.e., every element in the chunk is equal to the array’s fill value). When creating an array with ```write_empty_chunks=False``` (the default), Zarr will check whether a chunk is empty before compression and storage. If a chunk is empty, then Zarr does not store it, and instead deletes the chunk from storage if the chunk had been previously stored.\n",
+    "\n",
+    "This optimization prevents storing redundant objects and can speed up reads, but the cost is added computation during array writes, since the contents of each chunk must be compared to the fill value, and these advantages are contingent on the content of the array. If you know that your data will form chunks that are almost always non-empty, then there is no advantage to the optimization described above. In this case, creating an array with ```write_empty_chunks=True``` will instruct Zarr to write every chunk without checking for emptiness.\n",
+    "\n",
+    "The following example illustrates the effect of the ```write_empty_chunks``` flag on the time required to write an array with different values:"
+   ],
+   "metadata": {
+    "id": "VV_mOZ1VSQBN"
+   }
+  },
+  {
+   "cell_type": "code",
+   "source": [
+    "import zarr\n",
+    "import numpy as np\n",
+    "import time\n",
+    "from tempfile import TemporaryDirectory\n",
+    "def timed_write(write_empty_chunks):\n",
+    "    \"\"\"\n",
+    "    Measure the time required and number of objects created when writing\n",
+    "    to a Zarr array with random ints or fill value.\n",
+    "    \"\"\"\n",
+    "    chunks = (8192,)\n",
+    "    shape = (chunks[0] * 1024,)\n",
+    "    data = np.random.randint(0, 255, shape)\n",
+    "    dtype = 'uint8'\n",
+    "\n",
+    "    with TemporaryDirectory() as store:\n",
+    "        arr = zarr.open(store,\n",
+    "                        shape=shape,\n",
+    "                        chunks=chunks,\n",
+    "                        dtype=dtype,\n",
+    "                        write_empty_chunks=write_empty_chunks,\n",
+    "                        fill_value=0,\n",
+    "                        mode='w')\n",
+    "        # Initialize all chunks\n",
+    "        arr[:] = 100\n",
+    "        result = []\n",
+    "        for value in (data, arr.fill_value):\n",
+    "            start = time.time()\n",
+    "            arr[:] = value\n",
+    "            elapsed = time.time() - start\n",
+    "            result.append((elapsed, arr.nchunks_initialized))\n",
+    "\n",
+    "    return result\n",
+    "\n",
+    "for write_empty_chunks in (True, False):\n",
+    "    full, empty = timed_write(write_empty_chunks)\n",
+    "    print(f'\\nwrite_empty_chunks={write_empty_chunks}:\\n\\tRandom Data: {full[0]:.4f}s, {full[1]} objects stored\\n\\t Empty Data: {empty[0]:.4f}s, {empty[1]} objects stored\\n')"
+   ],
+   "metadata": {
+    "colab": {
+     "base_uri": "https://localhost:8080/"
+    },
+    "id": "RqUZDhlhSCUN",
+    "outputId": "0819c626-013c-43aa-9006-309cc435d96f"
+   },
+   "execution_count": null,
+   "outputs": [
+    {
+     "output_type": "stream",
+     "name": "stdout",
+     "text": [
+      "\n",
+      "write_empty_chunks=True:\n",
+      "\tRandom Data: 0.3179s, 1024 objects stored\n",
+      "\t Empty Data: 0.2976s, 1024 objects stored\n",
+      "\n",
+      "\n",
+      "write_empty_chunks=False:\n",
+      "\tRandom Data: 0.3552s, 1024 objects stored\n",
+      "\t Empty Data: 0.0653s, 0 objects stored\n",
+      "\n"
+     ]
+    }
+   ]
+  },
+  {
+   "cell_type": "markdown",
+   "source": [
+    "In this example, writing random data is slightly slower with ```write_empty_chunks=True```, but writing empty data is substantially faster and generates far fewer objects in storage."
+   ],
+   "metadata": {
+    "id": "cPzyeIPWU8sJ"
+   }
+  },
+  {
+   "cell_type": "markdown",
+   "source": [
+    "### Changing Chunk Shapes (Rechunking)\n",
+    "\n",
+    "Sometimes you are not free to choose the initial chunking of your input data, or you might have data saved with chunking which is not optimal for the analysis you have planned. In such cases it can be advantageous to re-chunk the data. For small datasets, or when the mismatch between input and output chunks is small such that only a few chunks of the input dataset need to be read to create each chunk in the output array, it is sufficient to simply copy the data to a new array with the desired chunking, e.g.:"
+   ],
+   "metadata": {
+    "id": "f22Cm6_tVEgL"
+   }
+  },
+  {
+   "cell_type": "code",
+   "source": [
+    "a = zarr.zeros((10000, 10000), chunks=(100, 100), dtype='uint16', store='a.zarr')\n",
+    "b = zarr.array(a, chunks=(100, 200), store='b.zarr')"
+   ],
+   "metadata": {
+    "id": "B1B2WnPjXi83"
+   },
+   "execution_count": null,
+   "outputs": []
+  },
+  {
+   "cell_type": "markdown",
+   "source": [
+    "If the chunk shapes mismatch, however, a simple copy can lead to non-optimal data access patterns and incur a substantial performance hit when using file-based stores. One of the most pathological examples is switching from column-based chunking to row-based chunking, e.g.:"
+   ],
+   "metadata": {
+    "id": "fHW5IzpoWj1g"
+   }
+  },
+  {
+   "cell_type": "code",
+   "source": [
+    "a = zarr.zeros((10000, 10000), chunks=(10000, 1), dtype='uint16', store='a.zarr')\n",
+    "b = zarr.array(a, chunks=(1, 10000), store='b.zarr')"
+   ],
+   "metadata": {
+    "id": "Eiby2yIWXUSd"
+   },
+   "execution_count": null,
+   "outputs": []
+  },
+  {
+   "cell_type": "markdown",
+   "source": [
+    "which will require every chunk in the input data set to be repeatedly read when creating each output chunk. If the entire array will fit within memory, this is simply resolved by forcing the entire input array into memory as a NumPy array before converting back to Zarr with the desired chunking:"
+   ],
+   "metadata": {
+    "id": "SqFfIcCtXyHm"
+   }
+  },
+  {
+   "cell_type": "code",
+   "source": [
+    "a = zarr.zeros((10000, 10000), chunks=(10000, 1), dtype='uint16', store='a.zarr')\n",
+    "b = a[...]\n",
+    "c = zarr.array(b, chunks=(1, 10000), store='c.zarr')"
+   ],
+   "metadata": {
+    "id": "M-VYVJntXvKt"
+   },
+   "execution_count": null,
+   "outputs": []
+  },
+  {
+   "cell_type": "markdown",
+   "source": [
+    "For data sets which have mismatched chunks and which do not fit in memory, a more sophisticated approach to rechunking, such as that offered by the [rechunker](https://github.com/pangeo-data/rechunker) package and discussed [here](https://medium.com/pangeo/rechunker-the-missing-link-for-chunked-array-analytics-5b2359e9dc11), may offer a substantial improvement in performance."
+   ],
+   "metadata": {
+    "id": "-ms0Z4rIX7Zb"
+   }
+  },
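+  {
+   "cell_type": "markdown",
+   "source": [
+    "A minimal sketch of what using rechunker might look like (assuming the rechunker package is installed; the target chunks, memory budget and store paths below are illustrative):\n",
+    "\n",
+    "```\n",
+    "from rechunker import rechunk\n",
+    "\n",
+    "source = zarr.open('a.zarr')\n",
+    "# build an out-of-core rechunking plan, then execute it\n",
+    "plan = rechunk(source, target_chunks=(1, 10000), max_mem='1GB',\n",
+    "               target_store='rechunked.zarr', temp_store='tmp.zarr')\n",
+    "plan.execute()\n",
+    "```"
+   ],
+   "metadata": {}
+  },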
+  {
+   "cell_type": "markdown",
+   "source": [
+    "## Parallel Computing and Synchronization\n",
+    "\n",
+    "Zarr arrays have been designed for use as the source or sink for data in parallel computations. By data source we mean that multiple concurrent read operations may occur. By data sink we mean that multiple concurrent write operations may occur, with each writer updating a different region of the array. Zarr arrays have not been designed for situations where multiple readers and writers are concurrently operating on the same array.\n",
+    "\n",
+    "Both multi-threaded and multi-process parallelism are possible. The bottleneck for most storage and retrieval operations is compression/decompression, and the Python global interpreter lock (GIL) is released wherever possible during these operations, so Zarr will generally not block other Python threads from running.\n",
+    "\n",
+    "When using a Zarr array as a data sink, some synchronization (locking) may be required to avoid data loss, depending on how data are being updated. If each worker in a parallel computation is writing to a separate region of the array, and if region boundaries are perfectly aligned with chunk boundaries, then no synchronization is required. However, if region and chunk boundaries are not perfectly aligned, then synchronization is required to avoid two workers attempting to modify the same chunk at the same time, which could result in data loss.\n",
+    "\n",
+    "To give a simple example, consider a 1-dimensional array of length 60, ```z```, divided into three chunks of 20 elements each. If three workers are running and each attempts to write to a 20 element region (i.e., ```z[0:20]```, ```z[20:40]``` and ```z[40:60]```) then each worker will be writing to a separate chunk and no synchronization is required. However, if two workers are running and each attempts to write to a 30 element region (i.e., ```z[0:30]``` and ```z[30:60]```) then it is possible both workers will attempt to modify the middle chunk at the same time, and synchronization is required to prevent data loss.\n",
+    "\n",
+    "Zarr provides support for chunk-level synchronization. E.g., create an array with thread synchronization:"
+   ],
+   "metadata": {
+    "id": "PCRbVGiHY_Q6"
+   }
+  },
+  {
+   "cell_type": "code",
+   "source": [
+    "z = zarr.zeros((10000, 10000), chunks=(1000, 1000), dtype='i4',\n",
+    "               synchronizer=zarr.ThreadSynchronizer())\n",
+    "z"
+   ],
+   "metadata": {
+    "id": "xryVyc_8ZoeE"
+   },
+   "execution_count": null,
+   "outputs": []
+  },
+  {
+   "cell_type": "markdown",
+   "source": [
+    "This array is safe to read or write within a multi-threaded program.\n",
+    "\n",
+    "Zarr also provides support for process synchronization via file locking, provided that all processes have access to a shared file system, and provided that the underlying file system supports file locking (which is not the case for some networked file systems). E.g.:"
+   ],
+   "metadata": {
+    "id": "ko5oeTMcZxrG"
+   }
+  },
+  {
+   "cell_type": "code",
+   "source": [
+    "synchronizer = zarr.ProcessSynchronizer('data/example.sync')\n",
+    "z = zarr.open_array('data/example', mode='w', shape=(10000, 10000),\n",
+    "                    chunks=(1000, 1000), dtype='i4',\n",
+    "                    synchronizer=synchronizer)\n",
+    "z"
+   ],
+   "metadata": {
+    "id": "OcvYriWgZs99"
+   },
+   "execution_count": null,
+   "outputs": []
+  },
+  {
+   "cell_type": "markdown",
+   "source": [
+    "This array is safe to read or write from multiple processes.\n",
+    "\n",
+    "When using multiple processes to parallelize reads or writes on arrays using the Blosc compression library, it may be necessary to set ```numcodecs.blosc.use_threads = False```, as otherwise Blosc may share incorrect global state amongst processes, causing programs to hang. See also the section on [Configuring Blosc](#configuring-blosc) below.\n",
+    "\n",
+    "Please note that support for parallel computing is an area of ongoing research and development. If you are using Zarr for parallel computing, we welcome feedback, experience, discussion, ideas and advice, particularly about issues related to data integrity and performance."
+   ],
+   "metadata": {
+    "id": "LXZIsP64aCxU"
+   }
+  },
+  {
+   "cell_type": "markdown",
+   "source": [
+    "## Pickle Support\n",
+    "\n",
+    "Zarr arrays and groups can be pickled, as long as the underlying store object can be pickled. Instances of any of the storage classes provided in the ```zarr.storage``` module can be pickled, as can the built-in ```dict``` class, which can also be used for storage.\n",
+    "\n",
+    "Note that if an array or group is backed by an in-memory store like a ```dict``` or [```zarr.storage.MemoryStore```](https://zarr.readthedocs.io/en/stable/api/storage.html#zarr.storage.MemoryStore), then when it is pickled all of the store data will be included in the pickled data. However, if an array or group is backed by a persistent store like a [```zarr.storage.DirectoryStore```](https://zarr.readthedocs.io/en/stable/api/storage.html#zarr.storage.DirectoryStore), [```zarr.storage.ZipStore```](https://zarr.readthedocs.io/en/stable/api/storage.html#zarr.storage.ZipStore) or [```zarr.storage.DBMStore```](https://zarr.readthedocs.io/en/stable/api/storage.html#zarr.storage.DBMStore) then the store data **are not** pickled. The only thing that is pickled is the necessary parameters to allow the store to re-open any underlying files or databases upon being unpickled.\n",
+    "\n",
+    "E.g., pickle/unpickle an in-memory array:"
+   ],
+   "metadata": {
+    "id": "w3jaHbGFaW0y"
+   }
+  },
+  {
+   "cell_type": "code",
+   "source": [
+    "import pickle\n",
+    "z1 = zarr.array(np.arange(100000))\n",
+    "s = pickle.dumps(z1)\n",
+    "# Relatively large because data have been pickled\n",
+    "len(s) > 5000"
+   ],
+   "metadata": {
+    "id": "9scF09UTZ5sj"
+   },
+   "execution_count": null,
+   "outputs": []
+  },
+  {
+   "cell_type": "code",
+   "source": [
+    "z2 = pickle.loads(s)\n",
+    "z1 == z2"
+   ],
+   "metadata": {
+    "id": "38C6zF-7cYCz"
+   },
+   "execution_count": null,
+   "outputs": []
+  },
+  {
+   "cell_type": "code",
+   "source": [
+    "np.all(z1[:] == z2[:])"
+   ],
+   "metadata": {
+    "id": "1J0DeU66cbrb"
+   },
+   "execution_count": null,
+   "outputs": []
+  },
+  {
+   "cell_type": "markdown",
+   "source": [
+    "E.g., pickle/unpickle an array stored on disk:"
+   ],
+   "metadata": {
+    "id": "UlaInqMTcjGm"
+   }
+  },
+  {
+   "cell_type": "code",
+   "source": [
+    "z3 = zarr.open('data/walnuts.zarr', mode='w', shape=100000, dtype='i8')\n",
+    "z3[:] = np.arange(100000)\n",
+    "s = pickle.dumps(z3)\n",
+    "# Small because no data have been pickled\n",
+    "len(s) < 200"
+   ],
+   "metadata": {
+    "id": "C4YKWMeTcd4q"
+   },
+   "execution_count": null,
+   "outputs": []
+  },
+  {
+   "cell_type": "code",
+   "source": [
+    "z4 = pickle.loads(s)\n",
+    "z3 == z4"
+   ],
+   "metadata": {
+    "id": "tmlCtveEcrDD"
+   },
+   "execution_count": null,
+   "outputs": []
+  },
+  {
+   "cell_type": "code",
+   "source": [
+    "np.all(z3[:] == z4[:])"
+   ],
+   "metadata": {
+    "id": "YX4tJKBKcu5p"
+   },
+   "execution_count": null,
+   "outputs": []
+  },
+  {
+   "cell_type": "markdown",
+   "source": [
+    "## Datetimes and Timedeltas\n",
+    "\n",
+    "NumPy’s ```datetime64``` (‘M8’) and ```timedelta64``` (‘m8’) dtypes are supported for Zarr arrays, as long as the units are specified. E.g.:"
+   ],
+   "metadata": {
+    "id": "7jRxWiDZcx9D"
+   }
+  },
+  {
+   "cell_type": "code",
+   "source": [
+    "z = zarr.array(['2007-07-13', '2006-01-13', '2010-08-13'], dtype='M8[D]')\n",
+    "z"
+   ],
+   "metadata": {
+    "id": "bgtk4Hwfcw3x"
+   },
+   "execution_count": null,
+   "outputs": []
+  },
+  {
+   "cell_type": "code",
+   "source": [
+    "z[:]"
+   ],
+   "metadata": {
+    "id": "45D0XtswdDuZ"
+   },
+   "execution_count": null,
+   "outputs": []
+  },
+  {
+   "cell_type": "code",
+   "source": [
+    "z[0]"
+   ],
+   "metadata": {
+    "id": "Yn56VuJVdFag"
+   },
+   "execution_count": null,
+   "outputs": []
+  },
+  {
+   "cell_type": "code",
+   "source": [
+    "z[0] = '1999-12-31'\n",
+    "z[:]"
+   ],
+   "metadata": {
+    "id": "HXhOTdoGdG4h"
+   },
+   "execution_count": null,
+   "outputs": []
+  },
+  {
+   "cell_type": "markdown",
+   "source": [
+    "## Usage Tips"
+   ],
+   "metadata": {
+    "id": "JQfFoW6pdKlW"
+   }
+  },
+  {
+   "cell_type": "markdown",
+   "source": [
+    "### Copying Large Arrays\n",
+    "\n",
+    "Data can be copied between large arrays without needing much memory, e.g.:"
+   ],
+   "metadata": {
+    "id": "lvmF6UpIeaZW"
+   }
+  },
+  {
+   "cell_type": "code",
+   "source": [
+    "z1 = zarr.empty((10000, 10000), chunks=(1000, 1000), dtype='i4')\n",
+    "z1[:] = 42\n",
+    "z2 = zarr.empty_like(z1)\n",
+    "z2[:] = z1"
+   ],
+   "metadata": {
+    "id": "WjRcFECAdJYJ"
+   },
+   "execution_count": null,
+   "outputs": []
+  },
+  {
+   "cell_type": "markdown",
+   "source": [
+    "Internally the example above works chunk-by-chunk, extracting only the data from ```z1``` required to fill each chunk in ```z2```. The source of the data (```z1```) could equally be an h5py Dataset."
+   ],
+   "metadata": {
+    "id": "udVHJz0pdbDP"
+   }
+  },
+  {
+   "cell_type": "markdown",
+   "source": [
+    "\n",
+    "### Configuring Blosc\n",
+    "\n",
+    "The Blosc compressor is able to use multiple threads internally to accelerate compression and decompression. By default, Blosc uses up to 8 internal threads. The number of Blosc internal threads can be increased or decreased, e.g.:"
+   ],
+   "metadata": {
+    "id": "ImWdiPYGdknW"
+   }
+  },
+  {
+   "cell_type": "code",
+   "source": [
+    "from numcodecs import blosc\n",
+    "blosc.set_nthreads(2)"
+   ],
+   "metadata": {
+    "id": "R3ejeuK7dXs5"
+   },
+   "execution_count": null,
+   "outputs": []
+  },
+  {
+   "cell_type": "markdown",
+   "source": [
+    "When a Zarr array is being used within a multi-threaded program, Zarr automatically switches to using Blosc in a single-threaded “contextual” mode. This is generally better as it allows multiple program threads to use Blosc simultaneously and prevents CPU thrashing from too many active threads. If you want to manually override this behaviour, set the value of the ```blosc.use_threads``` variable to ```True``` (Blosc always uses multiple internal threads) or ```False``` (Blosc always runs in single-threaded contextual mode). To re-enable automatic switching, set ```blosc.use_threads``` to ```None```.\n",
+    "\n",
+    "Please note that if Zarr is being used within a multi-process program, Blosc may not be safe to use in multi-threaded mode and may cause the program to hang. If using Blosc in a multi-process program then it is recommended to set ```blosc.use_threads = False```.\n",
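+    "\n",
+    "For instance, a minimal sketch of a safe multi-process setup:\n",
+    "\n",
+    "```\n",
+    "from numcodecs import blosc\n",
+    "blosc.use_threads = False  # run Blosc single-threaded; safe with multiprocessing\n",
+    "```"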
+ ], + "metadata": { + "id": "KSpXynP-dym6" + } + } + ], + "metadata": { + "colab": { + "collapsed_sections": [], + "name": "Tutorial.ipynb", + "provenance": [] + }, + "kernelspec": { + "display_name": "Python 3", + "name": "python3" + }, + "language_info": { + "name": "python" + } + }, + "nbformat": 4, + "nbformat_minor": 0 +} \ No newline at end of file From 74071748b11a5fcdb1ccdb184dcfb229db414874 Mon Sep 17 00:00:00 2001 From: GbotemiB Date: Mon, 10 Oct 2022 22:05:49 +0100 Subject: [PATCH 02/11] Update GitHub Actions (#1134) Co-authored-by: Saransh Chopra Co-authored-by: Saransh Chopra Co-authored-by: jakirkham --- requirements_rtfd.txt | 1 + 1 file changed, 1 insertion(+) diff --git a/requirements_rtfd.txt b/requirements_rtfd.txt index 8452f7af77..5012509a2a 100644 --- a/requirements_rtfd.txt +++ b/requirements_rtfd.txt @@ -1,6 +1,7 @@ asciitree setuptools setuptools_scm +nbsphinx sphinx sphinx-issues sphinx-copybutton From 5ea095013ee1e6580ad636a4612d1b33464fafb6 Mon Sep 17 00:00:00 2001 From: GbotemiB Date: Mon, 10 Oct 2022 22:30:52 +0100 Subject: [PATCH 03/11] running notebook --- docs/conf.py | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/docs/conf.py b/docs/conf.py index 2a47bf2d04..758b707b5c 100644 --- a/docs/conf.py +++ b/docs/conf.py @@ -53,7 +53,7 @@ issues_github_path = 'zarr-developers/zarr-python' #Handling errors with rendering notebook -nbsphinx_allow_errors = True +#nbsphinx_allow_errors = True # Add any paths that contain templates here, relative to this directory. templates_path = ['_templates'] From 919d17ce73a9ac2e547ea74d2c9dad192e1bdcf8 Mon Sep 17 00:00:00 2001 From: GbotemiB Date: Mon, 10 Oct 2022 23:07:25 +0100 Subject: [PATCH 04/11] running notebook --- docs/conf.py | 2 ++ 1 file changed, 2 insertions(+) diff --git a/docs/conf.py b/docs/conf.py index 758b707b5c..140e4337d8 100644 --- a/docs/conf.py +++ b/docs/conf.py @@ -52,6 +52,8 @@ numpydoc_class_members_toctree = False issues_github_path = 'zarr-developers/zarr-python' +nbsphinx_execute = 'auto' + #Handling errors with rendering notebook #nbsphinx_allow_errors = True From af5463bbb8a5ab5f7e550cab80fa02f3000ee239 Mon Sep 17 00:00:00 2001 From: GbotemiB Date: Tue, 11 Oct 2022 02:16:00 +0100 Subject: [PATCH 05/11] fix heading of tutorial file --- docs/conf.py | 2 +- docs/index.rst | 1 - docs/tutorial_nb.ipynb | 1278 ++++++++++++++++++++-------------------- 3 files changed, 642 insertions(+), 639 deletions(-) diff --git a/docs/conf.py b/docs/conf.py index 140e4337d8..f5941540ae 100644 --- a/docs/conf.py +++ b/docs/conf.py @@ -52,7 +52,7 @@ numpydoc_class_members_toctree = False issues_github_path = 'zarr-developers/zarr-python' -nbsphinx_execute = 'auto' +nbsphinx_execute = 'never' #Handling errors with rendering notebook #nbsphinx_allow_errors = True diff --git a/docs/index.rst b/docs/index.rst index eb05da09bc..949bef4eb9 100644 --- a/docs/index.rst +++ b/docs/index.rst @@ -63,7 +63,6 @@ Contents .. 
toctree:: :maxdepth: 2 - tutorial tutorial_nb.ipynb api spec diff --git a/docs/tutorial_nb.ipynb b/docs/tutorial_nb.ipynb index aa6a29c0ca..85891a5255 100644 --- a/docs/tutorial_nb.ipynb +++ b/docs/tutorial_nb.ipynb @@ -1,13 +1,19 @@ { - "cells": [ + { + "cell_type": "markdown", + "metadata": {}, + "source": [ + "### Tutorial" + ] + }, { "cell_type": "markdown", "metadata": { "id": "M-JjZsSt2ow1" }, "source": [ - "##*Installation*\n", + "## Installation\n", "*We use PyPI to install Zarr.*" ] }, @@ -39,7 +45,7 @@ "id": "7UFcR9eN_p5R" }, "source": [ - "##Creating an Array\n", + "## Creating an Array\n", "\n", "Zarr has several functions for creating arrays. For example:" ] @@ -74,7 +80,7 @@ "id": "GSMxGKBNuG_L" }, "source": [ - "##Reading and Writing Data\n", + "## Reading and Writing Data\n", "\n", "Zarr arrays support a similar interface to NumPy arrays for reading and writing data. For example, the entire array can be filled with a scalar value:" ] @@ -182,7 +188,7 @@ "id": "hzDLo0qNvpFq" }, "source": [ - "##Persistent Arrays\n", + "## Persistent Arrays\n", "\n", "In the examples above, compressed data for each chunk of the array was stored in main memory. Zarr arrays can also be stored on a file system, enabling persistence of data between sessions. For example:" ] @@ -281,7 +287,7 @@ "id": "k_NL5O9LzLeS" }, "source": [ - "##Resizing and Appending\n", + "## Resizing and Appending\n", "\n", "A Zarr array can be resized, which means that any of its dimensions can be increased or decreased in length. For example:" ] @@ -363,7 +369,7 @@ "id": "E2pKJkTS0PF1" }, "source": [ - "##Compressors\n", + "## Compressors\n", "\n", "A number of different compressors can be used with Zarr. A separate package called [NumCodecs](https://numcodecs.readthedocs.io/en/stable/) is available which provides a common interface to various compressor libraries including Blosc, Zstandard, LZ4, Zlib, BZ2 and LZMA. Different compressors can be provided via the ```compressor``` keyword argument accepted by all array creation functions. For example:" ] @@ -581,7 +587,7 @@ "id": "uS9Zzpgr7upR" }, "source": [ - "##Groups\n", + "## Groups\n", "\n", "Zarr supports hierarchical organization of arrays via groups. As with arrays, groups can be stored in memory, on disk, or via other storage systems that support a similar interface.\n", "\n", @@ -794,7 +800,7 @@ "id": "d-kppYhY_JqK" }, "source": [ - "##Array and Group Diagnostics\n", + "## Array and Group Diagnostics\n", "Diagnostic information about arrays and groups is available via the info property. E.g.:" ] }, @@ -893,7 +899,7 @@ "id": "cYlwjH9pE5ox" }, "source": [ - "##User Attributes\n", + "## User Attributes\n", "\n", "Zarr arrays and groups support custom key/value attributes, which can be useful for storing application-specific metadata. For example:\n" ] @@ -984,7 +990,7 @@ "id": "t6wyJ5GCKw0G" }, "source": [ - "##Advanced Indexing\n", + "## Advanced Indexing\n", "As of version 2.2, Zarr arrays support several methods for advanced or “fancy” indexing, which enable a subset of data items to be extracted or updated in an array without loading the entire array into memory.\n", "\n", "Note that although this functionality is similar to some of the advanced indexing capabilities available on NumPy arrays and on h5py datasets, **the Zarr API for advanced indexing is different from both NumPy and h5py**, so please read this section carefully. 
For a complete description of the indexing API, see the documentation for the [```zarr.core.Array```](https://zarr.readthedocs.io/en/stable/api/core.html#zarr.core.Array) class." @@ -996,7 +1002,7 @@ "id": "BknVdBuRLo_Q" }, "source": [ - "###Indexing with Coordinate Arrays\n", + "## #Indexing with Coordinate Arrays\n", "Items from a Zarr array can be extracted by providing an integer array of coordinates. E.g.:" ] }, @@ -1168,7 +1174,7 @@ "id": "vd8qKAgYMmLk" }, "source": [ - "###Indexing with Mask Array\n", + "## #Indexing with Mask Array\n", "Items can also be extracted by providing a Boolean mask. E.g.:" ] }, @@ -1304,7 +1310,7 @@ "id": "AX7Ttfc_Nbfh" }, "source": [ - "###Orthogonal Indexing\n", + "## #Orthogonal Indexing\n", "Zarr arrays also support methods for orthogonal indexing, which allows selections to be made along each dimension of an array independently. For example, this allows selecting a subset of rows and/or columns from a 2-dimensional array. E.g.:" ] }, @@ -1451,7 +1457,7 @@ "id": "hcb6xE8FPqgX" }, "source": [ - "###Indexing Fields in Structured Arrays\n", + "## #Indexing Fields in Structured Arrays\n", "All selection methods support a ```fields``` parameter which allows retrieving or replacing data for a specific field in an array with a structured dtype. E.g.:" ] }, @@ -1511,7 +1517,7 @@ }, "source": [ "\n", - "##Storage Alternatives\n", + "## Storage Alternatives\n", "\n", "Zarr can use any object that implements the ```MutableMapping``` interface from the [```collections```](https://docs.python.org/3/library/collections.html#module-collections) module in the Python standard library as the store for a group or an array.\n", "\n", @@ -1779,7 +1785,7 @@ "id": "o9nezRjYbvo6" }, "source": [ - "###Distributed/Cloud Storage" + "## #Distributed/Cloud Storage" ] }, { @@ -1795,17 +1801,22 @@ }, { "cell_type": "code", - "source": [ - "pip install s3fs" - ], + "execution_count": null, "metadata": { "id": "KxMAnxN85U7-" }, - "execution_count": null, - "outputs": [] + "outputs": [], + "source": [ + "pip install s3fs" + ] }, { "cell_type": "code", + "execution_count": null, + "metadata": { + "id": "g72KoI8rLKur" + }, + "outputs": [], "source": [ "import s3fs\n", "import zarr\n", @@ -1814,57 +1825,55 @@ "root = zarr.group(store=store)\n", "z = root['foo/bar/baz']\n", "z" - ], - "metadata": { - "id": "g72KoI8rLKur" - }, - "execution_count": null, - "outputs": [] + ] }, { "cell_type": "code", - "source": [ - "z.info" - ], + "execution_count": null, "metadata": { "id": "9Ky3c5rPLgcJ" }, - "execution_count": null, - "outputs": [] + "outputs": [], + "source": [ + "z.info" + ] }, { "cell_type": "code", - "source": [ - "z[:]" - ], + "execution_count": null, "metadata": { "id": "rbYmDc7OLmQt" }, - "execution_count": null, - "outputs": [] + "outputs": [], + "source": [ + "z[:]" + ] }, { "cell_type": "code", - "source": [ - "z[:].tobytes()" - ], + "execution_count": null, "metadata": { "id": "hCK8I6yALyWw" }, - "execution_count": null, - "outputs": [] + "outputs": [], + "source": [ + "z[:].tobytes()" + ] }, { "cell_type": "markdown", - "source": [ - "Zarr now also has a builtin storage backend for Azure Blob Storage. 
The class is [```zarr.storage.ABSStore```](https://zarr.readthedocs.io/en/stable/api/storage.html#zarr.storage.ABSStore) (requires [azure-storage-blob](https://docs.microsoft.com/en-us/azure/storage/blobs/storage-quickstart-blobs-python?tabs=environment-variable-windows) to be installed):" - ], "metadata": { "id": "A6dbWR4WL4tX" - } + }, + "source": [ + "Zarr now also has a builtin storage backend for Azure Blob Storage. The class is [```zarr.storage.ABSStore```](https://zarr.readthedocs.io/en/stable/api/storage.html#zarr.storage.ABSStore) (requires [azure-storage-blob](https://docs.microsoft.com/en-us/azure/storage/blobs/storage-quickstart-blobs-python?tabs=environment-variable-windows) to be installed):" + ] }, { "cell_type": "markdown", + "metadata": { + "id": "LhLw9qqSNaI1" + }, "source": [ "\n", "```\n", @@ -1875,26 +1884,28 @@ "z = root.zeros('foo/bar', shape=(1000, 1000), chunks=(100, 100), dtype='i4') \n", "z[:] = 42 \n", "```" - ], - "metadata": { - "id": "LhLw9qqSNaI1" - } + ] }, { "cell_type": "markdown", + "metadata": { + "id": "U-WqeNmbNjup" + }, "source": [ "When using an actual storage account, provide ```account_name``` and ```account_key``` arguments to [```zarr.storage.ABSStore```](https://zarr.readthedocs.io/en/stable/api/storage.html#zarr.storage.ABSStore), the above client is just testing against the emulator. Please also note that this is an experimental feature.\n", "\n", "Note that retrieving data from a remote service via the network can be significantly slower than retrieving data from a local file system, and will depend on network latency and bandwidth between the client and server systems. If you are experiencing poor performance, there are several things you can try. One option is to increase the array chunk size, which will reduce the number of chunks and thus reduce the number of network round-trips required to retrieve data for an array (and thus reduce the impact of network latency). Another option is to try to increase the compression ratio by changing compression options or trying a different compressor (which will reduce the impact of limited network bandwidth).\n", "\n", "As of version 2.2, Zarr also provides the [```zarr.storage.LRUStoreCache```](https://zarr.readthedocs.io/en/stable/api/storage.html#zarr.storage.LRUStoreCache) which can be used to implement a local in-memory cache layer over a remote store. 
E.g.:" - ], - "metadata": { - "id": "U-WqeNmbNjup" - } + ] }, { "cell_type": "code", + "execution_count": null, + "metadata": { + "id": "nXhc_UdsNPOn" + }, + "outputs": [], "source": [ "s3 = s3fs.S3FileSystem(anon=True, client_kwargs=dict(region_name='eu-west-2'))\n", "store = s3fs.S3Map(root='zarr-demo/store', s3=s3, check=False)\n", @@ -1905,184 +1916,182 @@ "from timeit import timeit\n", "# First data access is relatively slow, retrieved from store\n", "timeit('print(z[:].tobytes())', number=1, globals=globals())" - ], - "metadata": { - "id": "nXhc_UdsNPOn" - }, - "execution_count": null, - "outputs": [] + ] }, { "cell_type": "code", - "source": [ - "# Second data access is faster, uses cache\n", - "timeit('print(z[:].tobytes())', number=1, globals=globals())" - ], + "execution_count": null, "metadata": { "id": "SDLYWK0sOUbW" }, - "execution_count": null, - "outputs": [] + "outputs": [], + "source": [ + "# Second data access is faster, uses cache\n", + "timeit('print(z[:].tobytes())', number=1, globals=globals())" + ] }, { "cell_type": "markdown", - "source": [ - "If you are still experiencing poor performance with distributed/cloud storage, please raise an issue on the GitHub issue tracker with any profiling data you can provide, as there may be opportunities to optimise further either within Zarr or within the mapping interface to the storage." - ], "metadata": { "id": "boHOqg5LO4Y4" - } + }, + "source": [ + "If you are still experiencing poor performance with distributed/cloud storage, please raise an issue on the GitHub issue tracker with any profiling data you can provide, as there may be opportunities to optimise further either within Zarr or within the mapping interface to the storage." + ] }, { "cell_type": "markdown", + "metadata": { + "id": "vhkal_STPQrH" + }, "source": [ - "###IO with ```fsspec```\n", + "## #IO with ```fsspec```\n", "\n", "As of version 2.5, zarr supports passing URLs directly to [fsspec](https://filesystem-spec.readthedocs.io/en/latest/), and having it create the “mapping” instance automatically. This means, that for all of the backend storage implementations [supported by fsspec](https://filesystem-spec.readthedocs.io/en/latest/api.html#built-in-implementations), you can skip importing and configuring the storage explicitly. For example:\n", "\n" - ], - "metadata": { - "id": "vhkal_STPQrH" - } + ] }, { "cell_type": "code", - "source": [ - "g = zarr.open_group(\"s3://zarr-demo/store\", storage_options={'anon': True}) \n", - "g['foo/bar/baz'][:].tobytes() " - ], + "execution_count": null, "metadata": { "id": "tDKp3B4FO1it" }, - "execution_count": null, - "outputs": [] + "outputs": [], + "source": [ + "g = zarr.open_group(\"s3://zarr-demo/store\", storage_options={'anon': True}) \n", + "g['foo/bar/baz'][:].tobytes() " + ] }, { "cell_type": "markdown", + "metadata": { + "id": "CmuZHo8PRl5D" + }, "source": [ "The provision of the protocol specifier “s3://” will select the correct backend. 
Notice the kwargs ```storage_options```, used to pass parameters to that backend.\n", "\n", "As of version 2.6, write mode and complex URLs are also supported, such as" - ], - "metadata": { - "id": "CmuZHo8PRl5D" - } + ] }, { "cell_type": "code", - "source": [ - "g = zarr.open_group(\"simplecache::s3://zarr-demo/store\",\n", - " storage_options={\"s3\": {'anon': True}}) " - ], + "execution_count": null, "metadata": { "id": "1r9raC-kRAU8" }, - "execution_count": null, - "outputs": [] + "outputs": [], + "source": [ + "g = zarr.open_group(\"simplecache::s3://zarr-demo/store\",\n", + " storage_options={\"s3\": {'anon': True}}) " + ] }, { "cell_type": "code", - "source": [ - "# Downloads Target File \n", - "g['foo/bar/baz'][:].tobytes()" - ], + "execution_count": null, "metadata": { "id": "TuBM3BotR55K" }, - "execution_count": null, - "outputs": [] + "outputs": [], + "source": [ + "# Downloads Target File \n", + "g['foo/bar/baz'][:].tobytes()" + ] }, { "cell_type": "code", - "source": [ - "# Uses Cached File \n", - "g['foo/bar/baz'][:].tobytes()" - ], + "execution_count": null, "metadata": { "id": "_ppDor9SSCsi" }, - "execution_count": null, - "outputs": [] + "outputs": [], + "source": [ + "# Uses Cached File \n", + "g['foo/bar/baz'][:].tobytes()" + ] }, { "cell_type": "markdown", - "source": [ - "The second invocation here will be much faster. Note that the ```storage_options``` have become more complex here, to account for the two parts of the supplied URL." - ], "metadata": { "id": "AFG4kwY1SOv8" - } + }, + "source": [ + "The second invocation here will be much faster. Note that the ```storage_options``` have become more complex here, to account for the two parts of the supplied URL." + ] }, { "cell_type": "markdown", + "metadata": { + "id": "-HpqJgMyVoBR" + }, "source": [ - "###Consolidating Metadata\n", + "## #Consolidating Metadata\n", "\n", "Since there is a significant overhead for every connection to a cloud object store such as S3, the pattern described in the previous section may incur significant latency while scanning the metadata of the array hierarchy, even though each individual metadata object is small. For cases such as these, once the data are static and can be regarded as read-only, at least for the metadata/structure of the array hierarchy, the many metadata objects can be consolidated into a single one via [```zarr.convenience.consolidate_metadata()```](https://zarr.readthedocs.io/en/stable/api/convenience.html#zarr.convenience.consolidate_metadata). 
Doing this can greatly increase the speed of reading the array metadata, e.g.:"
   ]
  },
  {
   "cell_type": "code",
   "execution_count": null,
   "metadata": {
    "id": "vdfg41lgSLyA"
   },
   "outputs": [],
   "source": [
    "zarr.consolidate_metadata(store)"
   ]
  },
  {
   "cell_type": "markdown",
   "metadata": {
    "id": "CT6QiZsJWYHO"
   },
   "source": [
    "This creates a special key with a copy of all of the metadata from all of the metadata objects in the store.\n",
    "\n",
    "Later, to open a Zarr store with consolidated metadata, use [```zarr.convenience.open_consolidated()```](https://zarr.readthedocs.io/en/stable/api/convenience.html#zarr.convenience.open_consolidated), e.g.:"
   ]
  },
  {
   "cell_type": "code",
   "execution_count": null,
   "metadata": {
    "id": "hGCR1WwnWIbf"
   },
   "outputs": [],
   "source": [
    "root = zarr.open_consolidated(store)"
   ]
  },
  {
   "cell_type": "markdown",
   "metadata": {
    "id": "3f4QOII6XCyB"
   },
   "source": [
    "This uses the special key to read all of the metadata in a single call to the backend storage.\n",
    "\n",
    "Note that the hierarchy could still be opened in the normal way and altered, causing the consolidated metadata to become out of sync with the real state of the array hierarchy. In this case, [```zarr.convenience.consolidate_metadata()```](https://zarr.readthedocs.io/en/stable/api/convenience.html#zarr.convenience.consolidate_metadata) would need to be called again.\n",
    "\n",
    "To protect against consolidated metadata accidentally getting out of sync, the root group returned by [```zarr.convenience.open_consolidated()```](https://zarr.readthedocs.io/en/stable/api/convenience.html#zarr.convenience.open_consolidated) is read-only for the metadata, meaning that no new groups or arrays can be created, and arrays cannot be resized. However, data values within arrays can still be updated."
   ]
  },
  {
   "cell_type": "markdown",
   "metadata": {
    "id": "2kR2ue-MXVxx"
   },
   "source": [
    "## Copying/Migrating Data\n",
    "\n",
    "If you have some data in an HDF5 file and would like to copy some or all of it into a Zarr group, or vice-versa, the [```zarr.convenience.copy()```](https://zarr.readthedocs.io/en/stable/api/convenience.html#zarr.convenience.copy) and [```zarr.convenience.copy_all()```](https://zarr.readthedocs.io/en/stable/api/convenience.html#zarr.convenience.copy_all) functions can be used. 
Here’s an example copying a group named ‘foo’ from an HDF5 file to a Zarr group:" - ], - "metadata": { - "id": "2kR2ue-MXVxx" - } + ] }, { "cell_type": "markdown", + "metadata": { + "id": "CcnmamX_Z1qx" + }, "source": [ "Input\n", "```\n", @@ -2104,13 +2113,13 @@ " │ └── baz (100,) int64\n", " └── spam (100,) int64\n", "```" - ], - "metadata": { - "id": "CcnmamX_Z1qx" - } + ] }, { "cell_type": "markdown", + "metadata": { + "id": "T-IFelMyaObO" + }, "source": [ "Input\n", "```\n", @@ -2127,13 +2136,13 @@ "all done: 3 copied, 0 skipped, 800 bytes copied\n", "(3, 0, 800)\n", "```" - ], - "metadata": { - "id": "T-IFelMyaObO" - } + ] }, { "cell_type": "markdown", + "metadata": { + "id": "w9tc4osOa1h3" + }, "source": [ "Input\n", "```\n", @@ -2148,35 +2157,35 @@ " └── bar\n", " └── baz (100,) int64\n", "```" - ], - "metadata": { - "id": "w9tc4osOa1h3" - } + ] }, { "cell_type": "markdown", + "metadata": { + "id": "GBESHf1QbGlD" + }, "source": [ "Input\n", "```\n", "source.close()\n", "```\n", "\n" - ], - "metadata": { - "id": "GBESHf1QbGlD" - } + ] }, { "cell_type": "markdown", - "source": [ - "If rather than copying a single group or array you would like to copy all groups and arrays, use [```zarr.convenience.copy_all()```](https://zarr.readthedocs.io/en/stable/api/convenience.html#zarr.convenience.copy_all), e.g.:" - ], "metadata": { "id": "ttbwjeTbbW58" - } + }, + "source": [ + "If rather than copying a single group or array you would like to copy all groups and arrays, use [```zarr.convenience.copy_all()```](https://zarr.readthedocs.io/en/stable/api/convenience.html#zarr.convenience.copy_all), e.g.:" + ] }, { "cell_type": "markdown", + "metadata": { + "id": "f9ZEn7ZBcH9N" + }, "source": [ "Input\n", "```\n", @@ -2194,13 +2203,13 @@ "all done: 4 copied, 0 skipped, 1,600 bytes copied\n", "(4, 0, 1600)\n", "```" - ], - "metadata": { - "id": "f9ZEn7ZBcH9N" - } + ] }, { "cell_type": "markdown", + "metadata": { + "id": "72RBjYK0cku8" + }, "source": [ "Input\n", "```\n", @@ -2215,22 +2224,22 @@ " │ └── baz (100,) int64\n", " └── spam (100,) int64\n", "```" - ], - "metadata": { - "id": "72RBjYK0cku8" - } + ] }, { "cell_type": "markdown", - "source": [ - "If you need to copy data between two Zarr groups, the [```zarr.convenience.copy()```](https://zarr.readthedocs.io/en/stable/api/convenience.html#zarr.convenience.copy) and [```zarr.convenience.copy_all()```](https://zarr.readthedocs.io/en/stable/api/convenience.html#zarr.convenience.copy_all) functions can be used and provide the most flexibility. However, if you want to copy data in the most efficient way possible, without changing any configuration options, the [```zarr.convenience.copy_store()```](https://zarr.readthedocs.io/en/stable/api/convenience.html#zarr.convenience.copy_store) function can be used. This function copies data directly between the underlying stores, without any decompression or re-compression, and so should be faster. E.g.:" - ], "metadata": { "id": "J61LvFlNcz1O" - } + }, + "source": [ + "If you need to copy data between two Zarr groups, the [```zarr.convenience.copy()```](https://zarr.readthedocs.io/en/stable/api/convenience.html#zarr.convenience.copy) and [```zarr.convenience.copy_all()```](https://zarr.readthedocs.io/en/stable/api/convenience.html#zarr.convenience.copy_all) functions can be used and provide the most flexibility. 
However, if you want to copy data in the most efficient way possible, without changing any configuration options, the [```zarr.convenience.copy_store()```](https://zarr.readthedocs.io/en/stable/api/convenience.html#zarr.convenience.copy_store) function can be used. This function copies data directly between the underlying stores, without any decompression or re-compression, and so should be faster. E.g.:" + ] }, { "cell_type": "markdown", + "metadata": { + "id": "VRQ0Pa4jfIG8" + }, "source": [ "Input\n", "```\n", @@ -2251,13 +2260,13 @@ " │ └── baz (100,) int64\n", " └── spam (100,) int64\n", "```" - ], - "metadata": { - "id": "VRQ0Pa4jfIG8" - } + ] }, { "cell_type": "markdown", + "metadata": { + "id": "sAYruuJcfgMU" + }, "source": [ "Input\n", "```\n", @@ -2282,13 +2291,13 @@ "all done: 11 copied, 0 skipped, 1,138 bytes copied\n", "(11, 0, 1138)\n", "```\n" - ], - "metadata": { - "id": "sAYruuJcfgMU" - } + ] }, { "cell_type": "markdown", + "metadata": { + "id": "iigpR9lwf1o7" + }, "source": [ "Input\n", "```\n", @@ -2304,13 +2313,13 @@ " │ └── baz (100,) int64\n", " └── spam (100,) int64\n", "```\n" - ], - "metadata": { - "id": "iigpR9lwf1o7" - } + ] }, { "cell_type": "markdown", + "metadata": { + "id": "jHHUevzjf1w3" + }, "source": [ "Input\n", "```\n", @@ -2321,73 +2330,75 @@ "```\n", "array([ 0, 1, 2, ..., 97, 98, 99])\n", "```\n" - ], - "metadata": { - "id": "jHHUevzjf1w3" - } + ] }, { "cell_type": "markdown", + "metadata": { + "id": "giKK0ZqKgk2q" + }, "source": [ "Input\n", "```\n", "# zip stores need to be closed\n", "store2.close()\n", "```" - ], - "metadata": { - "id": "giKK0ZqKgk2q" - } + ] }, { "cell_type": "markdown", + "metadata": { + "id": "hZ344AKaHgmM" + }, "source": [ - "##String Arrays\n", + "## String Arrays\n", "\n", "There are several options for storing arrays of strings.\n", "\n", "If your strings are all ASCII strings, and you know the maximum length of the string in your array, then you can use an array with a fixed-length bytes dtype. 
E.g.:"
   ]
  },
  {
   "cell_type": "code",
   "execution_count": null,
   "metadata": {
    "id": "-GVHB_BMHmfm"
   },
   "outputs": [],
   "source": [
    "z = zarr.zeros(10, dtype='S6')\n",
    "z"
   ]
  },
  {
   "cell_type": "code",
   "execution_count": null,
   "metadata": {
    "id": "yM-UTditHqFO"
   },
   "outputs": [],
   "source": [
    "z[0] = b'Hello'\n",
    "z[1] = b'world!'\n",
    "z[:]"
   ]
  },
  {
   "cell_type": "markdown",
   "metadata": {
    "id": "1QS15HvmIY6d"
   },
   "source": [
    "A fixed-length unicode dtype is also available, e.g.:"
   ]
  },
  {
   "cell_type": "code",
   "execution_count": null,
   "metadata": {
    "id": "K7zehuUFISkl"
   },
   "outputs": [],
   "source": [
    "greetings = ['¡Hola mundo!', 'Hej Världen!', 'Servus Woid!', 'Hei maailma!',\n",
    "             'Xin chào thế giới', 'Njatjeta Botë!', 'Γεια σου κόσμε!',\n",
    "             'こんにちは世界', '世界，你好！', 'Helló, világ!', 'Zdravo svete!',\n",
    "             'เฮลโลเวิลด์']\n",
    "text_data = greetings * 10000\n",
    "z = zarr.array(text_data, dtype='U20')\n",
    "z"
   ]
  },
  {
   "cell_type": "code",
   "execution_count": null,
   "metadata": {
    "id": "KDQOeo5KIpjH"
   },
   "outputs": [],
   "source": [
    "z[:]"
   ]
  },
  {
   "cell_type": "markdown",
   "metadata": {
    "id": "E0xKyXTAI5Tc"
   },
   "source": [
    "For variable-length strings, the ```object``` dtype can be used, but a codec must be provided to encode the data (see also [Object Arrays](#object-arrays) below). At the time of writing there are four codecs available that can encode variable length string objects: ```numcodecs.VLenUTF8```, ```numcodecs.JSON```, ```numcodecs.MsgPack``` and ```numcodecs.Pickle```. E.g. 
using ```VLenUTF8```:" + ] }, { "cell_type": "code", + "execution_count": null, + "metadata": { + "id": "BC8vMTeFIsrh" + }, + "outputs": [], "source": [ "import numcodecs\n", "z = zarr.array(text_data, dtype=object, object_codec=numcodecs.VLenUTF8())\n", "z" - ], - "metadata": { - "id": "BC8vMTeFIsrh" - }, - "execution_count": null, - "outputs": [] + ] }, { "cell_type": "code", - "source": [ - "z.filters" - ], + "execution_count": null, "metadata": { "id": "XSxrxvkUJ21e" }, - "execution_count": null, - "outputs": [] + "outputs": [], + "source": [ + "z.filters" + ] }, { "cell_type": "code", - "source": [ - "z[:]" - ], + "execution_count": null, "metadata": { "id": "H-8U-ezSJ6Aj" }, - "execution_count": null, - "outputs": [] + "outputs": [], + "source": [ + "z[:]" + ] }, { "cell_type": "markdown", - "source": [ - "As a convenience, ```dtype=str``` (or ```dtype=unicode``` on Python 2.7) can be used, which is a short-hand for ```dtype=object, object_codec=numcodecs.VLenUTF8()```, e.g.:" - ], "metadata": { "id": "KcDs9ELKKBFF" - } + }, + "source": [ + "As a convenience, ```dtype=str``` (or ```dtype=unicode``` on Python 2.7) can be used, which is a short-hand for ```dtype=object, object_codec=numcodecs.VLenUTF8()```, e.g.:" + ] }, { "cell_type": "code", - "source": [ - "z = zarr.array(text_data, dtype=str)\n", - "z" - ], + "execution_count": null, "metadata": { "id": "kzD30Ju_J8eL" }, - "execution_count": null, - "outputs": [] + "outputs": [], + "source": [ + "z = zarr.array(text_data, dtype=str)\n", + "z" + ] }, { "cell_type": "code", - "source": [ - "z.filters" - ], + "execution_count": null, "metadata": { "id": "hJR2-qRCK7m1" }, - "execution_count": null, - "outputs": [] + "outputs": [], + "source": [ + "z.filters" + ] }, { "cell_type": "code", - "source": [ - "z[:]" - ], + "execution_count": null, "metadata": { "id": "nOX6EvwMK-vX" }, - "execution_count": null, - "outputs": [] + "outputs": [], + "source": [ + "z[:]" + ] }, { "cell_type": "markdown", - "source": [ - "Variable-length byte strings are also supported via ```dtype=object```. Again an ```object_codec``` is required, which can be one of ```numcodecs.VLenBytes``` or ```numcodecs.Pickle```. For convenience, ```dtype=bytes``` (or ```dtype=str``` on Python 2.7) can be used as a short-hand for ```dtype=object, object_codec=numcodecs.VLenBytes()```, e.g.:" - ], "metadata": { "id": "i-FWDASWLD1q" - } + }, + "source": [ + "Variable-length byte strings are also supported via ```dtype=object```. Again an ```object_codec``` is required, which can be one of ```numcodecs.VLenBytes``` or ```numcodecs.Pickle```. 
For convenience, ```dtype=bytes``` (or ```dtype=str``` on Python 2.7) can be used as a short-hand for ```dtype=object, object_codec=numcodecs.VLenBytes()```, e.g.:"
   ]
  },
  {
   "cell_type": "code",
   "execution_count": null,
   "metadata": {
    "id": "9wWNRmTXLBIU"
   },
   "outputs": [],
   "source": [
    "bytes_data = [g.encode('utf-8') for g in greetings] * 10000\n",
    "z = zarr.array(bytes_data, dtype=bytes)\n",
    "z"
   ]
  },
  {
   "cell_type": "code",
   "execution_count": null,
   "metadata": {
    "id": "8me8bHY3L2RJ"
   },
   "outputs": [],
   "source": [
    "z.filters"
   ]
  },
  {
   "cell_type": "code",
   "execution_count": null,
   "metadata": {
    "id": "8L65bqgsL3Zf"
   },
   "outputs": [],
   "source": [
    "z[:]"
   ]
  },
  {
   "cell_type": "markdown",
   "metadata": {
    "id": "0kvGrdrzL9PU"
   },
   "source": [
    "If you know ahead of time all the possible string values that can occur, you could also use the ```numcodecs.Categorize``` codec to encode each unique string value as an integer. E.g.:"
   ]
  },
  {
   "cell_type": "code",
   "execution_count": null,
   "metadata": {
    "id": "85G6VRPhL6iG"
   },
   "outputs": [],
   "source": [
    "categorize = numcodecs.Categorize(greetings, dtype=object)\n",
    "z = zarr.array(text_data, dtype=object, object_codec=categorize)\n",
    "z"
   ]
  },
  {
   "cell_type": "code",
   "execution_count": null,
   "metadata": {
    "id": "6acl41GvMJI_"
   },
   "outputs": [],
   "source": [
    "z.filters"
   ]
  },
  {
   "cell_type": "code",
   "execution_count": null,
   "metadata": {
    "id": "wvD-hB_0MRA_"
   },
   "outputs": [],
   "source": [
    "z[:]"
   ]
  },
  {
   "cell_type": "markdown",
   "metadata": {
    "id": "7IWmKR-TNwaJ"
   },
   "source": [
    "\n",
    "## Object Arrays\n",
    "\n",
    "Zarr supports arrays with an “object” dtype. This allows arrays to contain any type of object, such as variable length unicode strings, or variable length arrays of numbers, or other possibilities. When creating an object array, a codec must be provided via the ```object_codec``` argument. This codec handles encoding (serialization) of Python objects. The best codec to use will depend on what type of objects are present in the array.\n",
    "\n",
    "At the time of writing there are three codecs available that can serve as a general purpose object codec and support encoding of a mixture of object types: ```numcodecs.JSON```, ```numcodecs.MsgPack``` 
and ```numcodecs.Pickle```.\n", "\n", "For example, using the JSON codec:" - ], - "metadata": { - "id": "7IWmKR-TNwaJ" - } + ] }, { "cell_type": "code", + "execution_count": null, + "metadata": { + "id": "MLTurd8sMTJe" + }, + "outputs": [], "source": [ "z = zarr.empty(5, dtype=object, object_codec=numcodecs.JSON())\n", "z[0] = 42\n", @@ -2614,296 +2625,300 @@ "z[2] = ['bar', 'baz', 'qux']\n", "z[3] = {'a': 1, 'b': 2.2}\n", "z[:]" - ], - "metadata": { - "id": "MLTurd8sMTJe" - }, - "execution_count": null, - "outputs": [] + ] }, { "cell_type": "markdown", - "source": [ - "Not all codecs support encoding of all object types. The numcodecs.Pickle codec is the most flexible, supporting encoding any type of Python object. However, if you are sharing data with anyone other than yourself, then Pickle is not recommended as it is a potential security risk. This is because malicious code can be embedded within pickled data. The JSON and MsgPack codecs do not have any security issues and support encoding of unicode strings, lists and dictionaries. MsgPack is usually faster for both encoding and decoding." - ], "metadata": { "id": "3FikhuoHO2CD" - } + }, + "source": [ + "Not all codecs support encoding of all object types. The numcodecs.Pickle codec is the most flexible, supporting encoding any type of Python object. However, if you are sharing data with anyone other than yourself, then Pickle is not recommended as it is a potential security risk. This is because malicious code can be embedded within pickled data. The JSON and MsgPack codecs do not have any security issues and support encoding of unicode strings, lists and dictionaries. MsgPack is usually faster for both encoding and decoding." + ] }, { "cell_type": "markdown", + "metadata": { + "id": "PhJ4A1z2O4VF" + }, "source": [ - "###Ragged Arrays\n", + "## #Ragged Arrays\n", "\n", "If you need to store an array of arrays, where each member array can be of any length and stores the same primitive type (a.k.a. a ragged array), the ```numcodecs.VLenArray``` codec can be used, e.g.:" - ], - "metadata": { - "id": "PhJ4A1z2O4VF" - } + ] }, { "cell_type": "code", - "source": [ - "z = zarr.empty(4, dtype=object, object_codec=numcodecs.VLenArray(int))\n", - "z" - ], + "execution_count": null, "metadata": { "id": "vUnQvnhoOzn4" }, - "execution_count": null, - "outputs": [] - }, + "outputs": [], + "source": [ + "z = zarr.empty(4, dtype=object, object_codec=numcodecs.VLenArray(int))\n", + "z" + ] + }, { "cell_type": "code", - "source": [ - "z.filters" - ], + "execution_count": null, "metadata": { "id": "gDyICbXcPLfb" }, - "execution_count": null, - "outputs": [] + "outputs": [], + "source": [ + "z.filters" + ] }, { "cell_type": "code", + "execution_count": null, + "metadata": { + "id": "3N6R7ZhsPNxL" + }, + "outputs": [], "source": [ "z[0] = np.array([1, 3, 5])\n", "z[1] = np.array([4])\n", "z[2] = np.array([7, 9, 14])\n", "z[:]" - ], - "metadata": { - "id": "3N6R7ZhsPNxL" - }, - "execution_count": null, - "outputs": [] + ] }, { "cell_type": "markdown", - "source": [ - "As a convenience, ```dtype='array:T'``` can be used as a short-hand for ```dtype=object, object_codec=numcodecs.VLenArray('T')```, where ‘T’ can be any NumPy primitive dtype such as ‘i4’ or ‘f8’. E.g.:" - ], "metadata": { "id": "NpvHUL9sPT-g" - } + }, + "source": [ + "As a convenience, ```dtype='array:T'``` can be used as a short-hand for ```dtype=object, object_codec=numcodecs.VLenArray('T')```, where ‘T’ can be any NumPy primitive dtype such as ‘i4’ or ‘f8’. 
E.g.:"
   ]
  },
  {
   "cell_type": "code",
   "execution_count": null,
   "metadata": {
    "id": "8H2jJDRAPRSL"
   },
   "outputs": [],
   "source": [
    "z = zarr.empty(4, dtype='array:i8')\n",
    "z"
   ]
  },
  {
   "cell_type": "code",
   "execution_count": null,
   "metadata": {
    "id": "OcHSVwGwPfdh"
   },
   "outputs": [],
   "source": [
    "z.filters"
   ]
  },
  {
   "cell_type": "code",
   "execution_count": null,
   "metadata": {
    "id": "REN-2v9ZPlKK"
   },
   "outputs": [],
   "source": [
    "z[0] = np.array([1, 3, 5])\n",
    "z[1] = np.array([4])\n",
    "z[2] = np.array([7, 9, 14])\n",
    "z[:]"
   ]
  },
  {
   "cell_type": "markdown",
   "metadata": {
    "id": "rEtgRl4xeoOI"
   },
   "source": [
    "## Chunk Optimisations"
   ]
  },
  {
   "cell_type": "markdown",
   "metadata": {
    "id": "JuuGDVbCPsYz"
   },
   "source": [
    "## #Chunk Size and Shape\n",
    "\n",
    "In general, chunks of at least 1 megabyte (1M) uncompressed size seem to provide better performance, at least when using the Blosc compression library.\n",
    "\n",
    "The optimal chunk shape will depend on how you want to access the data. E.g., for a 2-dimensional array, if you only ever take slices along the first dimension, then chunk across the second dimension. If you know you want to chunk across an entire dimension you can use ```None``` or ```-1``` within the chunks argument, e.g.:"
   ]
  },
  {
   "cell_type": "code",
   "execution_count": null,
   "metadata": {
    "id": "tUORtisnPojh"
   },
   "outputs": [],
   "source": [
    "z1 = zarr.zeros((10000, 10000), chunks=(100, None), dtype='i4')\n",
    "z1.chunks"
   ]
  },
  {
   "cell_type": "markdown",
   "metadata": {
    "id": "2Iwy4I7WRIO0"
   },
   "source": [
    "Alternatively, if you only ever take slices along the second dimension, then chunk across the first dimension, e.g.:"
   ]
  },
  {
   "cell_type": "code",
   "execution_count": null,
   "metadata": {
    "id": "YJnljjafRDdi"
   },
   "outputs": [],
   "source": [
    "z2 = zarr.zeros((10000, 10000), chunks=(None, 100), dtype='i4')\n",
    "z2.chunks"
   ]
  },
  {
   "cell_type": "markdown",
   "metadata": {
    "id": "UkT-P2qUROys"
   },
   "source": [
    "If you require reasonable performance for both access patterns then you need to find a compromise, e.g.:"
   ]
  },
  {
   "cell_type": "code",
   "execution_count": null,
   "metadata": {
    "id": "aoqVgUTqRNNf"
   },
   "outputs": [],
   "source": [
    "z3 = zarr.zeros((10000, 10000), chunks=(1000, 1000), dtype='i4')\n",
    "z3.chunks"
   ]
  },
  {
   "cell_type": "markdown",
   "metadata": {
    "id": "7HmrTCRgRVaH"
   },
   "source": [
    "If you are feeling lazy, you can let Zarr guess a 
chunk shape for your data by providing ```chunks=True```, although please note that the algorithm for guessing a chunk shape is based on simple heuristics and may be far from optimal. E.g.:" - ], "metadata": { "id": "7HmrTCRgRVaH" - } + }, + "source": [ + "If you are feeling lazy, you can let Zarr guess a chunk shape for your data by providing ```chunks=True```, although please note that the algorithm for guessing a chunk shape is based on simple heuristics and may be far from optimal. E.g.:" + ] }, { "cell_type": "code", - "source": [ - "z4 = zarr.zeros((10000, 10000), chunks=True, dtype='i4')\n", - "z4.chunks" - ], + "execution_count": null, "metadata": { "id": "4lYaMhfORSTJ" }, - "execution_count": null, - "outputs": [] + "outputs": [], + "source": [ + "z4 = zarr.zeros((10000, 10000), chunks=True, dtype='i4')\n", + "z4.chunks" + ] }, { "cell_type": "markdown", - "source": [ - "If you know you are always going to be loading the entire array into memory, you can turn off chunks by providing ```chunks=False```, in which case there will be one single chunk for the array:" - ], "metadata": { "id": "GYouDAvMRhw-" - } + }, + "source": [ + "If you know you are always going to be loading the entire array into memory, you can turn off chunks by providing ```chunks=False```, in which case there will be one single chunk for the array:" + ] }, { "cell_type": "code", - "source": [ - "z5 = zarr.zeros((10000, 10000), chunks=False, dtype='i4')\n", - "z5.chunks" - ], + "execution_count": null, "metadata": { "id": "ahiY5kezRfid" }, - "execution_count": null, - "outputs": [] + "outputs": [], + "source": [ + "z5 = zarr.zeros((10000, 10000), chunks=False, dtype='i4')\n", + "z5.chunks" + ] }, { "cell_type": "markdown", + "metadata": { + "id": "HCKZ9GP1RtlF" + }, "source": [ - "###Chunk Memory Layout\n", + "## #Chunk Memory Layout\n", "\n", "The order of bytes **within each chunk** of an array can be changed via the ```order``` keyword argument, to use either C or Fortran layout. For multi-dimensional arrays, these two layouts may provide different compression ratios, depending on the correlation structure within the data. E.g.:" - ], - "metadata": { - "id": "HCKZ9GP1RtlF" - } + ] }, { "cell_type": "code", + "execution_count": null, + "metadata": { + "id": "IRGMXvmJRsNI" + }, + "outputs": [], "source": [ "a = np.arange(100000000, dtype='i4').reshape(10000, 10000).T\n", "c = zarr.array(a, chunks=(1000, 1000))\n", "c.info" - ], - "metadata": { - "id": "IRGMXvmJRsNI" - }, - "execution_count": null, - "outputs": [] + ] }, { "cell_type": "code", - "source": [ - "f = zarr.array(a, chunks=(1000, 1000), order='F')\n", - "f.info" - ], + "execution_count": null, "metadata": { "id": "D9p8GjSvR-Y8" }, - "execution_count": null, - "outputs": [] + "outputs": [], + "source": [ + "f = zarr.array(a, chunks=(1000, 1000), order='F')\n", + "f.info" + ] }, { "cell_type": "markdown", - "source": [ - "In the above example, Fortran order gives a better compression ratio. This is an artificial example but illustrates the general point that changing the order of bytes within chunks of an array may improve the compression ratio, depending on the structure of the data, the compression algorithm used, and which compression filters (e.g., byte-shuffle) have been applied." - ], "metadata": { "id": "UGJqatqMSN72" - } + }, + "source": [ + "In the above example, Fortran order gives a better compression ratio. 
This is an artificial example but illustrates the general point that changing the order of bytes within chunks of an array may improve the compression ratio, depending on the structure of the data, the compression algorithm used, and which compression filters (e.g., byte-shuffle) have been applied." + ] }, { "cell_type": "markdown", + "metadata": { + "id": "VV_mOZ1VSQBN" + }, "source": [ - "###Empty Chunks\n", + "## #Empty Chunks\n", "\n", "As of version 2.11, it is possible to configure how Zarr handles the storage of chunks that are “empty” (i.e., every element in the chunk is equal to the array’s fill value). When creating an array with ```write_empty_chunks=False``` (the default), Zarr will check whether a chunk is empty before compression and storage. If a chunk is empty, then Zarr does not store it, and instead deletes the chunk from storage if the chunk had been previously stored.\n", "\n", "This optimization prevents storing redundant objects and can speed up reads, but the cost is added computation during array writes, since the contents of each chunk must be compared to the fill value, and these advantages are contingent on the content of the array. If you know that your data will form chunks that are almost always non-empty, then there is no advantage to the optimization described above. In this case, creating an array with ```write_empty_chunks=True``` will instruct Zarr to write every chunk without checking for emptiness.\n", "\n", "The following example illustrates the effect of the ```write_empty_chunks``` flag on the time required to write an array with different values.:" - ], - "metadata": { - "id": "VV_mOZ1VSQBN" - } + ] }, { "cell_type": "code", + "execution_count": null, + "metadata": { + "colab": { + "base_uri": "https://localhost:8080/" + }, + "id": "RqUZDhlhSCUN", + "outputId": "0819c626-013c-43aa-9006-309cc435d96f" + }, + "outputs": [], "source": [ "import zarr\n", "import numpy as np\n", @@ -2941,122 +2956,99 @@ "for write_empty_chunks in (True, False):\n", " full, empty = timed_write(write_empty_chunks)\n", " print(f'\\nwrite_empty_chunks={write_empty_chunks}:\\n\\tRandom Data: {full[0]:.4f}s, {full[1]} objects stored\\n\\t Empty Data: {empty[0]:.4f}s, {empty[1]} objects stored\\n')" - ], - "metadata": { - "colab": { - "base_uri": "https://localhost:8080/" - }, - "id": "RqUZDhlhSCUN", - "outputId": "0819c626-013c-43aa-9006-309cc435d96f" - }, - "execution_count": null, - "outputs": [ - { - "output_type": "stream", - "name": "stdout", - "text": [ - "\n", - "write_empty_chunks=True:\n", - "\tRandom Data: 0.3179s, 1024 objects stored\n", - "\t Empty Data: 0.2976s, 1024 objects stored\n", - "\n", - "\n", - "write_empty_chunks=False:\n", - "\tRandom Data: 0.3552s, 1024 objects stored\n", - "\t Empty Data: 0.0653s, 0 objects stored\n", - "\n" - ] - } ] }, { "cell_type": "markdown", - "source": [ - "In this example, writing random data is slightly slower with ```write_empty_chunks=True```, but writing empty data is substantially faster and generates far fewer objects in storage." - ], "metadata": { "id": "cPzyeIPWU8sJ" - } + }, + "source": [ + "In this example, writing random data is slightly slower with ```write_empty_chunks=True```, but writing empty data is substantially faster and generates far fewer objects in storage." 
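+   ]
+  },
+  {
+   "cell_type": "markdown",
+   "metadata": {},
+   "source": [
+    "To make the effect concrete, a small sketch (not part of the original tutorial) that counts stored chunks via the ```nchunks_initialized``` property might look like this:\n",
+    "\n",
+    "```\n",
+    "z = zarr.zeros(10000, chunks=1000, dtype='i4', write_empty_chunks=False)\n",
+    "z[:] = 0                      # every chunk equals the fill value, so nothing is stored\n",
+    "print(z.nchunks_initialized)  # expect 0\n",
+    "z[0] = 1                      # the first chunk now differs from the fill value\n",
+    "print(z.nchunks_initialized)  # expect 1\n",
+    "```"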
+ ] }, { "cell_type": "markdown", + "metadata": { + "id": "f22Cm6_tVEgL" + }, "source": [ - "###Changing Chunk Shapes (Rechunking)\n", + "## #Changing Chunk Shapes (Rechunking)\n", "\n", "Sometimes you are not free to choose the initial chunking of your input data, or you might have data saved with chunking which is not optimal for the analysis you have planned. In such cases it can be advantageous to re-chunk the data. For small datasets, or when the mismatch between input and output chunks is small such that only a few chunks of the input dataset need to be read to create each chunk in the output array, it is sufficient to simply copy the data to a new array with the desired chunking, e.g." - ], - "metadata": { - "id": "f22Cm6_tVEgL" - } + ] }, { "cell_type": "code", - "source": [ - "a = zarr.zeros((10000, 10000), chunks=(100,100), dtype='uint16', store='a.zarr')\n", - "b = zarr.array(a, chunks=(100, 200), store='b.zarr')" - ], + "execution_count": null, "metadata": { "id": "B1B2WnPjXi83" }, - "execution_count": null, - "outputs": [] + "outputs": [], + "source": [ + "a = zarr.zeros((10000, 10000), chunks=(100,100), dtype='uint16', store='a.zarr')\n", + "b = zarr.array(a, chunks=(100, 200), store='b.zarr')" + ] }, { "cell_type": "markdown", - "source": [ - "If the chunk shapes mismatch, however, a simple copy can lead to non-optimal data access patterns and incur a substantial performance hit when using file based stores. One of the most pathological examples is switching from column-based chunking to row-based chunking e.g." - ], "metadata": { "id": "fHW5IzpoWj1g" - } + }, + "source": [ + "If the chunk shapes mismatch, however, a simple copy can lead to non-optimal data access patterns and incur a substantial performance hit when using file based stores. One of the most pathological examples is switching from column-based chunking to row-based chunking e.g." + ] }, { "cell_type": "code", - "source": [ - "a = zarr.zeros((10000,10000), chunks=(10000, 1), dtype='uint16', store='a.zarr')\n", - "b = zarr.array(a, chunks=(1,10000), store='b.zarr')" - ], + "execution_count": null, "metadata": { "id": "Eiby2yIWXUSd" }, - "execution_count": null, - "outputs": [] + "outputs": [], + "source": [ + "a = zarr.zeros((10000,10000), chunks=(10000, 1), dtype='uint16', store='a.zarr')\n", + "b = zarr.array(a, chunks=(1,10000), store='b.zarr')" + ] }, { "cell_type": "markdown", - "source": [ - "which will require every chunk in the input data set to be repeatedly read when creating each output chunk. If the entire array will fit within memory, this is simply resolved by forcing the entire input array into memory as a numpy array before converting back to zarr with the desired chunking." - ], "metadata": { "id": "SqFfIcCtXyHm" - } + }, + "source": [ + "which will require every chunk in the input data set to be repeatedly read when creating each output chunk. If the entire array will fit within memory, this is simply resolved by forcing the entire input array into memory as a numpy array before converting back to zarr with the desired chunking." 
+ ]
  },
  {
   "cell_type": "code",
   "execution_count": null,
   "metadata": {
    "id": "M-VYVJntXvKt"
   },
   "outputs": [],
   "source": [
    "a = zarr.zeros((10000,10000), chunks=(10000, 1), dtype='uint16', store='a.zarr')\n",
    "b = a[...]\n",
    "c = zarr.array(b, chunks=(1,10000), store='c.zarr')"
   ]
  },
  {
   "cell_type": "markdown",
   "metadata": {
    "id": "-ms0Z4rIX7Zb"
   },
   "source": [
    "For data sets which have mismatched chunks and which do not fit in memory, a more sophisticated approach to rechunking, such as that offered by the [rechunker](https://github.com/pangeo-data/rechunker) package and discussed [here](https://medium.com/pangeo/rechunker-the-missing-link-for-chunked-array-analytics-5b2359e9dc11), may offer a substantial improvement in performance."
   ]
  },
  {
   "cell_type": "markdown",
   "metadata": {
    "id": "PCRbVGiHY_Q6"
   },
   "source": [
    "## Parallel Computing and Synchronization\n",
    "\n",
    "Zarr arrays have been designed for use as the source or sink for data in parallel computations. By data source we mean that multiple concurrent read operations may occur. By data sink we mean that multiple concurrent write operations may occur, with each writer updating a different region of the array. Zarr arrays have not been designed for situations where multiple readers and writers are concurrently operating on the same array.\n",
    "\n",
    "Both multi-threaded and multi-process parallelism are possible. The bottleneck for most storage and retrieval operations is compression/decompression, and the Python global interpreter lock (GIL) is released wherever possible during these operations, so Zarr will generally not block other Python threads from running.\n",
    "\n",
    "When using a Zarr array as a data sink, some synchronization (locking) may be required to avoid data loss, depending on how data are being updated. If each worker in a parallel computation is writing to a separate region of the array, and if region boundaries are perfectly aligned with chunk boundaries, then no synchronization is required. However, if region and chunk boundaries are not perfectly aligned, then synchronization is required to avoid two workers attempting to modify the same chunk at the same time, which could result in data loss.\n",
    "\n",
    "To give a simple example, consider a 1-dimensional array of length 60, ```z```, divided into three chunks of 20 elements each. If three workers are running and each attempts to write to a 20 element region (i.e., ```z[0:20]```, ```z[20:40]``` and ```z[40:60]```) then each worker will be writing to a separate chunk and no synchronization is required. However, if two workers are running and each attempts to write to a 30 element region (i.e., ```z[0:30]``` and ```z[30:60]```) then it is possible both workers will attempt to modify the middle chunk at the same time, and synchronization is required to prevent data loss.\n",
    "\n",
    "Zarr provides support for chunk-level synchronization. E.g., create an array with thread synchronization:"
   ]
  },
  {
   "cell_type": "code",
   "execution_count": null,
   "metadata": {
    "id": "xryVyc_8ZoeE"
   },
   "outputs": [],
   "source": [
    "z = zarr.zeros((10000, 10000), chunks=(1000, 1000), dtype='i4',\n",
    "               synchronizer=zarr.ThreadSynchronizer())\n",
    "z"
   ]
  },
  {
   "cell_type": "markdown",
   "metadata": {
    "id": "ko5oeTMcZxrG"
   },
   "source": [
    "This array is safe to read or write within a multi-threaded program.\n",
    "\n",
    "Zarr also provides support for process synchronization via file locking, provided that all processes have access to a shared file system, and provided that the underlying file system supports file locking (which is not the case for some networked file systems). 
E.g.:" - ], - "metadata": { - "id": "ko5oeTMcZxrG" - } + ] }, { "cell_type": "code", + "execution_count": null, + "metadata": { + "id": "OcvYriWgZs99" + }, + "outputs": [], "source": [ "synchronizer = zarr.ProcessSynchronizer('data/example.sync')\n", "z = zarr.open_array('data/example', mode='w', shape=(10000, 10000),\n", " chunks=(1000, 1000), dtype='i4',\n", " synchronizer=synchronizer)\n", "z" - ], - "metadata": { - "id": "OcvYriWgZs99" - }, - "execution_count": null, - "outputs": [] + ] }, { "cell_type": "markdown", + "metadata": { + "id": "LXZIsP64aCxU" + }, "source": [ "This array is safe to read or write from multiple processes.\n", "\n", "When using multiple processes to parallelize reads or writes on arrays using the Blosc compression library, it may be necessary to set ```numcodecs.blosc.use_threads = False```, as otherwise Blosc may share incorrect global state amongst processes causing programs to hang. See also the section on [Configuring Blosc](#configuring-blosc) below.\n", "\n", "Please note that support for parallel computing is an area of ongoing research and development. If you are using Zarr for parallel computing, we welcome feedback, experience, discussion, ideas and advice, particularly about issues related to data integrity and performance." - ], - "metadata": { - "id": "LXZIsP64aCxU" - } + ] }, { "cell_type": "markdown", + "metadata": { + "id": "w3jaHbGFaW0y" + }, "source": [ - "##Pickle Support\n", + "## Pickle Support\n", "\n", "Zarr arrays and groups can be pickled, as long as the underlying store object can be pickled. Instances of any of the storage classes provided in the ```zarr.storage``` module can be pickled, as can the built-in ```dict``` class which can also be used for storage.\n", "\n", "Note that if an array or group is backed by an in-memory store like a ```dict``` or [```zarr.storage.MemoryStore```](https://zarr.readthedocs.io/en/stable/api/storage.html#zarr.storage.MemoryStore), then when it is pickled all of the store data will be included in the pickled data. However, if an array or group is backed by a persistent store like a [```zarr.storage.DirectoryStore```](https://zarr.readthedocs.io/en/stable/api/storage.html#zarr.storage.DirectoryStore), [```zarr.storage.ZipStore```](https://zarr.readthedocs.io/en/stable/api/storage.html#zarr.storage.ZipStore) or [```zarr.storage.DBMStore```](https://zarr.readthedocs.io/en/stable/api/storage.html#zarr.storage.DBMStore) then the store data **are not** pickled. 
The only thing that is pickled is the necessary parameters to allow the store to re-open any underlying files or databases upon being unpickled.\n", "\n", "E.g., pickle/unpickle an in-memory array:" - ], - "metadata": { - "id": "w3jaHbGFaW0y" - } + ] }, { "cell_type": "code", + "execution_count": null, + "metadata": { + "id": "9scF09UTZ5sj" + }, + "outputs": [], "source": [ "import pickle\n", "z1 = zarr.array(np.arange(100000))\n", "s = pickle.dumps(z1)\n", "# Relatively large because data have been pickled\n", "len(s) > 5000" - ], - "metadata": { - "id": "9scF09UTZ5sj" - }, - "execution_count": null, - "outputs": [] + ] }, { "cell_type": "code", - "source": [ - "z2 = pickle.loads(s)\n", - "z1 == z2" - ], + "execution_count": null, "metadata": { "id": "38C6zF-7cYCz" }, - "execution_count": null, - "outputs": [] + "outputs": [], + "source": [ + "z2 = pickle.loads(s)\n", + "z1 == z2" + ] }, { "cell_type": "code", - "source": [ - "np.all(z1[:] == z2[:])" - ], + "execution_count": null, "metadata": { "id": "1J0DeU66cbrb" }, - "execution_count": null, - "outputs": [] + "outputs": [], + "source": [ + "np.all(z1[:] == z2[:])" + ] }, { "cell_type": "markdown", - "source": [ - "E.g., pickle/unpickle an array stored on disk:" - ], "metadata": { "id": "UlaInqMTcjGm" - } + }, + "source": [ + "E.g., pickle/unpickle an array stored on disk:" + ] }, { "cell_type": "code", + "execution_count": null, + "metadata": { + "id": "C4YKWMeTcd4q" + }, + "outputs": [], "source": [ "z3 = zarr.open('data/walnuts.zarr', mode='w', shape=100000, dtype='i8')\n", "z3[:] = np.arange(100000)\n", "s = pickle.dumps(z3)\n", "# Small because no data have been pickled\n", "len(s) < 200 " - ], - "metadata": { - "id": "C4YKWMeTcd4q" - }, - "execution_count": null, - "outputs": [] + ] }, { "cell_type": "code", - "source": [ - "z4 = pickle.loads(s)\n", - "z3 == z4" - ], + "execution_count": null, "metadata": { "id": "tmlCtveEcrDD" }, - "execution_count": null, - "outputs": [] + "outputs": [], + "source": [ + "z4 = pickle.loads(s)\n", + "z3 == z4" + ] }, { "cell_type": "code", - "source": [ - "np.all(z3[:] == z4[:])" - ], + "execution_count": null, "metadata": { "id": "YX4tJKBKcu5p" }, - "execution_count": null, - "outputs": [] + "outputs": [], + "source": [ + "np.all(z3[:] == z4[:])" + ] }, { "cell_type": "markdown", + "metadata": { + "id": "7jRxWiDZcx9D" + }, "source": [ - "##Datetimes and Timedeltas\n", + "## Datetimes and Timedeltas\n", "\n", "NumPy’s ```datetime64``` (‘M8’) and ```timedelta64``` (‘m8’) dtypes are supported for Zarr arrays, as long as the units are specified. 
E.g.:" - ], - "metadata": { - "id": "7jRxWiDZcx9D" - } + ] }, { "cell_type": "code", - "source": [ - "z = zarr.array(['2007-07-13', '2006-01-13', '2010-08-13'], dtype='M8[D]')\n", - "z" - ], + "execution_count": null, "metadata": { "id": "bgtk4Hwfcw3x" }, - "execution_count": null, - "outputs": [] + "outputs": [], + "source": [ + "z = zarr.array(['2007-07-13', '2006-01-13', '2010-08-13'], dtype='M8[D]')\n", + "z" + ] }, { "cell_type": "code", - "source": [ - "z[:]" - ], + "execution_count": null, "metadata": { "id": "45D0XtswdDuZ" }, - "execution_count": null, - "outputs": [] + "outputs": [], + "source": [ + "z[:]" + ] }, { "cell_type": "code", - "source": [ - "z[0]" - ], + "execution_count": null, "metadata": { "id": "Yn56VuJVdFag" }, - "execution_count": null, - "outputs": [] + "outputs": [], + "source": [ + "z[0]" + ] }, { "cell_type": "code", - "source": [ - "z[0] = '1999-12-31'\n", - "z[:]" - ], + "execution_count": null, "metadata": { "id": "HXhOTdoGdG4h" }, - "execution_count": null, - "outputs": [] + "outputs": [], + "source": [ + "z[0] = '1999-12-31'\n", + "z[:]" + ] }, { "cell_type": "markdown", - "source": [ - "##Usage Tips" - ], "metadata": { "id": "JQfFoW6pdKlW" - } + }, + "source": [ + "## Usage Tips" + ] }, { "cell_type": "markdown", + "metadata": { + "id": "lvmF6UpIeaZW" + }, "source": [ - "###Copying Large Arrays\n", + "## #Copying Large Arrays\n", "\n", "Data can be copied between large arrays without needing much memory, e.g.:" - ], - "metadata": { - "id": "lvmF6UpIeaZW" - } + ] }, { "cell_type": "code", + "execution_count": null, + "metadata": { + "id": "WjRcFECAdJYJ" + }, + "outputs": [], "source": [ "z1 = zarr.empty((10000, 10000), chunks=(1000, 1000), dtype='i4')\n", "z1[:] = 42\n", "z2 = zarr.empty_like(z1)\n", "z2[:] = z1" - ], - "metadata": { - "id": "WjRcFECAdJYJ" - }, - "execution_count": null, - "outputs": [] + ] }, { "cell_type": "markdown", - "source": [ - "Internally the example above works chunk-by-chunk, extracting only the data from ```z1``` required to fill each chunk in ```z2```. The source of the data (```z1```) could equally be an h5py Dataset." - ], "metadata": { "id": "udVHJz0pdbDP" - } + }, + "source": [ + "Internally the example above works chunk-by-chunk, extracting only the data from ```z1``` required to fill each chunk in ```z2```. The source of the data (```z1```) could equally be an h5py Dataset." + ] }, { "cell_type": "markdown", + "metadata": { + "id": "ImWdiPYGdknW" + }, "source": [ "\n", - "###Configuring Blosc\n", + "## #Configuring Blosc\n", "\n", "The Blosc compressor is able to use multiple threads internally to accelerate compression and decompression. By default, Blosc uses up to 8 internal threads. The number of Blosc threads can be changed to increase or decrease this number, e.g.:" - ], - "metadata": { - "id": "ImWdiPYGdknW" - } + ] }, { "cell_type": "code", - "source": [ - "from numcodecs import blosc\n", - "blosc.set_nthreads(2) " - ], + "execution_count": null, "metadata": { "id": "R3ejeuK7dXs5" }, - "execution_count": null, - "outputs": [] + "outputs": [], + "source": [ + "from numcodecs import blosc\n", + "blosc.set_nthreads(2) " + ] }, { "cell_type": "markdown", + "metadata": { + "id": "KSpXynP-dym6" + }, "source": [ "When a Zarr array is being used within a multi-threaded program, Zarr automatically switches to using Blosc in a single-threaded “contextual” mode. This is generally better as it allows multiple program threads to use Blosc simultaneously and prevents CPU thrashing from too many active threads. 
If you want to manually override this behaviour, set the value of the ```blosc.use_threads``` variable to ```True``` (Blosc always uses multiple internal threads) or ```False``` (Blosc always runs in single-threaded contextual mode). To re-enable automatic switching, set ```blosc.use_threads``` to ```None```.\n", "\n", "Please note that if Zarr is being used within a multi-process program, Blosc may not be safe to use in multi-threaded mode and may cause the program to hang. If using Blosc in a multi-process program then it is recommended to set ```blosc.use_threads = False```." - ], - "metadata": { - "id": "KSpXynP-dym6" - } + ] } ], "metadata": { @@ -3367,13 +3356,28 @@ "provenance": [] }, "kernelspec": { - "display_name": "Python 3", + "display_name": "Python 3.10.6 64-bit", + "language": "python", "name": "python3" }, "language_info": { - "name": "python" + "codemirror_mode": { + "name": "ipython", + "version": 3 + }, + "file_extension": ".py", + "mimetype": "text/x-python", + "name": "python", + "nbconvert_exporter": "python", + "pygments_lexer": "ipython3", + "version": "3.10.6" + }, + "vscode": { + "interpreter": { + "hash": "31f2aee4e71d21fbe5cf8b01ff0e069b9275f58929596ceb00d14d90e3e16cd6" + } } }, "nbformat": 4, "nbformat_minor": 0 -} \ No newline at end of file +} From 0c464a7107c5773149f605ed4c5d37b66be21f75 Mon Sep 17 00:00:00 2001 From: GbotemiB Date: Tue, 11 Oct 2022 02:48:18 +0100 Subject: [PATCH 06/11] corrections to header --- docs/tutorial_nb.ipynb | 44 +++++++++++++++++++----------------------- 1 file changed, 20 insertions(+), 24 deletions(-) diff --git a/docs/tutorial_nb.ipynb b/docs/tutorial_nb.ipynb index 85891a5255..ef2ffd688c 100644 --- a/docs/tutorial_nb.ipynb +++ b/docs/tutorial_nb.ipynb @@ -7,6 +7,13 @@ "### Tutorial" ] }, + { + "cell_type": "markdown", + "metadata": {}, + "source": [ + "Zarr provides classes and functions for working with N-dimensional arrays that behave like NumPy arrays but whose data is divided into chunks and each chunk is compressed. If you are already familiar with HDF5 then Zarr arrays provide similar functionality, but with some additional flexibility." + ] + }, { "cell_type": "markdown", "metadata": { @@ -28,17 +35,6 @@ "pip install zarr" ] }, - { - "cell_type": "markdown", - "metadata": { - "id": "mQw5YpxcsRTV" - }, - "source": [ - "#Tutorial\n", - "\n", - "Zarr provides classes and functions for working with N-dimensional arrays that behave like NumPy arrays but whose data is divided into chunks and each chunk is compressed. If you are already familiar with HDF5 then Zarr arrays provide similar functionality, but with some additional flexibility." - ] - }, { "cell_type": "markdown", "metadata": { @@ -1002,7 +998,7 @@ "id": "BknVdBuRLo_Q" }, "source": [ - "## #Indexing with Coordinate Arrays\n", + "## Indexing with Coordinate Arrays\n", "Items from a Zarr array can be extracted by providing an integer array of coordinates. E.g.:" ] }, @@ -1174,7 +1170,7 @@ "id": "vd8qKAgYMmLk" }, "source": [ - "## #Indexing with Mask Array\n", + "## Indexing with Mask Array\n", "Items can also be extracted by providing a Boolean mask. E.g.:" ] }, @@ -1310,7 +1306,7 @@ "id": "AX7Ttfc_Nbfh" }, "source": [ - "## #Orthogonal Indexing\n", + "## Orthogonal Indexing\n", "Zarr arrays also support methods for orthogonal indexing, which allows selections to be made along each dimension of an array independently. For example, this allows selecting a subset of rows and/or columns from a 2-dimensional array. 
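
As a hedged sketch of the idea, orthogonal selections are exposed through the array's ```oindex``` property (values here are illustrative):

```python
import numpy as np
import zarr

z = zarr.array(np.arange(15).reshape(3, 5))
# Outer (orthogonal) selection: rows [0, 2] crossed with columns [1, 3]
# gives a 2x2 block, unlike NumPy fancy indexing, which would return
# only the two elements at coordinates (0, 1) and (2, 3).
z.oindex[[0, 2], [1, 3]]  # array([[ 1,  3], [11, 13]])
```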
E.g.:" ] }, @@ -1457,7 +1453,7 @@ "id": "hcb6xE8FPqgX" }, "source": [ - "## #Indexing Fields in Structured Arrays\n", + "## Indexing Fields in Structured Arrays\n", "All selection methods support a ```fields``` parameter which allows retrieving or replacing data for a specific field in an array with a structured dtype. E.g.:" ] }, @@ -1785,7 +1781,7 @@ "id": "o9nezRjYbvo6" }, "source": [ - "## #Distributed/Cloud Storage" + "## Distributed/Cloud Storage" ] }, { @@ -1945,7 +1941,7 @@ "id": "vhkal_STPQrH" }, "source": [ - "## #IO with ```fsspec```\n", + "## IO with ```fsspec```\n", "\n", "As of version 2.5, zarr supports passing URLs directly to [fsspec](https://filesystem-spec.readthedocs.io/en/latest/), and having it create the “mapping” instance automatically. This means, that for all of the backend storage implementations [supported by fsspec](https://filesystem-spec.readthedocs.io/en/latest/api.html#built-in-implementations), you can skip importing and configuring the storage explicitly. For example:\n", "\n" @@ -2025,7 +2021,7 @@ "id": "-HpqJgMyVoBR" }, "source": [ - "## #Consolidating Metadata\n", + "## Consolidating Metadata\n", "\n", "Since there is a significant overhead for every connection to a cloud object store such as S3, the pattern described in the previous section may incur significant latency while scanning the metadata of the array hierarchy, even though each individual metadata object is small. For cases such as these, once the data are static and can be regarded as read-only, at least for the metadata/structure of the array hierarchy, the many metadata objects can be consolidated into a single one via [```zarr.convenience.consolidate_metadata()```](https://zarr.readthedocs.io/en/stable/api/convenience.html#zarr.convenience.consolidate_metadata). Doing this can greatly increase the speed of reading the array metadata, e.g.:" ] @@ -2642,7 +2638,7 @@ "id": "PhJ4A1z2O4VF" }, "source": [ - "## #Ragged Arrays\n", + "## Ragged Arrays\n", "\n", "If you need to store an array of arrays, where each member array can be of any length and stores the same primitive type (a.k.a. a ragged array), the ```numcodecs.VLenArray``` codec can be used, e.g.:" ] @@ -2745,7 +2741,7 @@ "id": "JuuGDVbCPsYz" }, "source": [ - "## #Chunk Size and Shape\n", + "## Chunk Size and Shape\n", "\n", "In general, chunks of at least 1 megabyte (1M) uncompressed size seem to provide better performance, at least when using the Blosc compression library.\n", "\n", @@ -2854,7 +2850,7 @@ "id": "HCKZ9GP1RtlF" }, "source": [ - "## #Chunk Memory Layout\n", + "## Chunk Memory Layout\n", "\n", "The order of bytes **within each chunk** of an array can be changed via the ```order``` keyword argument, to use either C or Fortran layout. For multi-dimensional arrays, these two layouts may provide different compression ratios, depending on the correlation structure within the data. E.g.:" ] @@ -2899,7 +2895,7 @@ "id": "VV_mOZ1VSQBN" }, "source": [ - "## #Empty Chunks\n", + "## Empty Chunks\n", "\n", "As of version 2.11, it is possible to configure how Zarr handles the storage of chunks that are “empty” (i.e., every element in the chunk is equal to the array’s fill value). When creating an array with ```write_empty_chunks=False``` (the default), Zarr will check whether a chunk is empty before compression and storage. 
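
A small hedged sketch of making this explicit at creation time (the path is illustrative; ```nchunks_initialized``` counts the chunks actually written to the store):

```python
import zarr

z = zarr.open('data/empty.zarr', mode='w', shape=(10000,), chunks=(1000,),
              dtype='i4', fill_value=0, write_empty_chunks=False)
z[:] = 0                 # every chunk holds only the fill value
z.nchunks_initialized    # 0 -- no chunk was written to the store
```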
If a chunk is empty, then Zarr does not store it, and instead deletes the chunk from storage if the chunk had been previously stored.\n", "\n", @@ -2973,7 +2969,7 @@ "id": "f22Cm6_tVEgL" }, "source": [ - "## #Changing Chunk Shapes (Rechunking)\n", + "## Changing Chunk Shapes (Rechunking)\n", "\n", "Sometimes you are not free to choose the initial chunking of your input data, or you might have data saved with chunking which is not optimal for the analysis you have planned. In such cases it can be advantageous to re-chunk the data. For small datasets, or when the mismatch between input and output chunks is small such that only a few chunks of the input dataset need to be read to create each chunk in the output array, it is sufficient to simply copy the data to a new array with the desired chunking, e.g." ] @@ -3285,7 +3281,7 @@ "id": "lvmF6UpIeaZW" }, "source": [ - "## #Copying Large Arrays\n", + "## Copying Large Arrays\n", "\n", "Data can be copied between large arrays without needing much memory, e.g.:" ] From b8bcc68492c62f3c13b0f07f5f2e70aece05c15d Mon Sep 17 00:00:00 2001 From: GbotemiB Date: Tue, 11 Oct 2022 02:49:03 +0100 Subject: [PATCH 07/11] added numpydoc to extensions --- docs/conf.py | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/docs/conf.py b/docs/conf.py index f5941540ae..bee64265b6 100644 --- a/docs/conf.py +++ b/docs/conf.py @@ -43,7 +43,7 @@ 'sphinx.ext.autosummary', 'sphinx.ext.viewcode', 'sphinx.ext.intersphinx', - #'numpydoc', + 'numpydoc', 'sphinx_issues', "sphinx_copybutton", ] From 0fd1ad032e10e66dd89aee2bf5f9c5e68f0571c5 Mon Sep 17 00:00:00 2001 From: GbotemiB Date: Tue, 11 Oct 2022 12:12:37 +0100 Subject: [PATCH 08/11] markup fix --- docs/tutorial_nb.ipynb | 4 +++- 1 file changed, 3 insertions(+), 1 deletion(-) diff --git a/docs/tutorial_nb.ipynb b/docs/tutorial_nb.ipynb index ef2ffd688c..b296b25908 100644 --- a/docs/tutorial_nb.ipynb +++ b/docs/tutorial_nb.ipynb @@ -67,7 +67,9 @@ "source": [ "The code above creates a 2-dimensional array of 32-bit integers with 10000 rows and 10000 columns, divided into chunks where each chunk has 1000 rows and 1000 columns (and so there will be 100 chunks in total).\n", "\n", - "For a complete list of array creation routines see the [```zarr.creation```](https://zarr.readthedocs.io/en/stable/api/creation.html#module-zarr.creation) module documentation." 
+ "For a complete list of array creation routines see the **[zarr.creation](https://zarr.readthedocs.io/en/stable/api/creation.html#module-zarr.creation)** module documentation.\n", + "\n", + "\n" ] }, { From 5559a010fe619a82f6b57ff72825d4633f99b546 Mon Sep 17 00:00:00 2001 From: GbotemiB Date: Tue, 11 Oct 2022 12:17:28 +0100 Subject: [PATCH 09/11] fix markdown rendering --- docs/tutorial_nb.ipynb | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/docs/tutorial_nb.ipynb b/docs/tutorial_nb.ipynb index b296b25908..2f924c0cde 100644 --- a/docs/tutorial_nb.ipynb +++ b/docs/tutorial_nb.ipynb @@ -67,7 +67,7 @@ "source": [ "The code above creates a 2-dimensional array of 32-bit integers with 10000 rows and 10000 columns, divided into chunks where each chunk has 1000 rows and 1000 columns (and so there will be 100 chunks in total).\n", "\n", - "For a complete list of array creation routines see the **[zarr.creation](https://zarr.readthedocs.io/en/stable/api/creation.html#module-zarr.creation)** module documentation.\n", + "For a complete list of array creation routines see the [zarr.creation](https://zarr.readthedocs.io/en/stable/api/creation.html#module-zarr.creation) module documentation.\n", "\n", "\n" ] From 5113a99f27d91185e8a2830492646454fa9bc909 Mon Sep 17 00:00:00 2001 From: GbotemiB Date: Tue, 11 Oct 2022 12:38:51 +0100 Subject: [PATCH 10/11] fix markdown rendering --- docs/tutorial_nb.ipynb | 52 +++++++++++++++++++++--------------------- 1 file changed, 26 insertions(+), 26 deletions(-) diff --git a/docs/tutorial_nb.ipynb b/docs/tutorial_nb.ipynb index 2f924c0cde..bded12b794 100644 --- a/docs/tutorial_nb.ipynb +++ b/docs/tutorial_nb.ipynb @@ -209,7 +209,7 @@ "id": "4vg9dzXTwbmo" }, "source": [ - "The array above will store its configuration metadata and all compressed chunk data in a directory called ‘data/example.zarr’ relative to the current working directory. The [```zarr.convenience.open()```](https://zarr.readthedocs.io/en/stable/api/convenience.html#zarr.convenience.open) function provides a convenient way to create a new persistent array or continue working with an existing array. Note that although the function is called “open”, there is no need to close an array: data are automatically flushed to disk, and files are automatically closed whenever an array is modified.\n", + "The array above will store its configuration metadata and all compressed chunk data in a directory called ‘data/example.zarr’ relative to the current working directory. The [zarr.convenience.open()](https://zarr.readthedocs.io/en/stable/api/convenience.html#zarr.convenience.open) function provides a convenient way to create a new persistent array or continue working with an existing array. Note that although the function is called “open”, there is no need to close an array: data are automatically flushed to disk, and files are automatically closed whenever an array is modified.\n", "\n", "Persistent arrays support the same interface for reading and writing data, e.g.:" ] @@ -254,7 +254,7 @@ "id": "ZpRSeeZvxnUL" }, "source": [ - "If you are just looking for a fast and convenient way to save NumPy arrays to disk then load back into memory later, the functions [```zarr.convenience.save()```](https://zarr.readthedocs.io/en/stable/api/convenience.html#zarr.convenience.save) and [```zarr.convenience.load()```](https://zarr.readthedocs.io/en/stable/api/convenience.html#zarr.convenience.load) may be useful. 
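
A minimal round-trip sketch (the path is illustrative):

```python
import numpy as np
import zarr

a = np.arange(10)
zarr.save('data/roundtrip.zarr', a)   # persist in one call
b = zarr.load('data/roundtrip.zarr')  # comes back as a NumPy array
np.array_equal(a, b)                  # True
```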
E.g.:" + "If you are just looking for a fast and convenient way to save NumPy arrays to disk then load back into memory later, the functions [zarr.convenience.save()](https://zarr.readthedocs.io/en/stable/api/convenience.html#zarr.convenience.save) and [zarr.convenience.load()](https://zarr.readthedocs.io/en/stable/api/convenience.html#zarr.convenience.load) may be useful. E.g.:" ] }, { @@ -724,7 +724,7 @@ "id": "Mlz4ccmkXP4A" }, "source": [ - "The [```zarr.hierarchy.Group.tree()```](https://zarr.readthedocs.io/en/stable/api/hierarchy.html#zarr.hierarchy.Group.tree) method can be used to print a tree representation of the hierarchy, e.g.:\n", + "The [zarr.hierarchy.Group.tree()](https://zarr.readthedocs.io/en/stable/api/hierarchy.html#zarr.hierarchy.Group.tree) method can be used to print a tree representation of the hierarchy, e.g.:\n", "\n" ] }, @@ -754,7 +754,7 @@ "id": "8HKFOEb8-IV5" }, "source": [ - "The [```zarr.convenience.open()```](https://zarr.readthedocs.io/en/stable/api/convenience.html#zarr.convenience.open) function provides a convenient way to create or re-open a group stored in a directory on the file-system, with sub-groups stored in sub-directories, e.g.:" + "The [zarr.convenience.open()](https://zarr.readthedocs.io/en/stable/api/convenience.html#zarr.convenience.open) function provides a convenient way to create or re-open a group stored in a directory on the file-system, with sub-groups stored in sub-directories, e.g.:" ] }, { @@ -789,7 +789,7 @@ "source": [ "Groups can be used as context managers (in a ```with``` statement). If the underlying store has a ```close``` method, it will be called on exit.\n", "\n", - "For more information on groups see the [```zarr.hierarchy```](https://zarr.readthedocs.io/en/stable/api/hierarchy.html#module-zarr.hierarchy) and [```zarr.convenience```](https://zarr.readthedocs.io/en/stable/api/convenience.html#module-zarr.convenience) API docs" + "For more information on groups see the [zarr.hierarchy](https://zarr.readthedocs.io/en/stable/api/hierarchy.html#module-zarr.hierarchy) and [zarr.convenience](https://zarr.readthedocs.io/en/stable/api/convenience.html#module-zarr.convenience) API docs" ] }, { @@ -858,7 +858,7 @@ "id": "3WzlmWbf_xB7" }, "source": [ - "Groups also have the [```zarr.hierarchy.Group.tree()```](https://zarr.readthedocs.io/en/stable/api/hierarchy.html#zarr.hierarchy.Group.tree) method, e.g.:\n", + "Groups also have the [zarr.hierarchy.Group.tree()](https://zarr.readthedocs.io/en/stable/api/hierarchy.html#zarr.hierarchy.Group.tree) method, e.g.:\n", "\n" ] }, @@ -991,7 +991,7 @@ "## Advanced Indexing\n", "As of version 2.2, Zarr arrays support several methods for advanced or “fancy” indexing, which enable a subset of data items to be extracted or updated in an array without loading the entire array into memory.\n", "\n", - "Note that although this functionality is similar to some of the advanced indexing capabilities available on NumPy arrays and on h5py datasets, **the Zarr API for advanced indexing is different from both NumPy and h5py**, so please read this section carefully. For a complete description of the indexing API, see the documentation for the [```zarr.core.Array```](https://zarr.readthedocs.io/en/stable/api/core.html#zarr.core.Array) class." + "Note that although this functionality is similar to some of the advanced indexing capabilities available on NumPy arrays and on h5py datasets, **the Zarr API for advanced indexing is different from both NumPy and h5py**, so please read this section carefully. 
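
To give a flavour of the API before the details, a hedged sketch of coordinate selection via the ```vindex``` property (values are illustrative):

```python
import numpy as np
import zarr

z = zarr.array(np.arange(10) * 10)
# Coordinate (pointwise) selection: one output element per coordinate,
# fetched without loading the whole array into memory.
z.vindex[[1, 3, 8]]  # array([10, 30, 80])
```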
For a complete description of the indexing API, see the documentation for the [zarr.core.Array](https://zarr.readthedocs.io/en/stable/api/core.html#zarr.core.Array) class." ] }, { @@ -1517,9 +1517,9 @@ "\n", "## Storage Alternatives\n", "\n", - "Zarr can use any object that implements the ```MutableMapping``` interface from the [```collections```](https://docs.python.org/3/library/collections.html#module-collections) module in the Python standard library as the store for a group or an array.\n", + "Zarr can use any object that implements the ```MutableMapping``` interface from the [collections](https://docs.python.org/3/library/collections.html#module-collections) module in the Python standard library as the store for a group or an array.\n", "\n", - "Some pre-defined storage classes are provided in the [```zarr.storage```](https://zarr.readthedocs.io/en/stable/api/storage.html#module-zarr.storage) module. For example, the [```zarr.storage.DirectoryStore```](https://zarr.readthedocs.io/en/stable/api/storage.html#zarr.storage.DirectoryStore) class provides a ```MutableMapping``` interface to a directory on the local file system. This is used under the hood by the [```zarr.convenience.open()```](https://zarr.readthedocs.io/en/stable/api/convenience.html#zarr.convenience.open) function. In other words, the following code:\n", + "Some pre-defined storage classes are provided in the [zarr.storage](https://zarr.readthedocs.io/en/stable/api/storage.html#module-zarr.storage) module. For example, the [zarr.storage.DirectoryStore](https://zarr.readthedocs.io/en/stable/api/storage.html#zarr.storage.DirectoryStore) class provides a ```MutableMapping``` interface to a directory on the local file system. This is used under the hood by the [zarr.convenience.open()](https://zarr.readthedocs.io/en/stable/api/convenience.html#zarr.convenience.open) function. In other words, the following code:\n", "\n" ] }, @@ -1602,7 +1602,7 @@ "id": "1GYws83JShUU" }, "source": [ - "Any other compatible storage class could be used in place of [```zarr.storage.DirectoryStore```](https://zarr.readthedocs.io/en/stable/api/storage.html#zarr.storage.DirectoryStore) in the code examples above. For example, here is an array stored directly into a Zip file, via the [```zarr.storage.ZipStore```](https://zarr.readthedocs.io/en/stable/api/storage.html#zarr.storage.ZipStore) class:" + "Any other compatible storage class could be used in place of [zarr.storage.DirectoryStore](https://zarr.readthedocs.io/en/stable/api/storage.html#zarr.storage.DirectoryStore) in the code examples above. For example, here is an array stored directly into a Zip file, via the [zarr.storage.ZipStore](https://zarr.readthedocs.io/en/stable/api/storage.html#zarr.storage.ZipStore) class:" ] }, { @@ -1662,7 +1662,7 @@ "source": [ "Note that there are some limitations on how Zip files can be used, because items within a Zip file cannot be updated in place. This means that data in the array should only be written once and write operations should be aligned with chunk boundaries. Note also that the ```close()``` method must be called after writing any data to the store, otherwise essential records will not be written to the underlying zip file.\n", "\n", - "Another storage alternative is the [```zarr.storage.DBMStore```](https://zarr.readthedocs.io/en/stable/api/storage.html#zarr.storage.DBMStore) class, added in Zarr version 2.2. This class allows any DBM-style database to be used for storing an array or group. 
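
When no opener is supplied, ```DBMStore``` appears to fall back to the standard library's ```dbm``` module, so a quick local test needs no third-party packages (a hedged sketch; the path is illustrative):

```python
import zarr

# DBMStore uses Python's built-in dbm module by default.
store = zarr.storage.DBMStore('data/example.dbm')
root = zarr.group(store=store, overwrite=True)
z = root.zeros('foo', shape=(10,), chunks=(5,), dtype='i4')
z[:] = 42
store.close()  # flush and close the underlying database file
```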
Here is an example using a Berkeley DB B-tree database for storage (requires [bsddb3](https://www.jcea.es/programacion/pybsddb.htm) to be installed):\n", + "Another storage alternative is the [zarr.storage.DBMStore](https://zarr.readthedocs.io/en/stable/api/storage.html#zarr.storage.DBMStore) class, added in Zarr version 2.2. This class allows any DBM-style database to be used for storing an array or group. Here is an example using a Berkeley DB B-tree database for storage (requires [bsddb3](https://www.jcea.es/programacion/pybsddb.htm) to be installed):\n", "\n" ] }, @@ -1688,7 +1688,7 @@ "id": "LYx7PDVzUNy8" }, "source": [ - "Also added in Zarr version 2.2 is the [```zarr.storage.LMDBStore```](https://zarr.readthedocs.io/en/stable/api/storage.html#zarr.storage.LMDBStore) class which enables the lightning memory-mapped database (LMDB) to be used for storing an array or group (requires lmdb to be installed):\n", + "Also added in Zarr version 2.2 is the [zarr.storage.LMDBStore](https://zarr.readthedocs.io/en/stable/api/storage.html#zarr.storage.LMDBStore) class which enables the lightning memory-mapped database (LMDB) to be used for storing an array or group (requires lmdb to be installed):\n", "\n" ] }, @@ -1713,7 +1713,7 @@ "id": "EWAldISLZGxT" }, "source": [ - "In Zarr version 2.3 is the [```zarr.storage.SQLiteStore```](https://zarr.readthedocs.io/en/stable/api/storage.html#zarr.storage.SQLiteStore) class which enables the SQLite database to be used for storing an array or group (requires Python is built with SQLite support):\n", + "In Zarr version 2.3 is the [zarr.storage.SQLiteStore](https://zarr.readthedocs.io/en/stable/api/storage.html#zarr.storage.SQLiteStore) class which enables the SQLite database to be used for storing an array or group (requires Python is built with SQLite support):\n", "\n" ] }, @@ -1738,9 +1738,9 @@ "id": "miduiodpZvl6" }, "source": [ - "Also added in Zarr version 2.3 are two storage classes for interfacing with server-client databases. The [```zarr.storage.RedisStore```](https://zarr.readthedocs.io/en/stable/api/storage.html#zarr.storage.RedisStore) class interfaces [Redis](https://redis.io/) (an in memory data structure store), and the ```zarr.storage.MongoDB``` class interfaces with [MongoDB](https://www.mongodb.com/) (an object oriented NoSQL database). These stores respectively require the [redis-py](https://redis-py.readthedocs.io/en/stable/) and [pymongo](https://api.mongodb.com/python/current/) packages to be installed.\n", + "Also added in Zarr version 2.3 are two storage classes for interfacing with server-client databases. The [zarr.storage.RedisStore](https://zarr.readthedocs.io/en/stable/api/storage.html#zarr.storage.RedisStore) class interfaces [Redis](https://redis.io/) (an in memory data structure store), and the ```zarr.storage.MongoDB``` class interfaces with [MongoDB](https://www.mongodb.com/) (an object oriented NoSQL database). These stores respectively require the [redis-py](https://redis-py.readthedocs.io/en/stable/) and [pymongo](https://api.mongodb.com/python/current/) packages to be installed.\n", "\n", - "For compatibility with the [N5](https://github.com/saalfeldlab/n5) data format, Zarr also provides an N5 backend (this is currently an experimental feature). 
Similar to the zip storage class, a [zarr.n5.N5Store](https://zarr.readthedocs.io/en/stable/api/n5.html#zarr.n5.N5Store) can be instantiated directly:"
   ]
  },
@@ -1864,7 +1864,7 @@
    "id": "A6dbWR4WL4tX"
   },
   "source": [
-    "Zarr now also has a builtin storage backend for Azure Blob Storage. The class is [```zarr.storage.ABSStore```](https://zarr.readthedocs.io/en/stable/api/storage.html#zarr.storage.ABSStore) (requires [azure-storage-blob](https://docs.microsoft.com/en-us/azure/storage/blobs/storage-quickstart-blobs-python?tabs=environment-variable-windows) to be installed):"
+    "Zarr now also has a builtin storage backend for Azure Blob Storage. The class is [zarr.storage.ABSStore](https://zarr.readthedocs.io/en/stable/api/storage.html#zarr.storage.ABSStore) (requires [azure-storage-blob](https://docs.microsoft.com/en-us/azure/storage/blobs/storage-quickstart-blobs-python?tabs=environment-variable-windows) to be installed):"
   ]
  },
@@ -1890,11 +1890,11 @@
    "id": "U-WqeNmbNjup"
   },
   "source": [
-    "When using an actual storage account, provide ```account_name``` and ```account_key``` arguments to [```zarr.storage.ABSStore```](https://zarr.readthedocs.io/en/stable/api/storage.html#zarr.storage.ABSStore), the above client is just testing against the emulator. Please also note that this is an experimental feature.\n",
+    "When using an actual storage account, provide ```account_name``` and ```account_key``` arguments to [zarr.storage.ABSStore](https://zarr.readthedocs.io/en/stable/api/storage.html#zarr.storage.ABSStore); the above client is just for testing against the emulator. Please also note that this is an experimental feature.\n",
     "\n",
     "Note that retrieving data from a remote service via the network can be significantly slower than retrieving data from a local file system, and will depend on network latency and bandwidth between the client and server systems. If you are experiencing poor performance, there are several things you can try. One option is to increase the array chunk size, which will reduce the number of chunks and thus reduce the number of network round-trips required to retrieve data for an array (and thus reduce the impact of network latency). Another option is to try to increase the compression ratio by changing compression options or trying a different compressor (which will reduce the impact of limited network bandwidth).\n",
     "\n",
-    "As of version 2.2, Zarr also provides the [```zarr.storage.LRUStoreCache```](https://zarr.readthedocs.io/en/stable/api/storage.html#zarr.storage.LRUStoreCache) which can be used to implement a local in-memory cache layer over a remote store. E.g.:"
+    "As of version 2.2, Zarr also provides the [zarr.storage.LRUStoreCache](https://zarr.readthedocs.io/en/stable/api/storage.html#zarr.storage.LRUStoreCache) which can be used to implement a local in-memory cache layer over a remote store. 
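
A hedged sketch of wrapping a store in a cache (a local ```DirectoryStore``` stands in for the remote store here; ```max_size``` is a byte budget, and the path is illustrative):

```python
import zarr

underlying = zarr.storage.DirectoryStore('data/group.zarr')
cache = zarr.storage.LRUStoreCache(underlying, max_size=2**28)  # ~256 MiB
root = zarr.open_group(cache, mode='r')  # repeated reads hit the cache
```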
E.g.:" ] }, { @@ -2025,7 +2025,7 @@ "source": [ "## Consolidating Metadata\n", "\n", - "Since there is a significant overhead for every connection to a cloud object store such as S3, the pattern described in the previous section may incur significant latency while scanning the metadata of the array hierarchy, even though each individual metadata object is small. For cases such as these, once the data are static and can be regarded as read-only, at least for the metadata/structure of the array hierarchy, the many metadata objects can be consolidated into a single one via [```zarr.convenience.consolidate_metadata()```](https://zarr.readthedocs.io/en/stable/api/convenience.html#zarr.convenience.consolidate_metadata). Doing this can greatly increase the speed of reading the array metadata, e.g.:" + "Since there is a significant overhead for every connection to a cloud object store such as S3, the pattern described in the previous section may incur significant latency while scanning the metadata of the array hierarchy, even though each individual metadata object is small. For cases such as these, once the data are static and can be regarded as read-only, at least for the metadata/structure of the array hierarchy, the many metadata objects can be consolidated into a single one via [zarr.convenience.consolidate_metadata()](https://zarr.readthedocs.io/en/stable/api/convenience.html#zarr.convenience.consolidate_metadata). Doing this can greatly increase the speed of reading the array metadata, e.g.:" ] }, { @@ -2047,7 +2047,7 @@ "source": [ "This creates a special key with a copy of all of the metadata from all of the metadata objects in the store.\n", "\n", - "Later, to open a Zarr store with consolidated metadata, use [```zarr.convenience.open_consolidated()```](https://zarr.readthedocs.io/en/stable/api/convenience.html#zarr.convenience.open_consolidated), e.g.:" + "Later, to open a Zarr store with consolidated metadata, use [zarr.convenience.open_consolidated()](https://zarr.readthedocs.io/en/stable/api/convenience.html#zarr.convenience.open_consolidated), e.g.:" ] }, { @@ -2069,9 +2069,9 @@ "source": [ "This uses the special key to read all of the metadata in a single call to the backend storage.\n", "\n", - "Note that, the hierarchy could still be opened in the normal way and altered, causing the consolidated metadata to become out of sync with the real state of the array hierarchy. In this case, [```zarr.convenience.consolidate_metadata()```](https://zarr.readthedocs.io/en/stable/api/convenience.html#zarr.convenience.consolidate_metadata) would need to be called again.\n", + "Note that, the hierarchy could still be opened in the normal way and altered, causing the consolidated metadata to become out of sync with the real state of the array hierarchy. In this case, [zarr.convenience.consolidate_metadata()](https://zarr.readthedocs.io/en/stable/api/convenience.html#zarr.convenience.consolidate_metadata) would need to be called again.\n", "\n", - "To protect against consolidated metadata accidentally getting out of sync, the root group returned by [```zarr.convenience.open_consolidated()```](https://zarr.readthedocs.io/en/stable/api/convenience.html#zarr.convenience.open_consolidated) is read-only for the metadata, meaning that no new groups or arrays can be created, and arrays cannot be resized. However, data values with arrays can still be updated." 
+ "To protect against consolidated metadata accidentally getting out of sync, the root group returned by [zarr.convenience.open_consolidated()](https://zarr.readthedocs.io/en/stable/api/convenience.html#zarr.convenience.open_consolidated) is read-only for the metadata, meaning that no new groups or arrays can be created, and arrays cannot be resized. However, data values with arrays can still be updated." ] }, { @@ -2082,7 +2082,7 @@ "source": [ "## Copying/Migrating Data\n", "\n", - "If you have some data in an HDF5 file and would like to copy some or all of it into a Zarr group, or vice-versa, the [```zarr.convenience.copy()```](https://zarr.readthedocs.io/en/stable/api/convenience.html#zarr.convenience.copy) and [```zarr.convenience.copy_all()```](https://zarr.readthedocs.io/en/stable/api/convenience.html#zarr.convenience.copy_all) functions can be used. Here’s an example copying a group named ‘foo’ from an HDF5 file to a Zarr group:" + "If you have some data in an HDF5 file and would like to copy some or all of it into a Zarr group, or vice-versa, the [zarr.convenience.copy()](https://zarr.readthedocs.io/en/stable/api/convenience.html#zarr.convenience.copy) and [zarr.convenience.copy_all()](https://zarr.readthedocs.io/en/stable/api/convenience.html#zarr.convenience.copy_all) functions can be used. Here’s an example copying a group named ‘foo’ from an HDF5 file to a Zarr group:" ] }, { @@ -2176,7 +2176,7 @@ "id": "ttbwjeTbbW58" }, "source": [ - "If rather than copying a single group or array you would like to copy all groups and arrays, use [```zarr.convenience.copy_all()```](https://zarr.readthedocs.io/en/stable/api/convenience.html#zarr.convenience.copy_all), e.g.:" + "If rather than copying a single group or array you would like to copy all groups and arrays, use [zarr.convenience.copy_all()](https://zarr.readthedocs.io/en/stable/api/convenience.html#zarr.convenience.copy_all), e.g.:" ] }, { @@ -2230,7 +2230,7 @@ "id": "J61LvFlNcz1O" }, "source": [ - "If you need to copy data between two Zarr groups, the [```zarr.convenience.copy()```](https://zarr.readthedocs.io/en/stable/api/convenience.html#zarr.convenience.copy) and [```zarr.convenience.copy_all()```](https://zarr.readthedocs.io/en/stable/api/convenience.html#zarr.convenience.copy_all) functions can be used and provide the most flexibility. However, if you want to copy data in the most efficient way possible, without changing any configuration options, the [```zarr.convenience.copy_store()```](https://zarr.readthedocs.io/en/stable/api/convenience.html#zarr.convenience.copy_store) function can be used. This function copies data directly between the underlying stores, without any decompression or re-compression, and so should be faster. E.g.:" + "If you need to copy data between two Zarr groups, the [zarr.convenience.copy()](https://zarr.readthedocs.io/en/stable/api/convenience.html#zarr.convenience.copy) and [zarr.convenience.copy_all()](https://zarr.readthedocs.io/en/stable/api/convenience.html#zarr.convenience.copy_all) functions can be used and provide the most flexibility. However, if you want to copy data in the most efficient way possible, without changing any configuration options, the [zarr.convenience.copy_store()](https://zarr.readthedocs.io/en/stable/api/convenience.html#zarr.convenience.copy_store) function can be used. This function copies data directly between the underlying stores, without any decompression or re-compression, and so should be faster. 
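
A minimal sketch using in-memory stores (illustrative only):

```python
import zarr

source = zarr.storage.MemoryStore()
zarr.group(store=source).create_dataset('foo/bar', shape=(10,), dtype='i4')

dest = zarr.storage.MemoryStore()
# Keys are copied byte-for-byte: chunks are never decompressed en route.
zarr.copy_store(source, dest)  # returns (n_copied, n_skipped, n_bytes_copied)
```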
E.g.:" ] }, { @@ -3121,7 +3121,7 @@ "\n", "Zarr arrays and groups can be pickled, as long as the underlying store object can be pickled. Instances of any of the storage classes provided in the ```zarr.storage``` module can be pickled, as can the built-in ```dict``` class which can also be used for storage.\n", "\n", - "Note that if an array or group is backed by an in-memory store like a ```dict``` or [```zarr.storage.MemoryStore```](https://zarr.readthedocs.io/en/stable/api/storage.html#zarr.storage.MemoryStore), then when it is pickled all of the store data will be included in the pickled data. However, if an array or group is backed by a persistent store like a [```zarr.storage.DirectoryStore```](https://zarr.readthedocs.io/en/stable/api/storage.html#zarr.storage.DirectoryStore), [```zarr.storage.ZipStore```](https://zarr.readthedocs.io/en/stable/api/storage.html#zarr.storage.ZipStore) or [```zarr.storage.DBMStore```](https://zarr.readthedocs.io/en/stable/api/storage.html#zarr.storage.DBMStore) then the store data **are not** pickled. The only thing that is pickled is the necessary parameters to allow the store to re-open any underlying files or databases upon being unpickled.\n", + "Note that if an array or group is backed by an in-memory store like a ```dict``` or [zarr.storage.MemoryStore](https://zarr.readthedocs.io/en/stable/api/storage.html#zarr.storage.MemoryStore), then when it is pickled all of the store data will be included in the pickled data. However, if an array or group is backed by a persistent store like a [zarr.storage.DirectoryStore](https://zarr.readthedocs.io/en/stable/api/storage.html#zarr.storage.DirectoryStore), [zarr.storage.ZipStore](https://zarr.readthedocs.io/en/stable/api/storage.html#zarr.storage.ZipStore) or [zarr.storage.DBMStore](https://zarr.readthedocs.io/en/stable/api/storage.html#zarr.storage.DBMStore) then the store data **are not** pickled. 
The only thing that is pickled is the necessary parameters to allow the store to re-open any underlying files or databases upon being unpickled.\n", "\n", "E.g., pickle/unpickle an in-memory array:" ] From 7641814a405c69ad4db4b6e0cdaeb57090f65799 Mon Sep 17 00:00:00 2001 From: GbotemiB Date: Wed, 12 Oct 2022 23:41:42 +0100 Subject: [PATCH 11/11] recent changes --- docs/conf.py | 3 ++- docs/data/example.n5/attributes.json | 3 --- docs/data/example.sqldb | Bin 40960 -> 0 bytes docs/data/example.zarr/.zgroup | 3 --- docs/data/example.zip | Bin 34105 -> 0 bytes docs/data/group.zarr/.zgroup | 3 --- docs/data/group.zarr/foo/.zgroup | 3 --- docs/data/group.zarr/foo/bar/.zgroup | 3 --- docs/data/group.zarr/foo/bar/baz/.zarray | 22 ---------------------- docs/tutorial_nb.ipynb | 4 ++-- 10 files changed, 4 insertions(+), 40 deletions(-) delete mode 100644 docs/data/example.n5/attributes.json delete mode 100644 docs/data/example.sqldb delete mode 100644 docs/data/example.zarr/.zgroup delete mode 100644 docs/data/example.zip delete mode 100644 docs/data/group.zarr/.zgroup delete mode 100644 docs/data/group.zarr/foo/.zgroup delete mode 100644 docs/data/group.zarr/foo/bar/.zgroup delete mode 100644 docs/data/group.zarr/foo/bar/baz/.zarray diff --git a/docs/conf.py b/docs/conf.py index 3dcede71ac..b884b84d8c 100644 --- a/docs/conf.py +++ b/docs/conf.py @@ -52,7 +52,8 @@ numpydoc_class_members_toctree = False issues_github_path = 'zarr-developers/zarr-python' -nbsphinx_execute = 'never' +nbsphinx_execute = 'auto' +nbsphinx_allow_errors = True #Handling errors with rendering notebook #nbsphinx_allow_errors = True diff --git a/docs/data/example.n5/attributes.json b/docs/data/example.n5/attributes.json deleted file mode 100644 index a659b3e01f..0000000000 --- a/docs/data/example.n5/attributes.json +++ /dev/null @@ -1,3 +0,0 @@ -{ - "n5": "2.0.0" -} \ No newline at end of file diff --git a/docs/data/example.sqldb b/docs/data/example.sqldb deleted file mode 100644 index f6b120d57b90eb28a1a982e49279b82e1de42500..0000000000000000000000000000000000000000 GIT binary patch literal 0 HcmV?d00001 literal 40960 zcmeI5%~KO+9LIN)5XingMn$b!OhW6s%kJh~sS=b{1(X23;2SO>AY(KVz-i6&udDva^`A#(7XEzCB z<)OL7YGS6|c;YT5($X$TmZiQ#LXxC8zAAj(Uc&rNWE=ROa^U{k?#@Z(t*biwO$xD> z5_`$Mf7^y)TqGb85DAC`L;@lKk$^}*Bp?zH35Wzl0#p(x$w(&K}e)R7u?05Dn+hniU&u<3&L9vJg zL;@lKk$^}*Bp?zH35Wzl0wMvCfJi_j@SY?PyT>1B$1uNXKhJNXJ^ZFE!EYjQ{{sJP zY*}Kzu%FmETVtzig)K9W%`=ykS&7*!$>K~lHjNEq-B>eLjTK|r@QiuGHOfZGu#Kb< zH)MTN-_Y0fHGNfI(U)~kpVwWzte13KPwH`9);6^bZCzW_R<#vvS@X1c&DF|UNwc-2 z7T08TQ{7P4)irfhT~U`+Pn}m?wXBv@TTQBQRaQ2Y4P{+fQ&yD~Wm)l*dBs)AN=dPm zq!L%;*k)`awjNuHt;SZkj#xwjA_0+rNI)bY5)cWzKMAmKw>(p?_e{Bso}yWtMJ~)B z=c~xMY2<7LIWvWvc9ETj$o3?%^&rRvvv?o5a1S|u7ddwaIXi)zxs9B@h3wo!wr?O? 
z<3Y}w#WCcIvwM&;yOGmz zWM>z$&5$i4$d*~ukqa7fUPaC+$k`ZjrUN;X|>l`#OJRj7wFC{lLC6uIPi>uzFo7$HqEtwBL$OwB3!|4^M_%xxzR3 z_lwtzN4meB-hNrL&2&gkM*6;$B!7Ly54@MJ4quO)l%?12wsei0^q1CN{Idw}w$tmb zk(S1PxtG`6h4v0DdxrKlEqep)En4<^(4PNyNy}bC`vNU{747r1>=m@n(Xx-BeU_Ge z2ij+7*|(!Tv91?IdtzO$4eg0_y$ITq>Uv*23P(#=;f^;o7Czj)Rc#_O3UktG-0);@01lT6pBPjt5Z$WOy(ZgwcA} z{+kcz^8-1`mq$b4?p{CWtvUQw$bZ`|C5I37{=+u@{GZ8P5(^m-J*su6ilW7gj$Q4$ zqkG%-MGk}yh7R)|6J5M!BKS|*F^N_9BWO>o!grxPu?qho+7qkrhtZx`g+GM$#47wj zv?o^KKR|n875)I)ldAAubAQmDUwxx>;$k1#6YF}NXiu!`?L~WHU2hNC6YF}r(VkS- z6Z8L_PHlM2p5SvML+yt_Qaa-sh~Z7A0MqId=l-6HQ^IzPpkUX|njfP*v9q2Zp*^v#*NyhXx?T$HiFLhX(4K!Lf>PHL^Z%Xa z|L23xjV!h6|DT{eu_oM$_Qaa-IkYF%gwLWqu_jzZdtyzvfcC_ia31Z6HQ^lE6KlfR zpgsR|HKiu(H8W^W?5t-R?TK|g2knVw0}?Pps=*2-@@Syrk6i#QcBf`TxD(bE81*`u|yN33px}HD(SKIcz<@^73nj4Fsi3CIfB7y&70$y`A_?p0f4;QT_ zJcIVcns62Ei8bMAv?tbtD`-!w2~VLtsV3|-U9=~5*7G6S6FciUiT1?KdOkpVVrM;cK>#_v3&U^9VGtH3g z!0(%xXP(*oTH5)tL#=I_L*5b|IXt}m!l_Hwo7diYxmhENla+F9_H;+LS)9dOr7~75 zSEh4w&c5z=$C>P!@RbHGmdm}W0iIfTVPH5M3VG|*jRD4U6*Gw0ac-e;6#1#z%*m=b zfaCR}@{TMPjqb%y>aRzaH#zz8^lYV2t(Gf>SstT)HsfwNf3jLyESSxJJ9)P-UvSOF zls~4EpUF*o|Il$4lMZYzO?Vs4P}Mwcu1?j8Mb`sj^= zWAiz;1_$D8oGVnkvz)29Zi6mYCJNq3ICnMn6j0ViXEiLMCaUoK(o+;^{E zG|78w^AdVjAR39bgm<(ZI3EgmtM}JlY2s$>>IsJ~1zS3^J>F8MLJG?4rLnL7h+5g} zwL^(96uT*LhGK`3U?_GdNrqyFl42-!D18jY4kgV{>`*cc#SW#Pp{x!iMx(VgKNg|U zT3`FbA~aed6dJ7%3XN6>g+?ocLZcNzq0tJV&}fBFXtY)+aT={{v~e1(6bg-2xP54} zLMSv^Aru;|5DJY}2!%!~ghHbgLZQ)Gp(JRuw!9^1v{EQETH*Gg(F&o^XoXN{v_dE} zS|Jn~tq=;0RtSYgYlV`e(c1Esq|r*D&}fC*hej)eLZcNzq0tJV&}fBFXtY8oG+H4P z8m$#dibiY8TZ%?2g+ikhZXX)05DJY}2!%!~ghHbgLZQ(Lq0nfBP-wJPD19_qTi*I; zv{EQETH*Gg(F&o^XoXN{v_dE}S|Jn~tq=;0RtSYgYlV`g(c1EsrqN2F&}fC*hej)e zLZcNzq0tJV&}fBFXtY8oG+H4P8m$#dhDK}4TZTp}g+ikhZXX)05DJY}2!%!~ghHbg zLZQ(Lq0nfBP-wJPDE%~ATi*I}f+V9vs}?j$k4_Sk{oNsm`sA$fww;-r!&bl5170v0~ zgP@{0iM2M5WG3P{Rk?Wb8sJmisp&DnZ4 zf{Nzsxd%Z-a}L~#prSd$hY(b}=bX3?K}B<(K8&EEIj;;MsA$exM-Wsr=fk5uXnu_$ zcyqqIA3;TPetG~wMRWejBB*H2riTzzG-vn22r8O0cnm>Ba~>Q