From c60881e5da05e533cdca6dfdae56e10e34e816aa Mon Sep 17 00:00:00 2001 From: Divyesh Narayanan <47829318+Div12345@users.noreply.github.com> Date: Fri, 3 Dec 2021 14:45:51 +0530 Subject: [PATCH 01/19] Set download dir test and example (#249) * Update to dataset_search call in FilterBank Motor Imagery * Removing completed #fixme * Removing total_classes argument from dataset_search call in FilterBank MI This was earlier deprecated in https://github.com/NeuroTechX/moabb/commit/55f77ae305e1f0960e781a57895cb34ad6af0c62# * set_download_dir test and example * adding pre-commit modifications * Update whats_new.rst * Update examples/changing_download_directory.py Co-authored-by: Sylvain Chevallier * Update examples/changing_download_directory.py Co-authored-by: Sylvain Chevallier --- docs/source/whats_new.rst | 2 +- examples/changing_download_directory.py | 33 +++++++++++++++++++++++++ moabb/tests/util_tests.py | 18 ++++++++++++++ 3 files changed, 52 insertions(+), 1 deletion(-) create mode 100644 examples/changing_download_directory.py diff --git a/docs/source/whats_new.rst b/docs/source/whats_new.rst index 1c650880f..ac55800f4 100644 --- a/docs/source/whats_new.rst +++ b/docs/source/whats_new.rst @@ -18,7 +18,7 @@ Develop branch Enhancements ~~~~~~~~~~~~ -- None +- Adding Test and Example for set_download_dir (:gh:`249` by `Divyesh Narayanan`_) Bugs ~~~~ diff --git a/examples/changing_download_directory.py b/examples/changing_download_directory.py new file mode 100644 index 000000000..bc6f4228a --- /dev/null +++ b/examples/changing_download_directory.py @@ -0,0 +1,33 @@ +""" +=========================== +Change Download Directory +=========================== + +This is a minimal example to demonstrate how to change the default data download directory to a custom +path/location. +""" +# Authors: Divyesh Narayanan +# +# License: BSD (3-clause) + +import os.path as osp + +from mne import get_config + +from moabb.utils import set_download_dir + + +# You can choose to change the download directory to any path of your choice. +# If the path/folder doesn't exist, it will be created for you. 
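+# Note: judging from the unit test added alongside this example (see
+# moabb/tests/util_tests.py below), ``set_download_dir`` boils down to
+# updating the "MNE_DATA" key of the MNE config and creating the folder if
+# needed, i.e. roughly equivalent to this sketch:
+#
+#     from mne import set_config
+#     set_config("MNE_DATA", new_path)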
+ +original_path = get_config("MNE_DATA") +print(f"The download directory is currently {original_path}") +new_path = osp.join(osp.expanduser("~"), "mne_data_test") +set_download_dir(new_path) + +# To see if the mne config has been changed correctly +check_path = get_config("MNE_DATA") +print(f"Now the download directory has been changed to {check_path}") + +# Set the directory back to default location +set_download_dir(original_path) diff --git a/moabb/tests/util_tests.py b/moabb/tests/util_tests.py index f9c918a89..dc2ff1faf 100644 --- a/moabb/tests/util_tests.py +++ b/moabb/tests/util_tests.py @@ -1,6 +1,10 @@ +import os.path as osp import unittest +from mne import get_config + from moabb.datasets import utils +from moabb.utils import set_download_dir class Test_Utils(unittest.TestCase): @@ -47,6 +51,20 @@ def test_dataset_channel_search(self): raw = sess1[list(sess1.keys())[0]] self.assertFalse(set(chans) <= set(raw.info["ch_names"])) + def test_set_download_dir(self): + original_path = get_config("MNE_DATA") + new_path = osp.join(osp.expanduser("~"), "mne_data_test") + set_download_dir(new_path) + + # Check if the mne config has been changed correctly + self.assertTrue(get_config("MNE_DATA") == new_path) + + # Check if the folder has been created + self.assertTrue(osp.isdir(new_path)) + + # Set back to usual + set_download_dir(original_path) + if __name__ == "__main__": unittest.main() From b5451c40a202b3dcb05aa74c92f8064cad07ba56 Mon Sep 17 00:00:00 2001 From: "dependabot[bot]" <49699333+dependabot[bot]@users.noreply.github.com> Date: Fri, 21 Jan 2022 10:55:10 +0100 Subject: [PATCH 02/19] Bump pillow from 8.4.0 to 9.0.0 (#253) Bumps [pillow](https://github.com/python-pillow/Pillow) from 8.4.0 to 9.0.0. - [Release notes](https://github.com/python-pillow/Pillow/releases) - [Changelog](https://github.com/python-pillow/Pillow/blob/main/CHANGES.rst) - [Commits](https://github.com/python-pillow/Pillow/compare/8.4.0...9.0.0) --- updated-dependencies: - dependency-name: pillow dependency-type: indirect ... 
Signed-off-by: dependabot[bot] Co-authored-by: dependabot[bot] <49699333+dependabot[bot]@users.noreply.github.com> --- poetry.lock | 92 ++++++++++++++++++++++++++++------------------------- 1 file changed, 49 insertions(+), 43 deletions(-) diff --git a/poetry.lock b/poetry.lock index 27986b374..68dc5e031 100644 --- a/poetry.lock +++ b/poetry.lock @@ -339,11 +339,11 @@ test = ["pytest (>=4.0.2)", "pytest-xdist", "hypothesis (>=3.58)"] [[package]] name = "pillow" -version = "8.4.0" +version = "9.0.0" description = "Python Imaging Library (Fork)" category = "main" optional = false -python-versions = ">=3.6" +python-versions = ">=3.7" [[package]] name = "platformdirs" @@ -933,6 +933,9 @@ markupsafe = [ {file = "MarkupSafe-2.0.1-cp310-cp310-manylinux_2_17_aarch64.manylinux2014_aarch64.whl", hash = "sha256:2d7d807855b419fc2ed3e631034685db6079889a1f01d5d9dac950f764da3dad"}, {file = "MarkupSafe-2.0.1-cp310-cp310-manylinux_2_5_i686.manylinux1_i686.manylinux_2_12_i686.manylinux2010_i686.whl", hash = "sha256:add36cb2dbb8b736611303cd3bfcee00afd96471b09cda130da3581cbdc56a6d"}, {file = "MarkupSafe-2.0.1-cp310-cp310-manylinux_2_5_x86_64.manylinux1_x86_64.manylinux_2_12_x86_64.manylinux2010_x86_64.whl", hash = "sha256:168cd0a3642de83558a5153c8bd34f175a9a6e7f6dc6384b9655d2697312a646"}, + {file = "MarkupSafe-2.0.1-cp310-cp310-musllinux_1_1_aarch64.whl", hash = "sha256:4dc8f9fb58f7364b63fd9f85013b780ef83c11857ae79f2feda41e270468dd9b"}, + {file = "MarkupSafe-2.0.1-cp310-cp310-musllinux_1_1_i686.whl", hash = "sha256:20dca64a3ef2d6e4d5d615a3fd418ad3bde77a47ec8a23d984a12b5b4c74491a"}, + {file = "MarkupSafe-2.0.1-cp310-cp310-musllinux_1_1_x86_64.whl", hash = "sha256:cdfba22ea2f0029c9261a4bd07e830a8da012291fbe44dc794e488b6c9bb353a"}, {file = "MarkupSafe-2.0.1-cp310-cp310-win32.whl", hash = "sha256:99df47edb6bda1249d3e80fdabb1dab8c08ef3975f69aed437cb69d0a5de1e28"}, {file = "MarkupSafe-2.0.1-cp310-cp310-win_amd64.whl", hash = "sha256:e0f138900af21926a02425cf736db95be9f4af72ba1bb21453432a07f6082134"}, {file = "MarkupSafe-2.0.1-cp36-cp36m-macosx_10_9_x86_64.whl", hash = "sha256:f9081981fe268bd86831e5c75f7de206ef275defcb82bc70740ae6dc507aee51"}, @@ -944,6 +947,9 @@ markupsafe = [ {file = "MarkupSafe-2.0.1-cp36-cp36m-manylinux_2_17_aarch64.manylinux2014_aarch64.whl", hash = "sha256:bf5d821ffabf0ef3533c39c518f3357b171a1651c1ff6827325e4489b0e46c3c"}, {file = "MarkupSafe-2.0.1-cp36-cp36m-manylinux_2_5_i686.manylinux1_i686.manylinux_2_12_i686.manylinux2010_i686.whl", hash = "sha256:0d4b31cc67ab36e3392bbf3862cfbadac3db12bdd8b02a2731f509ed5b829724"}, {file = "MarkupSafe-2.0.1-cp36-cp36m-manylinux_2_5_x86_64.manylinux1_x86_64.manylinux_2_12_x86_64.manylinux2010_x86_64.whl", hash = "sha256:baa1a4e8f868845af802979fcdbf0bb11f94f1cb7ced4c4b8a351bb60d108145"}, + {file = "MarkupSafe-2.0.1-cp36-cp36m-musllinux_1_1_aarch64.whl", hash = "sha256:deb993cacb280823246a026e3b2d81c493c53de6acfd5e6bfe31ab3402bb37dd"}, + {file = "MarkupSafe-2.0.1-cp36-cp36m-musllinux_1_1_i686.whl", hash = "sha256:63f3268ba69ace99cab4e3e3b5840b03340efed0948ab8f78d2fd87ee5442a4f"}, + {file = "MarkupSafe-2.0.1-cp36-cp36m-musllinux_1_1_x86_64.whl", hash = "sha256:8d206346619592c6200148b01a2142798c989edcb9c896f9ac9722a99d4e77e6"}, {file = "MarkupSafe-2.0.1-cp36-cp36m-win32.whl", hash = "sha256:6c4ca60fa24e85fe25b912b01e62cb969d69a23a5d5867682dd3e80b5b02581d"}, {file = "MarkupSafe-2.0.1-cp36-cp36m-win_amd64.whl", hash = "sha256:b2f4bf27480f5e5e8ce285a8c8fd176c0b03e93dcc6646477d4630e83440c6a9"}, {file = "MarkupSafe-2.0.1-cp37-cp37m-macosx_10_9_x86_64.whl", hash = 
"sha256:0717a7390a68be14b8c793ba258e075c6f4ca819f15edfc2a3a027c823718567"}, @@ -955,6 +961,9 @@ markupsafe = [ {file = "MarkupSafe-2.0.1-cp37-cp37m-manylinux_2_17_aarch64.manylinux2014_aarch64.whl", hash = "sha256:e9936f0b261d4df76ad22f8fee3ae83b60d7c3e871292cd42f40b81b70afae85"}, {file = "MarkupSafe-2.0.1-cp37-cp37m-manylinux_2_5_i686.manylinux1_i686.manylinux_2_12_i686.manylinux2010_i686.whl", hash = "sha256:2a7d351cbd8cfeb19ca00de495e224dea7e7d919659c2841bbb7f420ad03e2d6"}, {file = "MarkupSafe-2.0.1-cp37-cp37m-manylinux_2_5_x86_64.manylinux1_x86_64.manylinux_2_12_x86_64.manylinux2010_x86_64.whl", hash = "sha256:60bf42e36abfaf9aff1f50f52644b336d4f0a3fd6d8a60ca0d054ac9f713a864"}, + {file = "MarkupSafe-2.0.1-cp37-cp37m-musllinux_1_1_aarch64.whl", hash = "sha256:d6c7ebd4e944c85e2c3421e612a7057a2f48d478d79e61800d81468a8d842207"}, + {file = "MarkupSafe-2.0.1-cp37-cp37m-musllinux_1_1_i686.whl", hash = "sha256:f0567c4dc99f264f49fe27da5f735f414c4e7e7dd850cfd8e69f0862d7c74ea9"}, + {file = "MarkupSafe-2.0.1-cp37-cp37m-musllinux_1_1_x86_64.whl", hash = "sha256:89c687013cb1cd489a0f0ac24febe8c7a666e6e221b783e53ac50ebf68e45d86"}, {file = "MarkupSafe-2.0.1-cp37-cp37m-win32.whl", hash = "sha256:a30e67a65b53ea0a5e62fe23682cfe22712e01f453b95233b25502f7c61cb415"}, {file = "MarkupSafe-2.0.1-cp37-cp37m-win_amd64.whl", hash = "sha256:611d1ad9a4288cf3e3c16014564df047fe08410e628f89805e475368bd304914"}, {file = "MarkupSafe-2.0.1-cp38-cp38-macosx_10_9_universal2.whl", hash = "sha256:5bb28c636d87e840583ee3adeb78172efc47c8b26127267f54a9c0ec251d41a9"}, @@ -967,6 +976,9 @@ markupsafe = [ {file = "MarkupSafe-2.0.1-cp38-cp38-manylinux_2_17_aarch64.manylinux2014_aarch64.whl", hash = "sha256:6fcf051089389abe060c9cd7caa212c707e58153afa2c649f00346ce6d260f1b"}, {file = "MarkupSafe-2.0.1-cp38-cp38-manylinux_2_5_i686.manylinux1_i686.manylinux_2_12_i686.manylinux2010_i686.whl", hash = "sha256:5855f8438a7d1d458206a2466bf82b0f104a3724bf96a1c781ab731e4201731a"}, {file = "MarkupSafe-2.0.1-cp38-cp38-manylinux_2_5_x86_64.manylinux1_x86_64.manylinux_2_12_x86_64.manylinux2010_x86_64.whl", hash = "sha256:3dd007d54ee88b46be476e293f48c85048603f5f516008bee124ddd891398ed6"}, + {file = "MarkupSafe-2.0.1-cp38-cp38-musllinux_1_1_aarch64.whl", hash = "sha256:aca6377c0cb8a8253e493c6b451565ac77e98c2951c45f913e0b52facdcff83f"}, + {file = "MarkupSafe-2.0.1-cp38-cp38-musllinux_1_1_i686.whl", hash = "sha256:04635854b943835a6ea959e948d19dcd311762c5c0c6e1f0e16ee57022669194"}, + {file = "MarkupSafe-2.0.1-cp38-cp38-musllinux_1_1_x86_64.whl", hash = "sha256:6300b8454aa6930a24b9618fbb54b5a68135092bc666f7b06901f897fa5c2fee"}, {file = "MarkupSafe-2.0.1-cp38-cp38-win32.whl", hash = "sha256:023cb26ec21ece8dc3907c0e8320058b2e0cb3c55cf9564da612bc325bed5e64"}, {file = "MarkupSafe-2.0.1-cp38-cp38-win_amd64.whl", hash = "sha256:984d76483eb32f1bcb536dc27e4ad56bba4baa70be32fa87152832cdd9db0833"}, {file = "MarkupSafe-2.0.1-cp39-cp39-macosx_10_9_universal2.whl", hash = "sha256:2ef54abee730b502252bcdf31b10dacb0a416229b72c18b19e24a4509f273d26"}, @@ -979,6 +991,9 @@ markupsafe = [ {file = "MarkupSafe-2.0.1-cp39-cp39-manylinux_2_17_aarch64.manylinux2014_aarch64.whl", hash = "sha256:c47adbc92fc1bb2b3274c4b3a43ae0e4573d9fbff4f54cd484555edbf030baf1"}, {file = "MarkupSafe-2.0.1-cp39-cp39-manylinux_2_5_i686.manylinux1_i686.manylinux_2_12_i686.manylinux2010_i686.whl", hash = "sha256:37205cac2a79194e3750b0af2a5720d95f786a55ce7df90c3af697bfa100eaac"}, {file = "MarkupSafe-2.0.1-cp39-cp39-manylinux_2_5_x86_64.manylinux1_x86_64.manylinux_2_12_x86_64.manylinux2010_x86_64.whl", hash = 
"sha256:1f2ade76b9903f39aa442b4aadd2177decb66525062db244b35d71d0ee8599b6"}, + {file = "MarkupSafe-2.0.1-cp39-cp39-musllinux_1_1_aarch64.whl", hash = "sha256:4296f2b1ce8c86a6aea78613c34bb1a672ea0e3de9c6ba08a960efe0b0a09047"}, + {file = "MarkupSafe-2.0.1-cp39-cp39-musllinux_1_1_i686.whl", hash = "sha256:9f02365d4e99430a12647f09b6cc8bab61a6564363f313126f775eb4f6ef798e"}, + {file = "MarkupSafe-2.0.1-cp39-cp39-musllinux_1_1_x86_64.whl", hash = "sha256:5b6d930f030f8ed98e3e6c98ffa0652bdb82601e7a016ec2ab5d7ff23baa78d1"}, {file = "MarkupSafe-2.0.1-cp39-cp39-win32.whl", hash = "sha256:10f82115e21dc0dfec9ab5c0223652f7197feb168c940f3ef61563fc2d6beb74"}, {file = "MarkupSafe-2.0.1-cp39-cp39-win_amd64.whl", hash = "sha256:693ce3f9e70a6cf7d2fb9e6c9d8b204b6b39897a2c4a1aa65728d5ac97dcc1d8"}, {file = "MarkupSafe-2.0.1.tar.gz", hash = "sha256:594c67807fb16238b30c44bdf74f36c02cdf22d1c8cda91ef8a0ed8dabf5620a"}, @@ -1083,47 +1098,38 @@ pandas = [ {file = "pandas-1.1.5.tar.gz", hash = "sha256:f10fc41ee3c75a474d3bdf68d396f10782d013d7f67db99c0efbfd0acb99701b"}, ] pillow = [ - {file = "Pillow-8.4.0-cp310-cp310-macosx_10_10_universal2.whl", hash = "sha256:81f8d5c81e483a9442d72d182e1fb6dcb9723f289a57e8030811bac9ea3fef8d"}, - {file = "Pillow-8.4.0-cp310-cp310-macosx_11_0_arm64.whl", hash = "sha256:3f97cfb1e5a392d75dd8b9fd274d205404729923840ca94ca45a0af57e13dbe6"}, - {file = "Pillow-8.4.0-cp310-cp310-manylinux_2_17_aarch64.manylinux2014_aarch64.whl", hash = "sha256:eb9fc393f3c61f9054e1ed26e6fe912c7321af2f41ff49d3f83d05bacf22cc78"}, - {file = "Pillow-8.4.0-cp310-cp310-manylinux_2_17_i686.manylinux2014_i686.whl", hash = "sha256:d82cdb63100ef5eedb8391732375e6d05993b765f72cb34311fab92103314649"}, - {file = "Pillow-8.4.0-cp310-cp310-manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:62cc1afda735a8d109007164714e73771b499768b9bb5afcbbee9d0ff374b43f"}, - {file = "Pillow-8.4.0-cp310-cp310-win32.whl", hash = "sha256:e3dacecfbeec9a33e932f00c6cd7996e62f53ad46fbe677577394aaa90ee419a"}, - {file = "Pillow-8.4.0-cp310-cp310-win_amd64.whl", hash = "sha256:620582db2a85b2df5f8a82ddeb52116560d7e5e6b055095f04ad828d1b0baa39"}, - {file = "Pillow-8.4.0-cp36-cp36m-macosx_10_10_x86_64.whl", hash = "sha256:1bc723b434fbc4ab50bb68e11e93ce5fb69866ad621e3c2c9bdb0cd70e345f55"}, - {file = "Pillow-8.4.0-cp36-cp36m-manylinux_2_17_aarch64.manylinux2014_aarch64.whl", hash = "sha256:72cbcfd54df6caf85cc35264c77ede902452d6df41166010262374155947460c"}, - {file = "Pillow-8.4.0-cp36-cp36m-manylinux_2_17_i686.manylinux2014_i686.whl", hash = "sha256:70ad9e5c6cb9b8487280a02c0ad8a51581dcbbe8484ce058477692a27c151c0a"}, - {file = "Pillow-8.4.0-cp36-cp36m-manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:25a49dc2e2f74e65efaa32b153527fc5ac98508d502fa46e74fa4fd678ed6645"}, - {file = "Pillow-8.4.0-cp36-cp36m-win32.whl", hash = "sha256:93ce9e955cc95959df98505e4608ad98281fff037350d8c2671c9aa86bcf10a9"}, - {file = "Pillow-8.4.0-cp36-cp36m-win_amd64.whl", hash = "sha256:2e4440b8f00f504ee4b53fe30f4e381aae30b0568193be305256b1462216feff"}, - {file = "Pillow-8.4.0-cp37-cp37m-macosx_10_10_x86_64.whl", hash = "sha256:8c803ac3c28bbc53763e6825746f05cc407b20e4a69d0122e526a582e3b5e153"}, - {file = "Pillow-8.4.0-cp37-cp37m-manylinux_2_17_aarch64.manylinux2014_aarch64.whl", hash = "sha256:c8a17b5d948f4ceeceb66384727dde11b240736fddeda54ca740b9b8b1556b29"}, - {file = "Pillow-8.4.0-cp37-cp37m-manylinux_2_17_i686.manylinux2014_i686.whl", hash = "sha256:1394a6ad5abc838c5cd8a92c5a07535648cdf6d09e8e2d6df916dfa9ea86ead8"}, - {file = 
"Pillow-8.4.0-cp37-cp37m-manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:792e5c12376594bfcb986ebf3855aa4b7c225754e9a9521298e460e92fb4a488"}, - {file = "Pillow-8.4.0-cp37-cp37m-win32.whl", hash = "sha256:d99ec152570e4196772e7a8e4ba5320d2d27bf22fdf11743dd882936ed64305b"}, - {file = "Pillow-8.4.0-cp37-cp37m-win_amd64.whl", hash = "sha256:7b7017b61bbcdd7f6363aeceb881e23c46583739cb69a3ab39cb384f6ec82e5b"}, - {file = "Pillow-8.4.0-cp38-cp38-macosx_10_10_x86_64.whl", hash = "sha256:d89363f02658e253dbd171f7c3716a5d340a24ee82d38aab9183f7fdf0cdca49"}, - {file = "Pillow-8.4.0-cp38-cp38-macosx_11_0_arm64.whl", hash = "sha256:0a0956fdc5defc34462bb1c765ee88d933239f9a94bc37d132004775241a7585"}, - {file = "Pillow-8.4.0-cp38-cp38-manylinux_2_17_aarch64.manylinux2014_aarch64.whl", hash = "sha256:5b7bb9de00197fb4261825c15551adf7605cf14a80badf1761d61e59da347779"}, - {file = "Pillow-8.4.0-cp38-cp38-manylinux_2_17_i686.manylinux2014_i686.whl", hash = "sha256:72b9e656e340447f827885b8d7a15fc8c4e68d410dc2297ef6787eec0f0ea409"}, - {file = "Pillow-8.4.0-cp38-cp38-manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:a5a4532a12314149d8b4e4ad8ff09dde7427731fcfa5917ff16d0291f13609df"}, - {file = "Pillow-8.4.0-cp38-cp38-win32.whl", hash = "sha256:82aafa8d5eb68c8463b6e9baeb4f19043bb31fefc03eb7b216b51e6a9981ae09"}, - {file = "Pillow-8.4.0-cp38-cp38-win_amd64.whl", hash = "sha256:066f3999cb3b070a95c3652712cffa1a748cd02d60ad7b4e485c3748a04d9d76"}, - {file = "Pillow-8.4.0-cp39-cp39-macosx_10_10_x86_64.whl", hash = "sha256:5503c86916d27c2e101b7f71c2ae2cddba01a2cf55b8395b0255fd33fa4d1f1a"}, - {file = "Pillow-8.4.0-cp39-cp39-macosx_11_0_arm64.whl", hash = "sha256:4acc0985ddf39d1bc969a9220b51d94ed51695d455c228d8ac29fcdb25810e6e"}, - {file = "Pillow-8.4.0-cp39-cp39-manylinux_2_17_aarch64.manylinux2014_aarch64.whl", hash = "sha256:0b052a619a8bfcf26bd8b3f48f45283f9e977890263e4571f2393ed8898d331b"}, - {file = "Pillow-8.4.0-cp39-cp39-manylinux_2_17_i686.manylinux2014_i686.whl", hash = "sha256:493cb4e415f44cd601fcec11c99836f707bb714ab03f5ed46ac25713baf0ff20"}, - {file = "Pillow-8.4.0-cp39-cp39-manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:b8831cb7332eda5dc89b21a7bce7ef6ad305548820595033a4b03cf3091235ed"}, - {file = "Pillow-8.4.0-cp39-cp39-win32.whl", hash = "sha256:5e9ac5f66616b87d4da618a20ab0a38324dbe88d8a39b55be8964eb520021e02"}, - {file = "Pillow-8.4.0-cp39-cp39-win_amd64.whl", hash = "sha256:3eb1ce5f65908556c2d8685a8f0a6e989d887ec4057326f6c22b24e8a172c66b"}, - {file = "Pillow-8.4.0-pp36-pypy36_pp73-macosx_10_10_x86_64.whl", hash = "sha256:ddc4d832a0f0b4c52fff973a0d44b6c99839a9d016fe4e6a1cb8f3eea96479c2"}, - {file = "Pillow-8.4.0-pp36-pypy36_pp73-manylinux_2_17_i686.manylinux2014_i686.whl", hash = "sha256:9a3e5ddc44c14042f0844b8cf7d2cd455f6cc80fd7f5eefbe657292cf601d9ad"}, - {file = "Pillow-8.4.0-pp36-pypy36_pp73-manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:c70e94281588ef053ae8998039610dbd71bc509e4acbc77ab59d7d2937b10698"}, - {file = "Pillow-8.4.0-pp37-pypy37_pp73-macosx_10_10_x86_64.whl", hash = "sha256:3862b7256046fcd950618ed22d1d60b842e3a40a48236a5498746f21189afbbc"}, - {file = "Pillow-8.4.0-pp37-pypy37_pp73-manylinux_2_17_i686.manylinux2014_i686.whl", hash = "sha256:a4901622493f88b1a29bd30ec1a2f683782e57c3c16a2dbc7f2595ba01f639df"}, - {file = "Pillow-8.4.0-pp37-pypy37_pp73-manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:84c471a734240653a0ec91dec0996696eea227eafe72a33bd06c92697728046b"}, - {file = "Pillow-8.4.0-pp37-pypy37_pp73-win_amd64.whl", hash = 
"sha256:244cf3b97802c34c41905d22810846802a3329ddcb93ccc432870243211c79fc"}, - {file = "Pillow-8.4.0.tar.gz", hash = "sha256:b8e2f83c56e141920c39464b852de3719dfbfb6e3c99a2d8da0edf4fb33176ed"}, + {file = "Pillow-9.0.0-cp310-cp310-macosx_10_10_universal2.whl", hash = "sha256:113723312215b25c22df1fdf0e2da7a3b9c357a7d24a93ebbe80bfda4f37a8d4"}, + {file = "Pillow-9.0.0-cp310-cp310-macosx_11_0_arm64.whl", hash = "sha256:bb47a548cea95b86494a26c89d153fd31122ed65255db5dcbc421a2d28eb3379"}, + {file = "Pillow-9.0.0-cp310-cp310-manylinux_2_17_aarch64.manylinux2014_aarch64.whl", hash = "sha256:31b265496e603985fad54d52d11970383e317d11e18e856971bdbb86af7242a4"}, + {file = "Pillow-9.0.0-cp310-cp310-manylinux_2_17_i686.manylinux2014_i686.whl", hash = "sha256:d154ed971a4cc04b93a6d5b47f37948d1f621f25de3e8fa0c26b2d44f24e3e8f"}, + {file = "Pillow-9.0.0-cp310-cp310-manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:80fe92813d208ce8aa7d76da878bdc84b90809f79ccbad2a288e9bcbeac1d9bd"}, + {file = "Pillow-9.0.0-cp310-cp310-win32.whl", hash = "sha256:d5dcea1387331c905405b09cdbfb34611050cc52c865d71f2362f354faee1e9f"}, + {file = "Pillow-9.0.0-cp310-cp310-win_amd64.whl", hash = "sha256:52abae4c96b5da630a8b4247de5428f593465291e5b239f3f843a911a3cf0105"}, + {file = "Pillow-9.0.0-cp37-cp37m-macosx_10_10_x86_64.whl", hash = "sha256:72c3110228944019e5f27232296c5923398496b28be42535e3b2dc7297b6e8b6"}, + {file = "Pillow-9.0.0-cp37-cp37m-manylinux_2_17_aarch64.manylinux2014_aarch64.whl", hash = "sha256:97b6d21771da41497b81652d44191489296555b761684f82b7b544c49989110f"}, + {file = "Pillow-9.0.0-cp37-cp37m-manylinux_2_17_i686.manylinux2014_i686.whl", hash = "sha256:72f649d93d4cc4d8cf79c91ebc25137c358718ad75f99e99e043325ea7d56100"}, + {file = "Pillow-9.0.0-cp37-cp37m-manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:7aaf07085c756f6cb1c692ee0d5a86c531703b6e8c9cae581b31b562c16b98ce"}, + {file = "Pillow-9.0.0-cp37-cp37m-win32.whl", hash = "sha256:03b27b197deb4ee400ed57d8d4e572d2d8d80f825b6634daf6e2c18c3c6ccfa6"}, + {file = "Pillow-9.0.0-cp37-cp37m-win_amd64.whl", hash = "sha256:a09a9d4ec2b7887f7a088bbaacfd5c07160e746e3d47ec5e8050ae3b2a229e9f"}, + {file = "Pillow-9.0.0-cp38-cp38-macosx_10_10_x86_64.whl", hash = "sha256:490e52e99224858f154975db61c060686df8a6b3f0212a678e5d2e2ce24675c9"}, + {file = "Pillow-9.0.0-cp38-cp38-macosx_11_0_arm64.whl", hash = "sha256:500d397ddf4bbf2ca42e198399ac13e7841956c72645513e8ddf243b31ad2128"}, + {file = "Pillow-9.0.0-cp38-cp38-manylinux_2_17_aarch64.manylinux2014_aarch64.whl", hash = "sha256:0ebd8b9137630a7bbbff8c4b31e774ff05bbb90f7911d93ea2c9371e41039b52"}, + {file = "Pillow-9.0.0-cp38-cp38-manylinux_2_17_i686.manylinux2014_i686.whl", hash = "sha256:fd0e5062f11cb3e730450a7d9f323f4051b532781026395c4323b8ad055523c4"}, + {file = "Pillow-9.0.0-cp38-cp38-manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:9f3b4522148586d35e78313db4db0df4b759ddd7649ef70002b6c3767d0fdeb7"}, + {file = "Pillow-9.0.0-cp38-cp38-win32.whl", hash = "sha256:0b281fcadbb688607ea6ece7649c5d59d4bbd574e90db6cd030e9e85bde9fecc"}, + {file = "Pillow-9.0.0-cp38-cp38-win_amd64.whl", hash = "sha256:b5050d681bcf5c9f2570b93bee5d3ec8ae4cf23158812f91ed57f7126df91762"}, + {file = "Pillow-9.0.0-cp39-cp39-macosx_10_10_x86_64.whl", hash = "sha256:c2067b3bb0781f14059b112c9da5a91c80a600a97915b4f48b37f197895dd925"}, + {file = "Pillow-9.0.0-cp39-cp39-macosx_11_0_arm64.whl", hash = "sha256:2d16b6196fb7a54aff6b5e3ecd00f7c0bab1b56eee39214b2b223a9d938c50af"}, + {file = 
"Pillow-9.0.0-cp39-cp39-manylinux_2_17_aarch64.manylinux2014_aarch64.whl", hash = "sha256:98cb63ca63cb61f594511c06218ab4394bf80388b3d66cd61d0b1f63ee0ea69f"}, + {file = "Pillow-9.0.0-cp39-cp39-manylinux_2_17_i686.manylinux2014_i686.whl", hash = "sha256:bc462d24500ba707e9cbdef436c16e5c8cbf29908278af053008d9f689f56dee"}, + {file = "Pillow-9.0.0-cp39-cp39-manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:3586e12d874ce2f1bc875a3ffba98732ebb12e18fb6d97be482bd62b56803281"}, + {file = "Pillow-9.0.0-cp39-cp39-win32.whl", hash = "sha256:68e06f8b2248f6dc8b899c3e7ecf02c9f413aab622f4d6190df53a78b93d97a5"}, + {file = "Pillow-9.0.0-cp39-cp39-win_amd64.whl", hash = "sha256:6579f9ba84a3d4f1807c4aab4be06f373017fc65fff43498885ac50a9b47a553"}, + {file = "Pillow-9.0.0-pp37-pypy37_pp73-macosx_10_10_x86_64.whl", hash = "sha256:47f5cf60bcb9fbc46011f75c9b45a8b5ad077ca352a78185bd3e7f1d294b98bb"}, + {file = "Pillow-9.0.0-pp37-pypy37_pp73-manylinux_2_17_i686.manylinux2014_i686.whl", hash = "sha256:2fd8053e1f8ff1844419842fd474fc359676b2e2a2b66b11cc59f4fa0a301315"}, + {file = "Pillow-9.0.0-pp37-pypy37_pp73-manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:6c5439bfb35a89cac50e81c751317faea647b9a3ec11c039900cd6915831064d"}, + {file = "Pillow-9.0.0-pp38-pypy38_pp73-win_amd64.whl", hash = "sha256:95545137fc56ce8c10de646074d242001a112a92de169986abd8c88c27566a05"}, + {file = "Pillow-9.0.0.tar.gz", hash = "sha256:ee6e2963e92762923956fe5d3479b1fdc3b76c83f290aad131a2f98c3df0593e"}, ] platformdirs = [ {file = "platformdirs-2.4.0-py3-none-any.whl", hash = "sha256:8868bbe3c3c80d42f20156f22e7131d2fb321f5bc86a2a345375c6481a67021d"}, From 960bafb46f0bd701eb468049a9b078cf903875e6 Mon Sep 17 00:00:00 2001 From: Sylvain Chevallier Date: Sun, 23 Jan 2022 11:39:36 +0100 Subject: [PATCH 03/19] Fix Schirrmeister2017 error (#255) * correct event loading error, renaming session and runs * add whats new --- docs/source/whats_new.rst | 2 +- moabb/datasets/schirrmeister2017.py | 6 ++++-- 2 files changed, 5 insertions(+), 3 deletions(-) diff --git a/docs/source/whats_new.rst b/docs/source/whats_new.rst index ac55800f4..23dac8fee 100644 --- a/docs/source/whats_new.rst +++ b/docs/source/whats_new.rst @@ -23,7 +23,7 @@ Enhancements Bugs ~~~~ -- None +- Correcting events management in Schirrmeister2017, renaming session and run (:gh:`255` by `Pierre Guetschel`_ and `Sylvain Chevallier`_) API changes ~~~~~~~~~~~ diff --git a/moabb/datasets/schirrmeister2017.py b/moabb/datasets/schirrmeister2017.py index bf337029f..db57957d4 100644 --- a/moabb/datasets/schirrmeister2017.py +++ b/moabb/datasets/schirrmeister2017.py @@ -87,7 +87,7 @@ def _url(prefix): def _get_single_subject_data(self, subject): train, test = [BBCIDataset(path) for path in self.data_path(subject)] sessions = {} - sessions["session_1"] = {"train": train.load(), "test": test.load()} + sessions["session_0"] = {"run_0": train.load(), "run_1": test.load()} return sessions @@ -252,5 +252,7 @@ def _add_markers(self, cnt): [0] * len(event_times_in_samples), event_classes, ] - cnt.info["events"] = np.array(event_arr).T + cnt.info["events"] = [ + dict(list=np.array(event_arr).T, channels=None), + ] return cnt From 91c471367f265444cd4497ed9bbe6c7b12ae6d51 Mon Sep 17 00:00:00 2001 From: Divyesh Narayanan <47829318+Div12345@users.noreply.github.com> Date: Tue, 25 Jan 2022 23:18:30 +0530 Subject: [PATCH 04/19] Removing dependency of Physionet MI download on mne method (#257) * Update physionet_mi.py * consistency of runs numbering * Update whats_new.rst * f-string edits 
Co-authored-by: Sylvain Chevallier * f-string edits Co-authored-by: Sylvain Chevallier Co-authored-by: Sylvain Chevallier --- docs/source/whats_new.rst | 1 + moabb/datasets/physionet_mi.py | 40 ++++++++++++++++++++++++---------- 2 files changed, 30 insertions(+), 11 deletions(-) diff --git a/docs/source/whats_new.rst b/docs/source/whats_new.rst index 23dac8fee..ce053921f 100644 --- a/docs/source/whats_new.rst +++ b/docs/source/whats_new.rst @@ -23,6 +23,7 @@ Enhancements Bugs ~~~~ +- Removing dependency on mne method for PhysionetMI data downloading, renaming runs (:gh:`257` by `Divyesh Narayanan`_) - Correcting events management in Schirrmeister2017, renaming session and run (:gh:`255` by `Pierre Guetschel`_ and `Sylvain Chevallier`_) API changes diff --git a/moabb/datasets/physionet_mi.py b/moabb/datasets/physionet_mi.py index 13f776c12..02bd84ee3 100644 --- a/moabb/datasets/physionet_mi.py +++ b/moabb/datasets/physionet_mi.py @@ -4,14 +4,13 @@ import mne import numpy as np -from mne.datasets import eegbci from mne.io import read_raw_edf from moabb.datasets.base import BaseDataset -from moabb.datasets.download import get_dataset_path +from moabb.datasets.download import data_dl, get_dataset_path -BASE_URL = "http://archive.physionet.org/pn4/eegmmidb/" +BASE_URL = "https://physionet.org/files/eegmmidb/1.0.0/" class PhysionetMI(BaseDataset): @@ -95,9 +94,7 @@ def __init__(self, imagined=True, executed=False): self.hand_runs += [3, 7, 11] def _load_one_run(self, subject, run, preload=True): - raw_fname = eegbci.load_data( - subject, runs=[run], verbose="ERROR", base_url=BASE_URL - )[0] + raw_fname = self._load_data(subject, runs=[run], verbose="ERROR")[0] raw = read_raw_edf(raw_fname, preload=preload, verbose="ERROR") raw.rename_channels(lambda x: x.strip(".")) raw.rename_channels(lambda x: x.upper()) @@ -118,6 +115,7 @@ def _get_single_subject_data(self, subject): get_dataset_path(sign, None) # hand runs + idx = 0 for run in self.hand_runs: raw = self._load_one_run(subject, run) stim = raw.annotations.description.astype(np.dtype(" Date: Thu, 27 Jan 2022 09:02:47 +0100 Subject: [PATCH 05/19] Correct MAMEM issues (#256) * switch mamem session to runs, use predictable names * update docstring in evaluation, for building documentation * update Lee2017 docstring for correct documentation. 
* update whats new * switch SSVEP example to within session * correct typo and rebase * correct typos on examples --- docs/source/whats_new.rst | 2 + .../plot_filterbank_csp_vs_csp.py | 12 +- .../plot_mne_and_scikit_estimators.py | 37 +-- .../plot_select_electrodes_resample.py | 8 +- .../plot_statistical_analysis.py | 25 +- examples/changing_download_directory.py | 6 +- .../plot_learning_curve_p300_external.py | 12 +- .../plot_learning_curve_motor_imagery.py | 23 +- .../plot_learning_curve_p300.py | 19 +- examples/plot_cross_session_motor_imagery.py | 34 +- .../plot_cross_session_multiple_datasets.py | 14 +- examples/plot_cross_subject_ssvep.py | 23 +- examples/plot_explore_paradigm.py | 44 +-- examples/plot_within_session_p300.py | 16 +- ..._ssvep.py => plot_within_session_ssvep.py} | 37 ++- moabb/datasets/Lee2019.py | 304 +++++++++++------- moabb/datasets/ssvep_mamem.py | 26 +- moabb/evaluations/evaluations.py | 131 ++++++-- 18 files changed, 476 insertions(+), 297 deletions(-) rename examples/{plot_cross_session_ssvep.py => plot_within_session_ssvep.py} (74%) diff --git a/docs/source/whats_new.rst b/docs/source/whats_new.rst index ce053921f..73f490e2c 100644 --- a/docs/source/whats_new.rst +++ b/docs/source/whats_new.rst @@ -25,6 +25,8 @@ Bugs - Removing dependency on mne method for PhysionetMI data downloading, renaming runs (:gh:`257` by `Divyesh Narayanan`_) - Correcting events management in Schirrmeister2017, renaming session and run (:gh:`255` by `Pierre Guetschel`_ and `Sylvain Chevallier`_) +- Switch session and runs in MAMEM1, 2 and 3 to avoid error in WithinSessionEvaluation (:gh:`256` by `Sylvain Chevallier`_) +- Correct docstrings for the documentation, including Lee2017 (:gh:`256` by `Sylvain Chevallier`_) API changes ~~~~~~~~~~~ diff --git a/examples/advanced_examples/plot_filterbank_csp_vs_csp.py b/examples/advanced_examples/plot_filterbank_csp_vs_csp.py index 49d0e19f2..ac3784a9e 100644 --- a/examples/advanced_examples/plot_filterbank_csp_vs_csp.py +++ b/examples/advanced_examples/plot_filterbank_csp_vs_csp.py @@ -3,7 +3,7 @@ FilterBank CSP versus CSP ========================= -This Example show a comparison of CSP versus FilterBank CSP on the +This example shows a comparison of CSP versus FilterBank CSP on the very popular dataset 2a from the BCI competition IV. """ # Authors: Alexandre Barachant @@ -27,7 +27,7 @@ moabb.set_log_level("info") ############################################################################## -# Create pipelines +# Create Pipelines # ---------------- # # The CSP implementation from MNE is used. We selected 8 CSP components, as @@ -51,7 +51,7 @@ # ---------- # # Since two different preprocessing will be applied, we have two different -# paradigm objects. We have to make sure their filter matchs so the comparison +# paradigm objects. We have to make sure their filter matches so the comparison # will be fair. # # The first one is a standard `LeftRightImagery` with an 8 to 35 Hz broadband @@ -75,7 +75,7 @@ ) results = evaluation.process(pipelines) -# bank of 6 filter, by 4 Hz increment +# Bank of 6 filters, by 4 Hz increment filters = [[8, 12], [12, 16], [16, 20], [20, 24], [24, 28], [28, 35]] paradigm = FilterBankLeftRightImagery(filters=filters) evaluation = CrossSessionEvaluation( @@ -93,10 +93,10 @@ # Plot Results # ---------------- # -# Here we plot the results via normal methods. We the first plot is a pointplot +# Here we plot the results via seaborn.
We first display a pointplot # with the average performance of each pipeline across session and subjects. # The second plot is a paired scatter plot. Each point representing the score -# of a single session. An algorithm will outperforms another is most of the +# of a single session. An algorithm will outperform another if most of the # points are in its quadrant. fig, axes = plt.subplots(1, 2, figsize=[8, 4], sharey=True) diff --git a/examples/advanced_examples/plot_mne_and_scikit_estimators.py b/examples/advanced_examples/plot_mne_and_scikit_estimators.py index de900bc6d..5454f83a3 100644 --- a/examples/advanced_examples/plot_mne_and_scikit_estimators.py +++ b/examples/advanced_examples/plot_mne_and_scikit_estimators.py @@ -1,16 +1,16 @@ """ -========================= -MNE Epochs-based piplines -========================= +========================== +MNE Epochs-based pipelines +========================== This example shows how to use machine learning pipelines based on MNE Epochs -instead of numpy arrays. This is useful to make the most of the MNE code base +instead of Numpy arrays. This is useful to make the most of the MNE code base and to embed EEG specific code inside sklearn pipelines. -We will compare compare different pipelines for P300: -- Logistic Regression, based on MNE Epochs +We will compare different pipelines for P300: +- Logistic regression, based on MNE Epochs - XDAWN and Logistic Regression (LR), based on MNE Epochs -- XDAWN extended covariance and LR on tangent space, based on numpy +- XDAWN extended covariance and LR on tangent space, based on Numpy """ # Authors: Sylvain Chevallier @@ -47,7 +47,7 @@ moabb.set_log_level("info") ############################################################################### -# Loading dataset +# Loading Dataset # --------------- # # Load 2 subjects of BNCI 2014-009 dataset, with 3 sessions each @@ -58,7 +58,7 @@ paradigm = P300() ############################################################################## -# Get data (optional) +# Get Data (optional) # ------------------- # # To get access to the EEG signals downloaded from the dataset, you could # use ``dataset.get_data(subjects=[subject_id])`` to obtain the EEG under an # MNE format, stored in a dictionary of sessions and runs. # The ``paradigm.get_data(dataset=dataset, subjects=[subject_id])`` allows to # obtain the preprocessed EEG data, the labels and the meta information. By -# default, the EEG is return as a numpy array. With ``return_epochs=True``, MNE +# default, the EEG is returned as a Numpy array. With ``return_epochs=True``, MNE # Epochs are returned. subject_list = [1] @@ -77,14 +77,14 @@ ) ############################################################################## -# A simple MNE pipeline +# A Simple MNE Pipeline # --------------------- # # Using ``return_epochs=True`` in the evaluation, it is possible to design a # pipeline based on MNE Epochs input. Let's create a simple one, that # reshapes the input data from epochs, rescales the data and uses a logistic # regression to classify the data. We will need to write a basic Transformer -# estimator, that comply with +# estimator, that complies with # `sklearn convention `_. # This transformer will extract the data from an input Epoch, and reshape it into a # 2D array. @@ -124,13 +124,13 @@ def transform(self, X, y=None): mne_res = mne_eval.process(mne_ppl) ############################################################################## -# Advanced MNE pipeline +# Advanced MNE Pipeline # --------------------- # # In some cases, the MNE pipeline should have access to the original labels from # the dataset.
This is the case for the XDAWN code of MNE. One could pass # `mne_labels` to evaluation in order to keep this label. -# As an example, we will define a pipeline that compute an XDAWN filter, rescale, +# As an example, we will define a pipeline that computes an XDAWN filter, rescale, # then apply a logistic regression. mne_adv = {} @@ -151,10 +151,10 @@ def transform(self, X, y=None): adv_res = mne_eval.process(mne_adv) ############################################################################### -# Numpy-based pipeline +# Numpy-based Pipeline # -------------------- # -# For the comparison, we will define a numpy-based pipeline that relies on +# For the comparison, we will define a Numpy-based pipeline that relies on # pyriemann to estimate XDAWN-extended covariance matrices that are projected # on the tangent space and classified with a logistic regression. @@ -173,11 +173,12 @@ def transform(self, X, y=None): sk_res = sk_eval.process(sk_ppl) ############################################################################### -# Combining results +# Combining Results # ----------------- # # Even if the results have been obtained by different evaluation processes, it -# possible to combine the resulting dataframes to analyze and plot the results. +# is possible to combine the resulting DataFrames to analyze and plot the +# results. all_res = pd.concat([mne_res, adv_res, sk_res]) diff --git a/examples/advanced_examples/plot_select_electrodes_resample.py b/examples/advanced_examples/plot_select_electrodes_resample.py index 91b2fcc82..a569c9f00 100644 --- a/examples/advanced_examples/plot_select_electrodes_resample.py +++ b/examples/advanced_examples/plot_select_electrodes_resample.py @@ -1,6 +1,6 @@ """ ================================ -Select electrodes and resampling +Select Electrodes and Resampling ================================ Within paradigm, it is possible to restrict analysis only to a subset of @@ -30,7 +30,7 @@ # Datasets # -------- # -# Load 2 subjects of BNCI 2014-004 and Zhou2016 datasets, with 2 session each +# Load 2 subjects of BNCI 2014-004 and Zhou2016 datasets, with 2 sessions each subj = [1, 2] datasets = [Zhou2016(), BNCI2014001()] @@ -63,7 +63,7 @@ print(results.head()) ############################################################################## -# Electrode selection +# Electrode Selection # ------------------- # # It is possible to select the electrodes that are shared by all datasets @@ -79,7 +79,7 @@ print(results.head()) ############################################################################## -# Plot results +# Plot Results # ------------ # # Compare the obtained results with the two pipelines, CSP+LDA and logistic diff --git a/examples/advanced_examples/plot_statistical_analysis.py b/examples/advanced_examples/plot_statistical_analysis.py index 5037277fe..73bbea947 100644 --- a/examples/advanced_examples/plot_statistical_analysis.py +++ b/examples/advanced_examples/plot_statistical_analysis.py @@ -1,4 +1,5 @@ -"""======================= +""" +======================= Statistical Analysis ======================= @@ -40,20 +41,20 @@ # --------------------- # # First we need to set up a paradigm, dataset list, and some pipelines to -# test. This is explored more in the examples -- we choose a left vs right +# test. This is explored more in the examples -- we choose left vs right # imagery paradigm with a single bandpass. There is only one dataset here but # any number can be added without changing this workflow. 
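# As a sketch of what that would look like (BNCI2014001 and Zhou2016 both
# appear elsewhere in this patch series; any compatible motor imagery
# dataset would do):
#
#     from moabb.datasets import BNCI2014001, Zhou2016
#     datasets = [BNCI2014001(), Zhou2016()]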
# -# Create pipelines +# Create Pipelines # ---------------- # # Pipelines must be a dict of sklearn pipeline transformer. # -# The csp implementation from MNE is used. We selected 8 CSP components, as -# usually done in the litterature. +# The CSP implementation from MNE is used. We selected 8 CSP components, as +# usually done in the literature. # -# The riemannian geometry pipeline consists in covariance estimation, tangent -# space mapping and finaly a logistic regression for the classification. +# The Riemannian geometry pipeline consists in covariance estimation, tangent +# space mapping and finally a logistic regression for the classification. pipelines = {} @@ -70,7 +71,7 @@ # ---------- # # We define the paradigm (LeftRightImagery) and the dataset (BNCI2014001). -# The evaluation will return a dataframe containing a single AUC score for +# The evaluation will return a DataFrame containing a single AUC score for # each subject / session of the dataset, and for each pipeline. # # Results are saved into the database, so that if you add a new pipeline, it @@ -89,7 +90,7 @@ results = evaluation.process(pipelines) ############################################################################## -# MOABB plotting +# MOABB Plotting # ---------------- # # Here we plot the results using some of the convenience methods within the @@ -109,7 +110,7 @@ plt.show() ############################################################################### -# Statistical testing and further plots +# Statistical Testing and Further Plots # ---------------------------------------- # # If the statistical significance of results is of interest, the method @@ -124,13 +125,13 @@ ############################################################################### # The meta-analysis style plot shows the standardized mean difference within # each tested dataset for the two algorithms in question, in addition to a -# meta-effect and significances both per-dataset and overall. +# meta-effect and significance both per-dataset and overall. fig = moabb_plt.meta_analysis_plot(stats, "CSP+LDA", "RG+LDA") plt.show() ############################################################################### # The summary plot shows the effect and significance related to the hypothesis -# that the algorithm on the y-axis significantly out-performed the algorithm on +# that the algorithm on the y-axis significantly outperformed the algorithm on # the x-axis over all datasets moabb_plt.summary_plot(P, T) plt.show() diff --git a/examples/changing_download_directory.py b/examples/changing_download_directory.py index bc6f4228a..224c910c9 100644 --- a/examples/changing_download_directory.py +++ b/examples/changing_download_directory.py @@ -17,6 +17,7 @@ from moabb.utils import set_download_dir +############################################################################### # You can choose to change the download directory to any path of your choice. # If the path/folder doesn't exist, it will be created for you. 
@@ -25,9 +26,12 @@ new_path = osp.join(osp.expanduser("~"), "mne_data_test") set_download_dir(new_path) -# To see if the mne config has been changed correctly +############################################################################### +# You could verify that the MNE config has been changed correctly + check_path = get_config("MNE_DATA") print(f"Now the download directory has been changed to {check_path}") +############################################################################### # Set the directory back to default location set_download_dir(original_path) diff --git a/examples/external/plot_learning_curve_p300_external.py b/examples/external/plot_learning_curve_p300_external.py index 3a211a5b9..22648e294 100644 --- a/examples/external/plot_learning_curve_p300_external.py +++ b/examples/external/plot_learning_curve_p300_external.py @@ -1,17 +1,17 @@ """ ======================================= -Within Session P300 with learning curve +Within Session P300 with Learning Curve ======================================= -This Example shows how to perform a within session analysis while also +This example shows how to perform a within session analysis while also creating learning curves for a P300 dataset. Additionally, we will evaluate external code. Make sure to have tdlda installed, which can be found in requirements_external.txt We will compare three pipelines : -- Riemannian Geometry -- Jumping Means based Linear Discriminant Analysis +- Riemannian geometry +- Jumping Means-based Linear Discriminant Analysis - Time-Decoupled Linear Discriminant Analysis We will use the P300 paradigm, which uses the AUC as metric. @@ -53,7 +53,7 @@ processing_sampling_rate = 128 pipelines = {} -# we have to do this because the classes are called 'Target' and 'NonTarget' +# We have to do this because the classes are called 'Target' and 'NonTarget' # but the evaluation function uses a LabelEncoder, transforming them # to 0 and 1 labels_dict = {"Target": 1, "NonTarget": 0} @@ -96,7 +96,7 @@ # Evaluation # ---------- # -# We define the paradigm (P300) and use all three datasets available for it. +# We define the paradigm (P300) and use the BNCI 2014-009 dataset for it. # The evaluation will return a dataframe containing AUCs for each permutation # and dataset size. diff --git a/examples/learning_curve/plot_learning_curve_motor_imagery.py b/examples/learning_curve/plot_learning_curve_motor_imagery.py index 96960c8c1..df17ce86b 100644 --- a/examples/learning_curve/plot_learning_curve_motor_imagery.py +++ b/examples/learning_curve/plot_learning_curve_motor_imagery.py @@ -1,9 +1,9 @@ """ ================================================ -Within Session Motor Imagery with learning curve +Within Session Motor Imagery with Learning Curve ================================================ -This Example show how to perform a within session motor imagery analysis on the +This example shows how to perform a within session motor imagery analysis on the very popular dataset 2a from the BCI competition IV. We will compare two pipelines : @@ -11,8 +11,8 @@ - CSP + LDA - Riemannian Geometry + Logistic Regression -We will use the LeftRightImagery paradigm. this will restrict the analysis -to two classes (left hand versus righ hand) and use AUC as metric. +We will use the LeftRightImagery paradigm. This will restrict the analysis +to two classes (left- vs right-hand) and use AUC as metric. 
""" # Original author: Alexandre Barachant # Learning curve modification: Jan Sosulski @@ -38,16 +38,16 @@ moabb.set_log_level("info") ############################################################################## -# Create pipelines +# Create Pipelines # ---------------- # # Pipelines must be a dict of sklearn pipeline transformer. # -# The csp implementation from MNE is used. We selected 8 CSP components, as +# The CSP implementation from MNE is used. We selected 8 CSP components, as # usually done in the litterature. # -# The riemannian geometry pipeline consists in covariance estimation, tangent -# space mapping and finaly a logistic regression for the classification. +# The Riemannian geometry pipeline consists in covariance estimation, tangent +# space mapping and finally a logistic regression for the classification. pipelines = {} @@ -64,12 +64,12 @@ # ---------- # # We define the paradigm (LeftRightImagery) and the dataset (BNCI2014001). -# The evaluation will return a dataframe containing a single AUC score for +# The evaluation will return a DataFrame containing a single AUC score for # each subject / session of the dataset, and for each pipeline. # # Results are saved into the database, so that if you add a new pipeline, it # will not run again the evaluation unless a parameter has changed. Results can -# be overwrited if necessary. +# be overwritten if necessary. paradigm = LeftRightImagery() dataset = BNCI2014001() @@ -97,7 +97,8 @@ # Plot Results # ------------ # -# Here we plot the results. +# We plot the accuracy as a function of the number of training samples, for +# each pipeline fig, ax = plt.subplots(facecolor="white", figsize=[8, 4]) diff --git a/examples/learning_curve/plot_learning_curve_p300.py b/examples/learning_curve/plot_learning_curve_p300.py index 458d1fc1b..daa594344 100644 --- a/examples/learning_curve/plot_learning_curve_p300.py +++ b/examples/learning_curve/plot_learning_curve_p300.py @@ -1,16 +1,16 @@ """ ======================================= -Within Session P300 with learning curve +Within Session P300 with Learning Curve ======================================= -This Example shows how to perform a within session analysis while also +This example shows how to perform a within session analysis while also creating learning curves for a P300 dataset. Additionally, we will evaluate external code. Make sure to have tdlda installed, which can be found in requirements_external.txt We will compare two pipelines : -- Riemannian Geometry with Linear Discriminant Analysis +- Riemannian geometry with Linear Discriminant Analysis - XDAWN and Linear Discriminant Analysis We will use the P300 paradigm, which uses the AUC as metric. @@ -44,6 +44,7 @@ moabb.set_log_level("info") +############################################################################## # This is an auxiliary transformer that allows one to vectorize data # structures in a pipeline For instance, in the case of a X with dimensions # Nt x Nc x Ns, one might be interested in a new data structure with @@ -64,14 +65,15 @@ def transform(self, X): ############################################################################## -# Create pipelines +# Create Pipelines # ---------------- # # Pipelines must be a dict of sklearn pipeline transformer. 
processing_sampling_rate = 128 pipelines = {} -# we have to do this because the classes are called 'Target' and 'NonTarget' +############################################################################## +# We have to do this because the classes are called 'Target' and 'NonTarget' # but the evaluation function uses a LabelEncoder, transforming them # to 0 and 1 labels_dict = {"Target": 1, "NonTarget": 0} @@ -92,7 +94,7 @@ def transform(self, X): # ---------- # # We define the paradigm (P300) and use all three datasets available for it. -# The evaluation will return a dataframe containing AUCs for each permutation +# The evaluation will return a DataFrame containing AUCs for each permutation # and dataset size. paradigm = P300(resample=processing_sampling_rate) @@ -102,7 +104,7 @@ def transform(self, X): datasets = [dataset] overwrite = True # set to True if we want to overwrite cached results data_size = dict(policy="ratio", value=np.geomspace(0.02, 1, 4)) -# When the training data is sparse, peform more permutations than when we have a lot of data +# When the training data is sparse, perform more permutations than when we have a lot of data n_perms = np.floor(np.geomspace(20, 2, len(data_size["value"]))).astype(int) # Guarantee reproducibility np.random.seed(7536298) @@ -122,7 +124,8 @@ def transform(self, X): # Plot Results # ------------ # -# Here we plot the results. +# We plot the accuracy as a function of the number of training samples, for +# each pipeline. fig, ax = plt.subplots(facecolor="white", figsize=[8, 4]) diff --git a/examples/plot_cross_session_motor_imagery.py b/examples/plot_cross_session_motor_imagery.py index ea9eee4d4..b1c1ce253 100644 --- a/examples/plot_cross_session_motor_imagery.py +++ b/examples/plot_cross_session_motor_imagery.py @@ -1,9 +1,9 @@ """ =========================== -Cross Session Motor Imagery +Cross-Session Motor Imagery =========================== -This Example show how to perform a cross session motor imagery analysis on the +This example shows how to perform a cross session motor imagery analysis on the very popular dataset 2a from the BCI competition IV. We will compare two pipelines : - CSP+LDA - Riemannian Geometry+Logistic Regression -We will use the LeftRightImagery paradigm. this will restrict the analysis -to two classes (left hand versus righ hand) and use AUC as metric. +We will use the LeftRightImagery paradigm. This will restrict the analysis +to two classes (left hand versus right hand) and use AUC as metric. The cross session evaluation context will evaluate performance using a leave one session out cross-validation. For each session in the dataset, a model is trained on every other session and performance is evaluated on the current session. """ # Authors: Alexandre Barachant +# Sylvain Chevallier # # License: BSD (3-clause) @@ -41,16 +42,16 @@ moabb.set_log_level("info") ############################################################################## -# Create pipelines +# Create Pipelines # ---------------- # # Pipelines must be a dict of sklearn pipeline transformer. # -# The csp implementation from MNE is used. We selected 8 CSP components, as -# usually done in the litterature. +# The CSP implementation is based on the MNE implementation. We selected 8 CSP +# components, as usually done in the literature. # -# The riemannian geometry pipeline consists in covariance estimation, tangent -# space mapping and finaly a logistic regression for the classification. +# The Riemannian geometry pipeline consists in covariance estimation, tangent
+# The Riemannian geometry pipeline consists in covariance estimation, tangent +# space mapping and finally a logistic regression for the classification. pipelines = {} @@ -65,12 +66,12 @@ # ---------- # # We define the paradigm (LeftRightImagery) and the dataset (BNCI2014001). -# The evaluation will return a dataframe containing a single AUC score for +# The evaluation will return a DataFrame containing a single AUC score for # each subject / session of the dataset, and for each pipeline. # # Results are saved into the database, so that if you add a new pipeline, it # will not run again the evaluation unless a parameter has changed. Results can -# be overwrited if necessary. +# be overwritten if necessary. paradigm = LeftRightImagery() # Because this is being auto-generated we only use 2 subjects @@ -90,11 +91,8 @@ # Plot Results # ---------------- # -# Here we plot the results. We the first plot is a pointplot with the average +# Here we plot the results. We first make a pointplot with the average # performance of each pipeline across session and subjects. -# The second plot is a paired scatter plot. Each point representing the score -# of a single session. An algorithm will outperforms another is most of the -# points are in its quadrant. fig, axes = plt.subplots(1, 2, figsize=[8, 4], sharey=True) @@ -113,7 +111,11 @@ axes[0].set_ylabel("ROC AUC") axes[0].set_ylim(0.5, 1) -# paired plot +############################################################################## +# The second plot is a paired scatter plot. Each point representing the score +# of a single session. An algorithm will outperform another is most of the +# points are in its quadrant. + paired = results.pivot_table( values="score", columns="pipeline", index=["subject", "session"] ) diff --git a/examples/plot_cross_session_multiple_datasets.py b/examples/plot_cross_session_multiple_datasets.py index a8fb6e1bd..a21e1b82f 100644 --- a/examples/plot_cross_session_multiple_datasets.py +++ b/examples/plot_cross_session_multiple_datasets.py @@ -34,7 +34,7 @@ moabb.set_log_level("info") ############################################################################### -# Loading dataset +# Loading Dataset # --------------- # # Load 2 subjects of BNCI 2014-004 and Zhou2016 datasets, with 2 session each @@ -45,16 +45,16 @@ d.subject_list = subj ############################################################################### -# Choose paradigm +# Choose Paradigm # --------------- # # We select the paradigm MI, applying a bandpass filter (8-35 Hz) on -# the data and we will keep only left and right hand motor imagery +# the data and we will keep only left- and right-hand motor imagery paradigm = LeftRightImagery(fmin=8, fmax=35) ############################################################################## -# Create pipelines +# Create Pipelines # ---------------- # # Use the Common Spatial Patterns with 8 components and a Linear Discriminant @@ -64,14 +64,14 @@ pipeline["CSP+LDA"] = make_pipeline(CSP(n_components=8), LDA()) ############################################################################## -# Get data (optional) +# Get Data (optional) # ------------------- # # To get access to the EEG signals downloaded from the dataset, you could # use `dataset.get_data(subjects=[subject_id])` to obtain the EEG under # an MNE format, stored in a dictionary of sessions and runs. 
# Otherwise, `paradigm.get_data(dataset=dataset, subjects=[subject_id])` -# allows to obtain the EEG data in scikit format, the labels and the meta +# allows to obtain the EEG data in sklearn format, the labels and the meta # information. The data are preprocessed according to the paradigm # requirements. @@ -87,7 +87,7 @@ # Evaluation # ---------- # -# The evaluation will return a dataframe containing a single AUC score for +# The evaluation will return a DataFrame containing a single AUC score for # each subject / session of the dataset, and for each pipeline. overwrite = True # set to True if we want to overwrite cached results diff --git a/examples/plot_cross_subject_ssvep.py b/examples/plot_cross_subject_ssvep.py index 8667454d4..ed3881220 100644 --- a/examples/plot_cross_subject_ssvep.py +++ b/examples/plot_cross_subject_ssvep.py @@ -1,8 +1,8 @@ """ =========================== -Cross Subject SSVEP +Cross-Subject SSVEP =========================== -This example shows how to perform a cross subject analysis on a SSVEP dataset. +This example shows how to perform a cross-subject analysis on an SSVEP dataset. We will compare two pipelines : - Riemannian Geometry @@ -36,11 +36,11 @@ moabb.set_log_level("info") ############################################################################### -# Loading dataset +# Loading Dataset # --------------- # # We will load the data from the first 2 subjects of the ``SSVEP_Exo`` dataset -# and compare two algorithms on this set. One of the algorithm could only +# and compare two algorithms on this set. One of the algorithms could only # process class associated with a stimulation frequency, we will thus drop # the resting class. As the resting class is the last defined class, picking # the first three classes (out of four) allows to focus only on the stimulation @@ -52,7 +52,7 @@ interval = dataset.interval ############################################################################### -# Choose paradigm +# Choose Paradigm # --------------- # # We define the paradigms (SSVEP, SSSVEP_TRCA and FilterBankSSVEP) and use the dataset @@ -62,7 +62,7 @@ # there are stimulation frequencies (here 2). For each stimulation frequency # the EEG is filtered with a 1 Hz-wide bandpass filter centered on the # frequency. This results in ``n_classes`` copies of the signal, filtered for each -# class, as used in filterbank motor imagery paradigms. +# class, as used in the filterbank motor imagery paradigms. paradigm = SSVEP(fmin=10, fmax=25, n_classes=3) paradigm_TRCA = SSVEP(fmin=1, fmax=110, n_classes=3) @@ -77,7 +77,7 @@ freqs = paradigm.used_events(dataset) ############################################################################## -# Create pipelines +# Create Pipelines # ---------------- # # Pipelines must be a dict of sklearn pipeline transformer. @@ -85,7 +85,7 @@ # covariance matrices from the signal filtered around the considered # frequency and applying a logistic regression in the tangent plane. # The second pipeline relies on the above defined CCA classifier. -# The third pipeline relies on TRCA algorithm. +# The third pipeline relies on the TRCA algorithm. pipelines_fb = {} pipelines_fb["RG+LogReg"] = make_pipeline( @@ -107,7 +107,7 @@ # Evaluation # ---------- # -# The evaluation will return a dataframe containing a single AUC score for +# The evaluation will return a DataFrame containing an accuracy score for # each subject / session of the dataset, and for each pipeline. 
 #
 # Results are saved into the database, so that if you add a new pipeline, it
@@ -122,7 +122,7 @@
 results = evaluation.process(pipelines)

 ###############################################################################
-# Filter bank processing, determine automatically the filter from the
+# Filter bank processing, determine the filter automatically from the
 # stimulation frequency values of events.

 evaluation_fb = CrossSubjectEvaluation(
@@ -147,7 +147,7 @@
 # Plot Results
 # ----------------
 #
-# Here we plot the results.
+# Here we display the results as a stripplot, with a pointplot for the error bars.

 fig, ax = plt.subplots(facecolor="white", figsize=[8, 4])
 sns.stripplot(
@@ -163,5 +163,4 @@
 sns.pointplot(data=results, y="score", x="pipeline", ax=ax, zorder=1, palette="Set1")
 ax.set_ylabel("Accuracy")
 ax.set_ylim(0.1, 0.6)
-plt.savefig("ssvep.png")
 fig.show()
diff --git a/examples/plot_explore_paradigm.py b/examples/plot_explore_paradigm.py
index ab92b2c55..1cab0593a 100644
--- a/examples/plot_explore_paradigm.py
+++ b/examples/plot_explore_paradigm.py
@@ -10,16 +10,17 @@
 for ERP vs ERD paradigms.

 A paradigm also defines the appropriate evaluation metric, for example AUC
-for binary classification problem, accuracy for multiclass, or kappa
-coefficient for continuous paradigms.
+for binary classification problems, accuracy for multiclass, or kappa
+coefficients for continuous paradigms.

-This tutorial explore the paradigm object, with 3 examples of paradigm :
+This tutorial explores the paradigm object, with 3 examples of paradigms:

 - MotorImagery
 - FilterBankMotorImagery
 - LeftRightImagery
 """
 # Authors: Alexandre Barachant
+#          Sylvain Chevallier
 #
 # License: BSD (3-clause)

@@ -35,7 +36,7 @@
 # MotorImagery
 # -----------------
 #
-# First, lets take a example of the MotorImagery paradigm.
+# First, let's take an example of the MotorImagery paradigm.

 paradigm = MotorImagery(n_classes=4)

@@ -52,9 +53,9 @@
 # Lets take the example of the BNCI2014001 dataset, known as the dataset IIa
 # from the BCI competition IV. We will load the data from the subject 1.
 # When calling `get_data`, the paradigm will retrieve the data from the
-# specified list of subject, apply preprocessing (by default, a bandpass
+# specified list of subjects, apply preprocessing (by default, a bandpass
 # between 7 and 35 Hz), epoch the data (with interval specified by the dataset,
-# unless superseeded by the paradigm) and return the corresponding objects.
+# unless superseded by the paradigm) and return the corresponding objects.

 dataset = BNCI2014001()
 subjects = [1]
@@ -70,32 +71,31 @@

 ###############################################################################
 # Labels contains the labels corresponding to each trial. in the case of this
-# dataset, we have the 4 type of motor imagery that was performed.
+# dataset, we have the 4 types of motor imagery that were performed.

 print(np.unique(y))

 ###############################################################################
-# metadata have at least 3 columns, subject, session and run.
+# Metadata has at least 3 columns: subject, session and run.
 #
 # - subject is the subject id of the corresponding trial
-# - session is the session id. A session is a all the data recorded without
+# - session is the session id. A session denotes a recording made without
 # removing the EEG cap.
-# - run is the individual continuous recording made during a session. A Session
-# may or may not contain multiple run.
+# - run is the individual continuous recording made during a session. A session
+# may or may not contain multiple runs.
 #
-
 print(metadata.head())

 ###############################################################################
-# For this data, we have one subjecy, 2 sessions (2 different recording day)
-# and 6 run per session.
+# For this data, we have one subject, 2 sessions (2 different recording days)
+# and 6 runs per session.

 print(metadata.describe(include="all"))

 ###############################################################################
-# Paradigm object can also return the list of all dataset compatible. here
-# it will return the list all the imagery datasets from the moabb.
+# Paradigm objects can also return the list of all compatible datasets. Here
+# it will return the list of all the imagery datasets from MOABB.

 compatible_datasets = paradigm.datasets
 print([dataset.code for dataset in compatible_datasets])
@@ -105,7 +105,7 @@
 # -----------------------
 #
 # FilterBankMotorImagery is the same paradigm, but with a different
-# preprocessing. In this case, it apply a bank of 6 bandpass filter on the data
+# preprocessing. In this case, it applies a bank of 6 bandpass filters on the data
 # before concatenating the output.

 paradigm = FilterBankMotorImagery()
@@ -113,7 +113,7 @@
 print(paradigm.__doc__)

 ###############################################################################
-# therefore, the output X is a 4D array, with trial x channel x time x filter
+# Therefore, the output X is a 4D array, with trial x channel x time x filter

 X, y, metadata = paradigm.get_data(dataset=dataset, subjects=subjects)

@@ -124,22 +124,22 @@
 # ----------------------
 #
 # LeftRightImagery is a variation over the BaseMotorImagery paradigm,
-# restricted to left and right hand events.
+# restricted to left- and right-hand events.

 paradigm = LeftRightImagery()

 print(paradigm.__doc__)

 ###############################################################################
-# the compatible dataset list is a subset of motor imagery dataset that
+# The compatible dataset list is a subset of the motor imagery datasets that
 # contains at least left and right hand events.

 compatible_datasets = paradigm.datasets
 print([dataset.code for dataset in compatible_datasets])

 ###############################################################################
-# So if we apply this this to our original dataset, it will only return trials
-# corresponding to left and right hand motor imagination.
+# So if we apply this to our original dataset, it will only return trials
+# corresponding to left- and right-hand motor imagination.

 X, y, metadata = paradigm.get_data(dataset=dataset, subjects=subjects)
diff --git a/examples/plot_within_session_p300.py b/examples/plot_within_session_p300.py
index 5c897bb10..2e8bd1ce7 100644
--- a/examples/plot_within_session_p300.py
+++ b/examples/plot_within_session_p300.py
@@ -3,13 +3,13 @@
 Within Session P300
 ===========================

-This Example shows how to perform a within session analysis on three different
+This example shows how to perform a within session analysis on three different
 P300 datasets.

 We will compare two pipelines :

-- Riemannian Geometry
-- xDawn with Linear Discriminant Analysis
+- Riemannian geometry
+- XDAWN with Linear Discriminant Analysis

 We will use the P300 paradigm, which uses the AUC as metric.
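A minimal sketch of what these two pipelines could look like, built from the
pyriemann estimators used elsewhere in these examples; the hyperparameters are
illustrative, and the example's own definitions (including its Vectorizer
helper) follow in the diff below:

from pyriemann.estimation import Xdawn, XdawnCovariances
from pyriemann.tangentspace import TangentSpace
from sklearn.discriminant_analysis import LinearDiscriminantAnalysis as LDA
from sklearn.pipeline import make_pipeline
from sklearn.preprocessing import FunctionTransformer

pipelines = {}
# Riemannian geometry: XDAWN-filtered covariances mapped to the tangent space
pipelines["RG+LDA"] = make_pipeline(
    XdawnCovariances(nfilter=2, estimator="lwf"), TangentSpace(), LDA()
)
# XDAWN + LDA: spatially filtered epochs are flattened before classification
pipelines["Xdw+LDA"] = make_pipeline(
    Xdawn(nfilter=2), FunctionTransformer(lambda X: X.reshape(len(X), -1)), LDA()
)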
@@ -44,7 +44,7 @@

 ##############################################################################
 # This is an auxiliary transformer that allows one to vectorize data
-# structures in a pipeline For instance, in the case of a X with dimensions
+# structures in a pipeline. For instance, in the case of an X with dimensions
 # Nt x Nc x Ns, one might be interested in a new data structure with
 # dimensions Nt x (Nc.Ns)

@@ -63,7 +63,7 @@ def transform(self, X):


 ##############################################################################
-# Create pipelines
+# Create Pipelines
 # ----------------
 #
 # Pipelines must be a dict of sklearn pipeline transformer.
@@ -72,7 +72,7 @@ def transform(self, X):
 pipelines = {}

 ##############################################################################
-# we have to do this because the classes are called 'Target' and 'NonTarget'
+# We have to do this because the classes are called 'Target' and 'NonTarget'
 # but the evaluation function uses a LabelEncoder, transforming them
 # to 0 and 1
 labels_dict = {"Target": 1, "NonTarget": 0}
@@ -94,7 +94,7 @@ def transform(self, X):
 # ----------
 #
 # We define the paradigm (P300) and use all three datasets available for it.
-# The evaluation will return a dataframe containing a single AUC score for
+# The evaluation will return a DataFrame containing a single AUC score for
 # each subject / session of the dataset, and for each pipeline.
 #
 # Results are saved into the database, so that if you add a new pipeline, it
@@ -115,7 +115,7 @@ def transform(self, X):
 # Plot Results
 # ----------------
 #
-# Here we plot the results.
+# Here we plot the results to compare the two pipelines.

 fig, ax = plt.subplots(facecolor="white", figsize=[8, 4])

diff --git a/examples/plot_cross_session_ssvep.py b/examples/plot_within_session_ssvep.py
similarity index 74%
rename from examples/plot_cross_session_ssvep.py
rename to examples/plot_within_session_ssvep.py
index 0d8269b10..dbc9e6188 100644
--- a/examples/plot_cross_session_ssvep.py
+++ b/examples/plot_within_session_ssvep.py
@@ -1,15 +1,14 @@
 """
 ===================
-Cross Session SSVEP
+Within Session SSVEP
 ===================
-This Example show how to perform a cross-session SSVEP analysis on the
+This example shows how to perform a within-session SSVEP analysis on the
 MAMEM dataset 3, using a CCA pipeline.

-The cross session evaluation context will evaluate performance using a leave
-one session out cross-validation. For each session in the dataset, a model
-is trained on every other session and performance are evaluated on the current
-session.
+The within-session evaluation assesses the performance of a classification
+pipeline using a 5-fold cross-validation. The reported metric (here, accuracy)
+is the average of all folds.
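In scikit-learn terms, the score reported for one session is conceptually close
to the sketch below; this is an illustration only, not the evaluator's actual
implementation, and the data and classifier are runnable placeholders:

import numpy as np
from sklearn.dummy import DummyClassifier
from sklearn.model_selection import StratifiedKFold, cross_val_score

# placeholder data standing in for one session's epochs and labels
X = np.random.randn(40, 8)  # 40 trials, 8 features
y = np.array([0, 1] * 20)  # balanced binary labels
clf = DummyClassifier(strategy="most_frequent")  # stand-in for the CCA pipeline

cv = StratifiedKFold(n_splits=5, shuffle=True, random_state=42)
scores = cross_val_score(clf, X, y, cv=cv, scoring="accuracy")
session_score = scores.mean()  # the reported metric is the average over folds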
""" # Authors: Sylvain Chevallier # @@ -23,7 +22,7 @@ import moabb from moabb.datasets import MAMEM3 -from moabb.evaluations import CrossSessionEvaluation +from moabb.evaluations import WithinSessionEvaluation from moabb.paradigms import SSVEP from moabb.pipelines import SSVEP_CCA @@ -33,17 +32,17 @@ moabb.set_log_level("info") ############################################################################### -# Loading dataset +# Loading Dataset # --------------- # -# Load 2 subjects of MAMEM3 dataset, with 3 session each +# Load 2 subjects of MAMEM3 dataset subj = [1, 3] dataset = MAMEM3() dataset.subject_list = subj ############################################################################### -# Choose paradigm +# Choose Paradigm # --------------- # # We select the paradigm SSVEP, applying a bandpass filter (3-15 Hz) on @@ -53,7 +52,7 @@ paradigm = SSVEP(fmin=3, fmax=15, n_classes=3) ############################################################################## -# Create pipelines +# Create Pipelines # ---------------- # # Use a Canonical Correlation Analysis classifier @@ -65,7 +64,7 @@ pipeline["CCA"] = make_pipeline(SSVEP_CCA(interval=interval, freqs=freqs, n_harmonics=3)) ############################################################################## -# Get data (optional) +# Get Data (optional) # ------------------- # # To get access to the EEG signals downloaded from the dataset, you could @@ -83,12 +82,12 @@ # Evaluation # ---------- # -# The evaluation will return a dataframe containing a single AUC score for -# each subject / session of the dataset, and for each pipeline. +# The evaluation will return a DataFrame containing a single AUC score for +# each subject and pipeline. overwrite = True # set to True if we want to overwrite cached results -evaluation = CrossSessionEvaluation( +evaluation = WithinSessionEvaluation( paradigm=paradigm, datasets=dataset, suffix="examples", overwrite=overwrite ) results = evaluation.process(pipeline) @@ -99,9 +98,15 @@ # Plot Results # ---------------- # -# Here we plot the results, indicating the score for each session and subject +# Here we plot the results, indicating the score for each subject plt.figure() sns.barplot(data=results, y="score", x="session", hue="subject", palette="viridis") +############################################################################## +# And the computation time in seconds + +plt.figure() +ax = sns.barplot(data=results, y="time", x="session", hue="subject", palette="Reds") +ax.set_ylabel("Time (s)") plt.show() diff --git a/moabb/datasets/Lee2019.py b/moabb/datasets/Lee2019.py index 600a35a5f..625a5006f 100644 --- a/moabb/datasets/Lee2019.py +++ b/moabb/datasets/Lee2019.py @@ -1,5 +1,5 @@ """ -BMI/OpenBMI dataset (Motor Imagery). +BMI/OpenBMI dataset """ from functools import partialmethod @@ -17,115 +17,7 @@ class Lee2019(BaseDataset): - """BMI/OpenBMI dataset. - - Dataset from Lee et al 2019 [1]_. - - **Dataset Description** - - EEG signals were recorded with a sampling rate of 1,000 Hz and - collected with 62 Ag/AgCl electrodes. The EEG amplifier used - in the experiment was a BrainAmp (Brain Products; Munich, - Germany). The channels were nasion-referenced and grounded - to electrode AFz. Additionally, an EMG electrode recorded from - each flexor digitorum profundus muscle with the olecranon - used as reference. The EEG/EMG channel configuration and - indexing numbers are described in Fig. 1. The impedances of the - EEG electrodes were maintained below 10 k during the entire - experiment. 
- - MI paradigm - The MI paradigm was designed following a well-established system protocol. - For all blocks, the first 3 s of each trial began - with a black fixation cross that appeared at the center of the - monitor to prepare subjects for the MI task. Afterwards, the subject - performed the imagery task of grasping with the appropriate - hand for 4 s when the right or left arrow appeared as a visual cue. - After each task, the screen remained blank for 6 s (± 1.5 s). The - experiment consisted of training and test phases; each phase - had 100 trials with balanced right and left hand imagery tasks. - During the online test phase, the fixation cross appeared at the - center of the monitor and moved right or left, according to the - real-time classifier output of the EEG signal. - - ERP paradigm - The interface layout of the speller followed the typical design - of a row-column speller. The six rows and six columns were - configured with 36 symbols (A to Z, 1 to 9, and _). Each symbol - was presented equally spaced. To enhance the - signal quality, two additional settings were incorporated into - the original row-column speller design, namely, random-set - presentation and face stimuli. These additional settings - help to elicit stronger ERP responses by minimizing adjacency - distraction errors and by presenting a familiar face image. The - stimulus-time interval was set to 80 ms, and the inter-stimulus - interval (ISI) to 135 ms. A single iteration of stimulus presentation - in all rows and columns was considered a sequence. Therefore, - one sequence consisted of 12 stimulus flashes. A maximum - of five sequences (i.e., 60 flashes) was allotted without prolonged - inter-sequence intervals for each target character. After the end - of five sequences, 4.5 s were given to the user for identifying, locating, - and gazing at the next target character. The participant - was instructed to attend to the target symbol by counting the - number of times each target character had been flashed. - In the training session, subjects were asked to copy-spell - a given sentence, "NEURAL NETWORKS AND DEEP LEARNING" - (33 characters including spaces) by gazing at the target character - on the screen. The training session was performed in the offline - condition, and no feedback was provided to the subject during - the EEG recording. In the test session, subjects were instructed to - copy-spell "PATTERN RECOGNITION MACHINE LEARNING" - (36 characters including spaces), and the real-time EEG data were - analyzed based on the classifier that was calculated from the - training session data. The selected character from the subject’s - current EEG data was displayed in the top left area of the screen - at the end of the presentation (i.e., after five sequences). - Per participant, the collected EEG data for the ERP experiment consisted - of 1,980 and 2,160 trials (samples) for training and test phase, respectively. - - SSVEP paradigm - Four target SSVEP stimuli were designed to flicker at 5.45, 6.67, - 8.57, and 12 Hz and were presented in four positions (down, - right, left, and up, respectively) on a monitor. The designed - paradigm followed the conventional types of SSVEP-based BCI - systems that require four-direction movements. Partici- - pants were asked to fixate the center of a black screen and then - to gaze in the direction where the target stimulus was high- - lighted in a different color. Each SSVEP stimulus - was presented for 4 s with an ISI of 6 s. Each target frequency - was presented 25 times. 
Therefore, the corrected EEG data had
-    100 trials (4 classes x 25 trials) in the offline training phase and
-    another 100 trials in the online test phase. Visual feedback was
-    presented in the test phase; the estimated target frequency was
-    highlighted for 1 s with a red border at the end of each trial.
-
-    Parameters
-    ----------
-    paradigm: (['MI','ERP','SSVEP'])
-        the paradigm to load (see paper).
-
-    train_run: bool (default True)
-        if True, return runs corresponding to the training/offline phase (see paper).
-
-    test_run: bool (default: False for MI and SSVEP paradigms, True for ERP)
-        if True, return runs corresponding to the test/online phase (see paper). Beware that test_run
-        for MI and SSVEP do not have labels associated with trials: these runs could not be used in
-        classification tasks.
-
-    resting_state: bool (default False)
-        if True, return runs corresponding to the resting phases before and after recordings (see paper).
-
-    sessions: list of int (default [1,2])
-        the list of the sessions to load (2 available).
-
-    References
-    ----------
-    .. [1] Lee, M. H., Kwon, O. Y., Kim, Y. J., Kim, H. K., Lee, Y. E.,
-           Williamson, J., … Lee, S. W. (2019). EEG dataset and OpenBMI
-           toolbox for three BCI paradigms: An investigation into BCI
-           illiteracy. GigaScience, 8(5), 1–16.
-           https://doi.org/10.1093/gigascience/giz002
-    """
+    """Base dataset class for Lee2019"""

     def __init__(
         self,
@@ -345,12 +237,204 @@ def data_path(


 class Lee2019_MI(Lee2019):
+    """BMI/OpenBMI dataset for MI.
+
+    Dataset from Lee et al 2019 [1]_.
+
+    **Dataset Description**
+
+    EEG signals were recorded with a sampling rate of 1,000 Hz and
+    collected with 62 Ag/AgCl electrodes. The EEG amplifier used
+    in the experiment was a BrainAmp (Brain Products; Munich,
+    Germany). The channels were nasion-referenced and grounded
+    to electrode AFz. Additionally, an EMG electrode recorded from
+    each flexor digitorum profundus muscle with the olecranon
+    used as reference. The EEG/EMG channel configuration and
+    indexing numbers are described in Fig. 1. The impedances of the
+    EEG electrodes were maintained below 10 kΩ during the entire
+    experiment.
+
+    MI paradigm
+    The MI paradigm was designed following a well-established system protocol.
+    For all blocks, the first 3 s of each trial began
+    with a black fixation cross that appeared at the center of the
+    monitor to prepare subjects for the MI task. Afterwards, the subject
+    performed the imagery task of grasping with the appropriate
+    hand for 4 s when the right or left arrow appeared as a visual cue.
+    After each task, the screen remained blank for 6 s (± 1.5 s). The
+    experiment consisted of training and test phases; each phase
+    had 100 trials with balanced right and left hand imagery tasks.
+    During the online test phase, the fixation cross appeared at the
+    center of the monitor and moved right or left, according to the
+    real-time classifier output of the EEG signal.
+
+
+    Parameters
+    ----------
+    train_run: bool (default True)
+        if True, return runs corresponding to the training/offline phase (see paper).
+
+    test_run: bool (default: False for MI and SSVEP paradigms, True for ERP)
+        if True, return runs corresponding to the test/online phase (see paper). Beware that test_run
+        for MI and SSVEP do not have labels associated with trials: these runs could not be used in
+        classification tasks.
+
+    resting_state: bool (default False)
+        if True, return runs corresponding to the resting phases before and after recordings (see paper).
+
+    sessions: list of int (default [1,2])
+        the list of the sessions to load (2 available).
+
+    References
+    ----------
+    .. [1] Lee, M. H., Kwon, O. Y., Kim, Y. J., Kim, H. K., Lee, Y. E.,
+           Williamson, J., … Lee, S. W. (2019). EEG dataset and OpenBMI
+           toolbox for three BCI paradigms: An investigation into BCI
+           illiteracy. GigaScience, 8(5), 1–16.
+           https://doi.org/10.1093/gigascience/giz002
+    """
+
     __init__ = partialmethod(Lee2019.__init__, "MI")


 class Lee2019_ERP(Lee2019):
+    """BMI/OpenBMI dataset for P300.
+
+    Dataset from Lee et al 2019 [1]_.
+
+    **Dataset Description**
+
+    EEG signals were recorded with a sampling rate of 1,000 Hz and
+    collected with 62 Ag/AgCl electrodes. The EEG amplifier used
+    in the experiment was a BrainAmp (Brain Products; Munich,
+    Germany). The channels were nasion-referenced and grounded
+    to electrode AFz. Additionally, an EMG electrode recorded from
+    each flexor digitorum profundus muscle with the olecranon
+    used as reference. The EEG/EMG channel configuration and
+    indexing numbers are described in Fig. 1. The impedances of the
+    EEG electrodes were maintained below 10 kΩ during the entire
+    experiment.
+
+    ERP paradigm
+    The interface layout of the speller followed the typical design
+    of a row-column speller. The six rows and six columns were
+    configured with 36 symbols (A to Z, 1 to 9, and _). Each symbol
+    was presented equally spaced. To enhance the
+    signal quality, two additional settings were incorporated into
+    the original row-column speller design, namely, random-set
+    presentation and face stimuli. These additional settings
+    help to elicit stronger ERP responses by minimizing adjacency
+    distraction errors and by presenting a familiar face image. The
+    stimulus-time interval was set to 80 ms, and the inter-stimulus
+    interval (ISI) to 135 ms. A single iteration of stimulus presentation
+    in all rows and columns was considered a sequence. Therefore,
+    one sequence consisted of 12 stimulus flashes. A maximum
+    of five sequences (i.e., 60 flashes) was allotted without prolonged
+    inter-sequence intervals for each target character. After the end
+    of five sequences, 4.5 s were given to the user for identifying, locating,
+    and gazing at the next target character. The participant
+    was instructed to attend to the target symbol by counting the
+    number of times each target character had been flashed.
+    In the training session, subjects were asked to copy-spell
+    a given sentence, "NEURAL NETWORKS AND DEEP LEARNING"
+    (33 characters including spaces) by gazing at the target character
+    on the screen. The training session was performed in the offline
+    condition, and no feedback was provided to the subject during
+    the EEG recording. In the test session, subjects were instructed to
+    copy-spell "PATTERN RECOGNITION MACHINE LEARNING"
+    (36 characters including spaces), and the real-time EEG data were
+    analyzed based on the classifier that was calculated from the
+    training session data. The selected character from the subject’s
+    current EEG data was displayed in the top left area of the screen
+    at the end of the presentation (i.e., after five sequences).
+    Per participant, the collected EEG data for the ERP experiment consisted
+    of 1,980 and 2,160 trials (samples) for training and test phase, respectively.
+
+    Parameters
+    ----------
+    train_run: bool (default True)
+        if True, return runs corresponding to the training/offline phase (see paper).
+
+    test_run: bool (default: False for MI and SSVEP paradigms, True for ERP)
+        if True, return runs corresponding to the test/online phase (see paper). Beware that test_run
+        for MI and SSVEP do not have labels associated with trials: these runs could not be used in
+        classification tasks.
+
+    resting_state: bool (default False)
+        if True, return runs corresponding to the resting phases before and after recordings (see paper).
+
+    sessions: list of int (default [1,2])
+        the list of the sessions to load (2 available).
+
+    References
+    ----------
+    .. [1] Lee, M. H., Kwon, O. Y., Kim, Y. J., Kim, H. K., Lee, Y. E.,
+           Williamson, J., … Lee, S. W. (2019). EEG dataset and OpenBMI
+           toolbox for three BCI paradigms: An investigation into BCI
+           illiteracy. GigaScience, 8(5), 1–16.
+           https://doi.org/10.1093/gigascience/giz002
+    """
+
     __init__ = partialmethod(Lee2019.__init__, "ERP")


 class Lee2019_SSVEP(Lee2019):
+    """BMI/OpenBMI dataset for SSVEP.
+
+    Dataset from Lee et al 2019 [1]_.
+
+    **Dataset Description**
+
+    EEG signals were recorded with a sampling rate of 1,000 Hz and
+    collected with 62 Ag/AgCl electrodes. The EEG amplifier used
+    in the experiment was a BrainAmp (Brain Products; Munich,
+    Germany). The channels were nasion-referenced and grounded
+    to electrode AFz. Additionally, an EMG electrode recorded from
+    each flexor digitorum profundus muscle with the olecranon
+    used as reference. The EEG/EMG channel configuration and
+    indexing numbers are described in Fig. 1. The impedances of the
+    EEG electrodes were maintained below 10 kΩ during the entire
+    experiment.
+
+    SSVEP paradigm
+    Four target SSVEP stimuli were designed to flicker at 5.45, 6.67,
+    8.57, and 12 Hz and were presented in four positions (down,
+    right, left, and up, respectively) on a monitor. The designed
+    paradigm followed the conventional types of SSVEP-based BCI
+    systems that require four-direction movements. Participants
+    were asked to fixate the center of a black screen and then
+    to gaze in the direction where the target stimulus was
+    highlighted in a different color. Each SSVEP stimulus
+    was presented for 4 s with an ISI of 6 s. Each target frequency
+    was presented 25 times. Therefore, the corrected EEG data had
+    100 trials (4 classes x 25 trials) in the offline training phase and
+    another 100 trials in the online test phase. Visual feedback was
+    presented in the test phase; the estimated target frequency was
+    highlighted for 1 s with a red border at the end of each trial.
+
+    Parameters
+    ----------
+    train_run: bool (default True)
+        if True, return runs corresponding to the training/offline phase (see paper).
+
+    test_run: bool (default: False for MI and SSVEP paradigms, True for ERP)
+        if True, return runs corresponding to the test/online phase (see paper). Beware that test_run
+        for MI and SSVEP do not have labels associated with trials: these runs could not be used in
+        classification tasks.
+
+    resting_state: bool (default False)
+        if True, return runs corresponding to the resting phases before and after recordings (see paper).
+
+    sessions: list of int (default [1,2])
+        the list of the sessions to load (2 available).
+
+    References
+    ----------
+    .. [1] Lee, M. H., Kwon, O. Y., Kim, Y. J., Kim, H. K., Lee, Y. E.,
+           Williamson, J., … Lee, S. W. (2019). EEG dataset and OpenBMI
+           toolbox for three BCI paradigms: An investigation into BCI
+           illiteracy. GigaScience, 8(5), 1–16.
+           https://doi.org/10.1093/gigascience/giz002
+    """
+
     __init__ = partialmethod(Lee2019.__init__, "SSVEP")
diff --git a/moabb/datasets/ssvep_mamem.py b/moabb/datasets/ssvep_mamem.py
index af069ad6a..ec76cdd05 100644
--- a/moabb/datasets/ssvep_mamem.py
+++ b/moabb/datasets/ssvep_mamem.py
@@ -107,14 +107,12 @@ def _get_single_subject_data(self, subject):
             fnamed = fsn[osp.basename(fpath)]
             if fnamed[4] == "x":
                 continue
-            session_name = "session_" + fnamed[4]
+            session_name = "session_0"
             if self.code == "SSVEP MAMEM3":
-                # Since the data for each session is saved in 2 files,
-                # it is being saved in 2 runs
-                run_number = len(fnamed) - 10
-                run_name = "run_" + str(run_number)
+                repetition = len(fnamed) - 10
+                run_name = f"run_{(ord(fnamed[4])-97)*2 + repetition}"
             else:
-                run_name = "run_0"
+                run_name = f"run_{ord(fnamed[4])-97}"

             if self.code == "SSVEP MAMEM3":
                 m = loadmat(fpath)
@@ -124,10 +122,8 @@ def _get_single_subject_data(self, subject):
                 eeg = m["eeg"]
             else:
                 m = loadmat(fpath, squeeze_me=True)
-                ch_names = ["E{}".format(i + 1) for i in range(0, 256)]
+                ch_names = [f"E{i + 1}" for i in range(0, 256)]
                 ch_names.append("stim")
-                # ch_names = ["{}-{}".format(s, i) if s == "EEG" else s
-                #            for i, s in enumerate(record.sig_name)]
                 sfreq = 250
             if self.code == "SSVEP MAMEM2":
                 labels = m["labels"]
@@ -153,9 +149,9 @@ def data_path(
         if subject not in self.subject_list:
             raise (ValueError("Invalid subject number"))

-        sub = "{:02d}".format(subject)
+        sub = f"{subject:02d}"
         sign = self.code.split()[1]
-        key_dest = "MNE-{:s}-data".format(sign.lower())
+        key_dest = f"MNE-{sign.lower():s}-data"
         path = osp.join(get_dataset_path(sign, path), key_dest)

         filelist = fs_get_file_list(self.figshare_id)
@@ -273,8 +269,8 @@ class MAMEM1(BaseMAMEM):
     def __init__(self):
         super().__init__(
             events={"6.66": 1, "7.50": 2, "8.57": 3, "10.00": 4, "12.00": 5},
-            sessions_per_subject=3,
-            # 3 for S001, S003, S008, 4 for S004
+            sessions_per_subject=1,
+            # 5 runs per session, except 3 for S001, S003, S008, 4 for S004
             code="SSVEP MAMEM1",
             doi="https://arxiv.org/abs/1602.00904",
             figshare_id=2068677,
@@ -358,7 +354,7 @@ class MAMEM2(BaseMAMEM):
     def __init__(self):
         super().__init__(
             events={"6.66": 1, "7.50": 2, "8.57": 3, "10.00": 4, "12.00": 5},
-            sessions_per_subject=5,
+            sessions_per_subject=1,
             code="SSVEP MAMEM2",
             doi="https://arxiv.org/abs/1602.00904",
             figshare_id=3153409,
@@ -457,7 +453,7 @@ def __init__(self):
                 "10.00": 33026,
                 "12.00": 33025,
             },
-            sessions_per_subject=5,
+            sessions_per_subject=1,
             code="SSVEP MAMEM3",
             doi="https://arxiv.org/abs/1602.00904",
             figshare_id=3413851,
diff --git a/moabb/evaluations/evaluations.py b/moabb/evaluations/evaluations.py
index 898e94aae..302823f36 100644
--- a/moabb/evaluations/evaluations.py
+++ b/moabb/evaluations/evaluations.py
@@ -26,7 +26,55 @@


 class WithinSessionEvaluation(BaseEvaluation):
-    """Within Session evaluation."""
+    """Performance evaluation within session (k-fold cross-validation)
+
+    Within-session evaluation uses k-fold cross-validation to determine train
+    and test sets on separate sessions for each subject. It is possible to
+    estimate the performance on a subset of training examples to obtain
+    learning curves.
+
+    Parameters
+    ----------
+    n_perms :
+        Number of permutations to perform. If an array
+        is passed it has to be equal in size to the data_size array.
+        Values in this array must be monotonically decreasing (performing
+        more permutations for more data is not useful to reduce standard
+        error of the mean).
+        Default: None
+    data_size :
+        If None is passed, it performs conventional WithinSession evaluation.
+        Contains the policy to pick the datasizes to
+        evaluate, as well as the actual values. The dict has the
+        key 'policy' with either 'ratio' or 'per_class', and the key
+        'value' with the actual values as a numpy array. This array should be
+        sorted, such that values in data_size are strictly monotonically increasing.
+        Default: None
+    paradigm : Paradigm instance
+        The paradigm to use.
+    datasets : List of Dataset instance
+        The list of datasets to run the evaluation on. If None, the list of
+        compatible datasets will be retrieved from the paradigm instance.
+    random_state: int, RandomState instance, default=None
+        If not None, can guarantee same seed for shuffling examples.
+    n_jobs: int, default=1
+        Number of jobs for fitting of pipeline.
+    overwrite: bool, default=False
+        If true, overwrite the results.
+    error_score: "raise" or numeric, default="raise"
+        Value to assign to the score if an error occurs in estimator fitting. If set to
+        'raise', the error is raised.
+    suffix: str
+        Suffix for the results file.
+    hdf5_path: str
+        Specific path for storing the results.
+    additional_columns: None
+        Adding information to results.
+    return_epochs: bool, default=False
+        use MNE epochs to train pipelines.
+    mne_labels: bool, default=False
+        if returning MNE epochs, use the original dataset labels if True
+    """

     VALID_POLICIES = ["per_class", "ratio"]

@@ -36,25 +84,6 @@ def __init__(
         data_size: Optional[dict] = None,
         **kwargs,
     ):
-        """
-        Parameters
-        ----------
-        n_perms :
-            Number of permutations to perform. If an array
-            is passed it has to be equal in size to the data_size array.
-            Values in this array must be monotonically decreasing (performing
-            more permutations for more data is not useful to reduce standard
-            error of the mean).
-            Default: None
-        data_size :
-            If None is passed, it performs conventional WithinSession evaluation.
-            Contains the policy to pick the datasizes to
-            evaluate, as well as the actual values. The dict has the
-            key 'policy' with either 'ratio' or 'per_class', and the key
-            'value' with the actual values as an numpy array. This array should be
-            sorted, such that values in data_size are strictly monotonically increasing.
-            Default: None
-        """
         self.data_size = data_size
         self.n_perms = n_perms
         self.calculate_learning_curve = self.data_size is not None
@@ -290,12 +319,38 @@ def is_valid(self, dataset):


 class CrossSessionEvaluation(BaseEvaluation):
-    """Cross session Context.
+    """Cross-session performance evaluation.

     Evaluate performance of the pipeline across sessions but for a single
-    subject. Verifies that sufficient sessions are there for this to be
-    reasonable
-
+    subject. Verifies that there are at least two sessions before starting
+    the evaluation.
+
+    Parameters
+    ----------
+    paradigm : Paradigm instance
+        The paradigm to use.
+    datasets : List of Dataset instance
+        The list of datasets to run the evaluation on. If None, the list of
+        compatible datasets will be retrieved from the paradigm instance.
+    random_state: int, RandomState instance, default=None
+        If not None, can guarantee same seed for shuffling examples.
+    n_jobs: int, default=1
+        Number of jobs for fitting of pipeline.
+    overwrite: bool, default=False
+        If true, overwrite the results.
+    error_score: "raise" or numeric, default="raise"
+        Value to assign to the score if an error occurs in estimator fitting. If set to
+        'raise', the error is raised.
+    suffix: str
+        Suffix for the results file.
+    hdf5_path: str
+        Specific path for storing the results.
+    additional_columns: None
+        Adding information to results.
+    return_epochs: bool, default=False
+        use MNE epochs to train pipelines.
+    mne_labels: bool, default=False
+        if returning MNE epochs, use the original dataset labels if True
     """

     def evaluate(self, dataset, pipelines):
@@ -359,11 +414,37 @@ def is_valid(self, dataset):


 class CrossSubjectEvaluation(BaseEvaluation):
-    """Cross Subject evaluation Context.
+    """Cross-subject performance evaluation.

     Evaluate performance of the pipeline trained on all subjects but one,
     concatenating sessions.

+    Parameters
+    ----------
+    paradigm : Paradigm instance
+        The paradigm to use.
+    datasets : List of Dataset instance
+        The list of datasets to run the evaluation on. If None, the list of
+        compatible datasets will be retrieved from the paradigm instance.
+    random_state: int, RandomState instance, default=None
+        If not None, can guarantee same seed for shuffling examples.
+    n_jobs: int, default=1
+        Number of jobs for fitting of pipeline.
+    overwrite: bool, default=False
+        If true, overwrite the results.
+    error_score: "raise" or numeric, default="raise"
+        Value to assign to the score if an error occurs in estimator fitting. If set to
+        'raise', the error is raised.
+    suffix: str
+        Suffix for the results file.
+    hdf5_path: str
+        Specific path for storing the results.
+    additional_columns: None
+        Adding information to results.
+    return_epochs: bool, default=False
+        use MNE epochs to train pipelines.
+    mne_labels: bool, default=False
+        if returning MNE epochs, use the original dataset labels if True
     """

     def evaluate(self, dataset, pipelines):

From 3e7fc04ff407b92024fde86fc8b7c4168a19352f Mon Sep 17 00:00:00 2001
From: Divyesh Narayanan <47829318+Div12345@users.noreply.github.com>
Date: Wed, 2 Feb 2022 16:11:12 +0530
Subject: [PATCH 06/19] Progress bars (#258)

* Progress bars for downloads using pooch functionality

* Rectification of f-string in PhysionetMI

* Evaluations subject level progress bar
  CV test subject level in the case of CrossSubjectEvaluation

* Update poetry.lock

* Update pyproject.toml

* dependencies

* Apply suggestions from code review (mne.utils to tqdm direct)

Co-authored-by: Sylvain Chevallier

* Update poetry.lock

* tqdm arg

* Update whats_new.rst

* Update mistune dep

Co-authored-by: Sylvain Chevallier
---
 .github/workflows/test-devel.yml |   2 +-
 docs/source/whats_new.rst        |   1 +
 moabb/datasets/Weibo2014.py      |   9 +-
 moabb/datasets/Zhou2016.py       |   2 +-
 moabb/datasets/bbci_eeg_fnirs.py |   2 +
 moabb/datasets/download.py       |   6 +-
 moabb/datasets/physionet_mi.py   |   4 +-
 moabb/evaluations/evaluations.py |  18 +-
 poetry.lock                      | 469 ++++++++++++++++++-------------
 pyproject.toml                   |   4 +-
 10 files changed, 317 insertions(+), 200 deletions(-)

diff --git a/.github/workflows/test-devel.yml b/.github/workflows/test-devel.yml
index 81d37c34a..b1b870a3f 100644
--- a/.github/workflows/test-devel.yml
+++ b/.github/workflows/test-devel.yml
@@ -27,7 +27,7 @@ jobs:
           python-version: ${{ matrix.python-version }}

       - name: Install Poetry
-        uses: snok/install-poetry@v1.1.6
+        uses: snok/install-poetry@v1
        with:
          virtualenvs-create: true
          virtualenvs-in-project: true
diff --git a/docs/source/whats_new.rst b/docs/source/whats_new.rst
index 73f490e2c..55fe9f2f9 100644
--- a/docs/source/whats_new.rst
+++ b/docs/source/whats_new.rst
@@ -18,6 +18,7 @@ Develop branch

 Enhancements
 ~~~~~~~~~~~~
+- Progress bars, pooch, tqdm (:gh:`258` by `Divyesh Narayanan`_ and `Sylvain Chevallier`_)
 - Adding Test and Example for set_download_dir (:gh:`249` by
`Divyesh Narayanan`_) Bugs diff --git a/moabb/datasets/Weibo2014.py b/moabb/datasets/Weibo2014.py index af0d47ece..e41754ab7 100644 --- a/moabb/datasets/Weibo2014.py +++ b/moabb/datasets/Weibo2014.py @@ -32,7 +32,14 @@ def eeg_data_path(base_path, subject): def get_subjects(sub_inds, sub_names, ind): dataname = "data{}".format(ind) if not os.path.isfile(os.path.join(base_path, dataname + ".zip")): - retrieve(FILES[ind], None, dataname + ".zip", base_path, processor=Unzip()) + retrieve( + FILES[ind], + None, + dataname + ".zip", + base_path, + processor=Unzip(), + progressbar=True, + ) for fname in os.listdir(os.path.join(base_path, dataname + ".zip.unzip")): for ind, prefix in zip(sub_inds, sub_names): diff --git a/moabb/datasets/Zhou2016.py b/moabb/datasets/Zhou2016.py index 414c5a9e6..3b143aa51 100644 --- a/moabb/datasets/Zhou2016.py +++ b/moabb/datasets/Zhou2016.py @@ -22,7 +22,7 @@ def local_data_path(base_path, subject): if not os.path.isdir(os.path.join(base_path, "subject_{}".format(subject))): if not os.path.isdir(os.path.join(base_path, "data")): - retrieve(DATA_PATH, None, fname="data.zip", path=base_path) + retrieve(DATA_PATH, None, fname="data.zip", path=base_path, progressbar=True) with z.ZipFile(os.path.join(base_path, "data.zip"), "r") as f: f.extractall(base_path) os.remove(os.path.join(base_path, "data.zip")) diff --git a/moabb/datasets/bbci_eeg_fnirs.py b/moabb/datasets/bbci_eeg_fnirs.py index 067fbc880..ac31b6d3e 100644 --- a/moabb/datasets/bbci_eeg_fnirs.py +++ b/moabb/datasets/bbci_eeg_fnirs.py @@ -41,6 +41,7 @@ def eeg_data_path(base_path, subject, accept): None, fname="EEG.zip", path=base_path, + progressbar=True, ) with z.ZipFile(op.join(base_path, "EEG.zip"), "r") as f: f.extractall(op.join(base_path, "EEG")) @@ -65,6 +66,7 @@ def fnirs_data_path(path, subject, accept): None, fname="fNIRS.zip", path=path, + progressbar=True, ) if not op.isdir(op.join(path, "NIRS")): os.makedirs(op.join(path, "NIRS")) diff --git a/moabb/datasets/download.py b/moabb/datasets/download.py index d04e2d27f..dff12b432 100644 --- a/moabb/datasets/download.py +++ b/moabb/datasets/download.py @@ -146,7 +146,11 @@ def data_dl(url, sign, path=None, force_update=False, verbose=None): else: known_hash = file_hash(destination) dlpath = retrieve( - url, known_hash, fname=osp.basename(url), path=osp.dirname(destination) + url, + known_hash, + fname=osp.basename(url), + path=osp.dirname(destination), + progressbar=True, ) return dlpath diff --git a/moabb/datasets/physionet_mi.py b/moabb/datasets/physionet_mi.py index 02bd84ee3..e4ad3eae8 100644 --- a/moabb/datasets/physionet_mi.py +++ b/moabb/datasets/physionet_mi.py @@ -123,7 +123,7 @@ def _get_single_subject_data(self, subject): stim[stim == "T1"] = "left_hand" stim[stim == "T2"] = "right_hand" raw.annotations.description = stim - data["run_{idx}"] = raw + data[f"run_{idx}"] = raw idx += 1 # feet runs @@ -136,7 +136,7 @@ def _get_single_subject_data(self, subject): stim[stim == "T1"] = "hands" stim[stim == "T2"] = "feet" raw.annotations.description = stim - data["run_{idx}"] = raw + data[f"run_{idx}"] = raw idx += 1 return {"session_0": data} diff --git a/moabb/evaluations/evaluations.py b/moabb/evaluations/evaluations.py index 302823f36..a3e2d1403 100644 --- a/moabb/evaluations/evaluations.py +++ b/moabb/evaluations/evaluations.py @@ -15,6 +15,7 @@ ) from sklearn.model_selection._validation import _fit_and_score, _score from sklearn.preprocessing import LabelEncoder +from tqdm import tqdm from moabb.evaluations.base import BaseEvaluation @@ 
-122,7 +123,8 @@ def __init__( super().__init__(**kwargs) def _evaluate(self, dataset, pipelines): - for subject in dataset.subject_list: + # Progress Bar at subject level + for subject in tqdm(dataset.subject_list, desc=f"{dataset.code}-WithinSession"): # check if we already have result for this subject/pipeline # we might need a better granularity, if we query the DB run_pipes = self.results.not_yet_computed(pipelines, dataset, subject) @@ -235,7 +237,8 @@ def score_explicit(self, clf, X_train, y_train, X_test, y_test): return score, duration def _evaluate_learning_curve(self, dataset, pipelines): - for subject in dataset.subject_list: + # Progressbar at subject level + for subject in tqdm(dataset.subject_list, desc=f"{dataset.code}-WithinSession"): # check if we already have result for this subject/pipeline # we might need a better granularity, if we query the DB run_pipes = self.results.not_yet_computed(pipelines, dataset, subject) @@ -356,7 +359,8 @@ class CrossSessionEvaluation(BaseEvaluation): def evaluate(self, dataset, pipelines): if not self.is_valid(dataset): raise AssertionError("Dataset is not appropriate for evaluation") - for subject in dataset.subject_list: + # Progressbar at subject level + for subject in tqdm(dataset.subject_list, desc=f"{dataset.code}-CrossSession"): # check if we already have result for this subject/pipeline # we might need a better granularity, if we query the DB run_pipes = self.results.not_yet_computed(pipelines, dataset, subject) @@ -471,12 +475,18 @@ def evaluate(self, dataset, pipelines): # extract metadata groups = metadata.subject.values sessions = metadata.session.values + n_subjects = len(dataset.subject_list) scorer = get_scorer(self.paradigm.scoring) # perform leave one subject out CV cv = LeaveOneGroupOut() - for train, test in cv.split(X, y, groups): + # Progressbar at subject level + for train, test in tqdm( + cv.split(X, y, groups), + total=n_subjects, + desc=f"{dataset.code}-CrossSubject", + ): subject = groups[test[0]] # now we can check if this subject has results diff --git a/poetry.lock b/poetry.lock index 68dc5e031..098a7c9b7 100644 --- a/poetry.lock +++ b/poetry.lock @@ -25,21 +25,6 @@ python-versions = ">=2.7, !=3.0.*, !=3.1.*, !=3.2.*, !=3.3.*" [package.dependencies] pytz = ">=2015.7" -[[package]] -name = "backports.entry-points-selectable" -version = "1.1.0" -description = "Compatibility shim providing selectable entry points for older implementations" -category = "dev" -optional = false -python-versions = ">=2.7" - -[package.dependencies] -importlib-metadata = {version = "*", markers = "python_version < \"3.8\""} - -[package.extras] -docs = ["sphinx", "jaraco.packaging (>=8.2)", "rst.linker (>=1.9)"] -testing = ["pytest (>=4.6)", "pytest-flake8", "pytest-cov", "pytest-black (>=0.3.7)", "pytest-mypy", "pytest-checkdocs (>=2.4)", "pytest-enabler (>=1.0.1)"] - [[package]] name = "cached-property" version = "1.5.2" @@ -66,7 +51,7 @@ python-versions = ">=3.6.1" [[package]] name = "charset-normalizer" -version = "2.0.7" +version = "2.0.11" description = "The Real First Universal Charset Detector. Open, modern and actively maintained alternative to Chardet." category = "main" optional = false @@ -79,7 +64,7 @@ unicode_backport = ["unicodedata2"] name = "colorama" version = "0.4.4" description = "Cross-platform colored terminal text." 
-category = "dev" +category = "main" optional = false python-versions = ">=2.7, !=3.0.*, !=3.1.*, !=3.2.*, !=3.3.*, !=3.4.*" @@ -96,18 +81,15 @@ toml = ["toml"] [[package]] name = "cycler" -version = "0.10.0" +version = "0.11.0" description = "Composable style cycles" category = "main" optional = false -python-versions = "*" - -[package.dependencies] -six = "*" +python-versions = ">=3.6" [[package]] name = "distlib" -version = "0.3.3" +version = "0.3.4" description = "Distribution utilities" category = "dev" optional = false @@ -123,19 +105,40 @@ python-versions = ">=2.7, !=3.0.*, !=3.1.*, !=3.2.*, !=3.3.*, !=3.4.*" [[package]] name = "filelock" -version = "3.3.1" +version = "3.4.2" description = "A platform independent file lock." category = "dev" optional = false -python-versions = ">=3.6" +python-versions = ">=3.7" [package.extras] docs = ["furo (>=2021.8.17b43)", "sphinx (>=4.1)", "sphinx-autodoc-typehints (>=1.12)"] testing = ["covdefaults (>=1.2.0)", "coverage (>=4)", "pytest (>=4)", "pytest-cov", "pytest-timeout (>=1.4.2)"] +[[package]] +name = "fonttools" +version = "4.29.1" +description = "Tools to manipulate font files" +category = "main" +optional = false +python-versions = ">=3.7" + +[package.extras] +all = ["fs (>=2.2.0,<3)", "lxml (>=4.0,<5)", "zopfli (>=0.1.4)", "lz4 (>=1.7.4.2)", "matplotlib", "sympy", "skia-pathops (>=0.5.0)", "brotlicffi (>=0.8.0)", "scipy", "brotli (>=1.0.1)", "munkres", "unicodedata2 (>=14.0.0)", "xattr"] +graphite = ["lz4 (>=1.7.4.2)"] +interpolatable = ["scipy", "munkres"] +lxml = ["lxml (>=4.0,<5)"] +pathops = ["skia-pathops (>=0.5.0)"] +plot = ["matplotlib"] +symfont = ["sympy"] +type1 = ["xattr"] +ufo = ["fs (>=2.2.0,<3)"] +unicode = ["unicodedata2 (>=14.0.0)"] +woff = ["zopfli (>=0.1.4)", "brotlicffi (>=0.8.0)", "brotli (>=1.0.1)"] + [[package]] name = "h5py" -version = "3.4.0" +version = "3.6.0" description = "Read and write HDF5 files from Python" category = "main" optional = false @@ -143,22 +146,18 @@ python-versions = ">=3.7" [package.dependencies] cached-property = {version = "*", markers = "python_version < \"3.8\""} -numpy = [ - {version = ">=1.14.5", markers = "python_version == \"3.7\""}, - {version = ">=1.17.5", markers = "python_version == \"3.8\""}, - {version = ">=1.19.3", markers = "python_version >= \"3.9\""}, -] +numpy = ">=1.14.5" [[package]] name = "identify" -version = "2.3.0" +version = "2.4.7" description = "File identification library for Python" category = "dev" optional = false -python-versions = ">=3.6.1" +python-versions = ">=3.7" [package.extras] -license = ["editdistance-s"] +license = ["ukkonen"] [[package]] name = "idna" @@ -170,7 +169,7 @@ python-versions = ">=3.5" [[package]] name = "imagesize" -version = "1.2.0" +version = "1.3.0" description = "Getting image size from png/jpeg/jpeg2000/gif file" category = "dev" optional = false @@ -178,11 +177,11 @@ python-versions = ">=2.7, !=3.0.*, !=3.1.*, !=3.2.*, !=3.3.*" [[package]] name = "importlib-metadata" -version = "4.8.1" +version = "4.10.1" description = "Read metadata from Python packages" category = "dev" optional = false -python-versions = ">=3.6" +python-versions = ">=3.7" [package.dependencies] typing-extensions = {version = ">=3.6.4", markers = "python_version < \"3.8\""} @@ -191,11 +190,11 @@ zipp = ">=0.5" [package.extras] docs = ["sphinx", "jaraco.packaging (>=8.2)", "rst.linker (>=1.9)"] perf = ["ipython"] -testing = ["pytest (>=4.6)", "pytest-checkdocs (>=2.4)", "pytest-flake8", "pytest-cov", "pytest-enabler (>=1.0.1)", "packaging", "pep517", "pyfakefs", 
"flufl.flake8", "pytest-perf (>=0.9.2)", "pytest-black (>=0.3.7)", "pytest-mypy", "importlib-resources (>=1.3)"] +testing = ["pytest (>=6)", "pytest-checkdocs (>=2.4)", "pytest-flake8", "pytest-cov", "pytest-enabler (>=1.0.1)", "packaging", "pyfakefs", "flufl.flake8", "pytest-perf (>=0.9.2)", "pytest-black (>=0.3.7)", "pytest-mypy", "importlib-resources (>=1.3)"] [[package]] name = "jinja2" -version = "3.0.2" +version = "3.0.3" description = "A very fast and expressive template engine." category = "dev" optional = false @@ -245,7 +244,7 @@ python-versions = ">=3.6" [[package]] name = "matplotlib" -version = "3.4.3" +version = "3.5.1" description = "Python plotting package" category = "main" optional = false @@ -253,11 +252,14 @@ python-versions = ">=3.7" [package.dependencies] cycler = ">=0.10" +fonttools = ">=4.22.0" kiwisolver = ">=1.0.1" -numpy = ">=1.16" +numpy = ">=1.17" +packaging = ">=20.0" pillow = ">=6.2.0" pyparsing = ">=2.2.1" python-dateutil = ">=2.7" +setuptools_scm = ">=4" [[package]] name = "mistune" @@ -269,16 +271,20 @@ python-versions = "*" [[package]] name = "mne" -version = "0.23.4" -description = "MNE python project for MEG and EEG data analysis." +version = "0.24.1" +description = "MNE-Python project for MEG and EEG data analysis." category = "main" optional = false -python-versions = ">=3.6" +python-versions = ">=3.7" [package.dependencies] numpy = ">=1.15.4" scipy = ">=1.1.0" +[package.extras] +data = ["tqdm", "pooch (>=1.5)"] +test = ["pytest (!=4.6.0)", "pytest-cov", "pytest-timeout", "pytest-harvest", "flake8", "flake8-array-spacing", "numpydoc", "codespell", "pydocstyle", "check-manifest", "twine", "wheel", "pooch", "nitime", "nbclient", "sphinx-gallery", "eeglabio", "edflib-python", "imageio-ffmpeg"] + [[package]] name = "nodeenv" version = "1.6.0" @@ -297,29 +303,29 @@ python-versions = ">=3.7" [[package]] name = "numpydoc" -version = "1.1.0" +version = "1.2" description = "Sphinx extension to support docstrings in Numpy format" category = "dev" optional = false -python-versions = ">=3.5" +python-versions = ">=3.7" [package.dependencies] -Jinja2 = ">=2.3" -sphinx = ">=1.6.5" +Jinja2 = ">=2.10" +sphinx = ">=1.8" [package.extras] -testing = ["matplotlib", "pytest", "pytest-cov"] +testing = ["pytest", "pytest-cov", "matplotlib"] [[package]] name = "packaging" -version = "21.0" +version = "21.3" description = "Core utilities for Python packages" category = "main" optional = false python-versions = ">=3.6" [package.dependencies] -pyparsing = ">=2.0.2" +pyparsing = ">=2.0.2,<3.0.5 || >3.0.5" [[package]] name = "pandas" @@ -347,11 +353,11 @@ python-versions = ">=3.7" [[package]] name = "platformdirs" -version = "2.4.0" +version = "2.4.1" description = "A small Python module for determining appropriate platform-specific dirs, e.g. a \"user data dir\"." category = "dev" optional = false -python-versions = ">=3.6" +python-versions = ">=3.7" [package.extras] docs = ["Sphinx (>=4)", "furo (>=2021.7.5b38)", "proselint (>=0.10.2)", "sphinx-autodoc-typehints (>=1.12)"] @@ -359,20 +365,25 @@ test = ["appdirs (==1.4.4)", "pytest (>=6)", "pytest-cov (>=2.7)", "pytest-mock [[package]] name = "pooch" -version = "1.5.2" -description = "Pooch manages your Python library's sample data files: it automatically downloads and stores them in a local directory, with support for versioning and corruption checks." 
+version = "1.6.0" +description = "\"Pooch manages your Python library's sample data files: it automatically downloads and stores them in a local directory, with support for versioning and corruption checks.\"" category = "main" optional = false python-versions = ">=3.6" [package.dependencies] -appdirs = "*" -packaging = "*" -requests = "*" +appdirs = ">=1.3.0" +packaging = ">=20.0" +requests = ">=2.19.0" + +[package.extras] +progress = ["tqdm (>=4.41.0,<5.0.0)"] +sftp = ["paramiko (>=2.7.0)"] +xxhash = ["xxhash (>=1.4.3)"] [[package]] name = "pre-commit" -version = "2.15.0" +version = "2.17.0" description = "A framework for managing and maintaining multi-language pre-commit hooks." category = "dev" optional = false @@ -389,7 +400,7 @@ virtualenv = ">=20.0.8" [[package]] name = "pygments" -version = "2.10.0" +version = "2.11.2" description = "Pygments is a syntax highlighting package written in Python." category = "dev" optional = false @@ -397,11 +408,14 @@ python-versions = ">=3.5" [[package]] name = "pyparsing" -version = "2.4.7" +version = "3.0.7" description = "Python parsing module" category = "main" optional = false -python-versions = ">=2.6, !=3.0.*, !=3.1.*, !=3.2.*" +python-versions = ">=3.6" + +[package.extras] +diagrams = ["jinja2", "railroad-diagrams"] [[package]] name = "pyriemann" @@ -447,7 +461,7 @@ python-versions = ">=2.7, !=3.0.*, !=3.1.*, !=3.2.*, !=3.3.*, !=3.4.*, !=3.5.*" [[package]] name = "requests" -version = "2.26.0" +version = "2.27.1" description = "Python HTTP for Humans." category = "main" optional = false @@ -465,7 +479,7 @@ use_chardet_on_py3 = ["chardet (>=3.0.2,<5)"] [[package]] name = "scikit-learn" -version = "1.0" +version = "1.0.2" description = "A set of python modules for machine learning and data mining" category = "main" optional = false @@ -478,10 +492,10 @@ scipy = ">=1.1.0" threadpoolctl = ">=2.0.0" [package.extras] -benchmark = ["matplotlib (>=2.2.2)", "pandas (>=0.25.0)", "memory-profiler (>=0.57.0)"] -docs = ["matplotlib (>=2.2.2)", "scikit-image (>=0.14.5)", "pandas (>=0.25.0)", "seaborn (>=0.9.0)", "memory-profiler (>=0.57.0)", "sphinx (>=4.0.1)", "sphinx-gallery (>=0.7.0)", "numpydoc (>=1.0.0)", "Pillow (>=7.1.2)", "sphinx-prompt (>=1.3.0)", "sphinxext-opengraph (>=0.4.2)"] -examples = ["matplotlib (>=2.2.2)", "scikit-image (>=0.14.5)", "pandas (>=0.25.0)", "seaborn (>=0.9.0)"] -tests = ["matplotlib (>=2.2.2)", "scikit-image (>=0.14.5)", "pandas (>=0.25.0)", "pytest (>=5.0.1)", "pytest-cov (>=2.9.0)", "flake8 (>=3.8.2)", "black (>=21.6b0)", "mypy (>=0.770)", "pyamg (>=4.0.0)"] +benchmark = ["matplotlib (>=2.2.3)", "pandas (>=0.25.0)", "memory-profiler (>=0.57.0)"] +docs = ["matplotlib (>=2.2.3)", "scikit-image (>=0.14.5)", "pandas (>=0.25.0)", "seaborn (>=0.9.0)", "memory-profiler (>=0.57.0)", "sphinx (>=4.0.1)", "sphinx-gallery (>=0.7.0)", "numpydoc (>=1.0.0)", "Pillow (>=7.1.2)", "sphinx-prompt (>=1.3.0)", "sphinxext-opengraph (>=0.4.2)"] +examples = ["matplotlib (>=2.2.3)", "scikit-image (>=0.14.5)", "pandas (>=0.25.0)", "seaborn (>=0.9.0)"] +tests = ["matplotlib (>=2.2.3)", "scikit-image (>=0.14.5)", "pandas (>=0.25.0)", "pytest (>=5.0.1)", "pytest-cov (>=2.9.0)", "flake8 (>=3.8.2)", "black (>=21.6b0)", "mypy (>=0.770)", "pyamg (>=4.0.0)"] [[package]] name = "scipy" @@ -508,6 +522,22 @@ numpy = ">=1.15" pandas = ">=0.23" scipy = ">=1.0" +[[package]] +name = "setuptools-scm" +version = "6.4.2" +description = "the blessed package to manage your versions by scm tags" +category = "main" +optional = false +python-versions = ">=3.6" + 
+[package.dependencies] +packaging = ">=20.0" +tomli = ">=1.0.0" + +[package.extras] +test = ["pytest (>=6.2)", "virtualenv (>20)"] +toml = ["setuptools (>=42)"] + [[package]] name = "six" version = "1.16.0" @@ -518,7 +548,7 @@ python-versions = ">=2.7, !=3.0.*, !=3.1.*, !=3.2.*" [[package]] name = "snowballstemmer" -version = "2.1.0" +version = "2.2.0" description = "This package provides 29 stemmers for 28 languages generated from Snowball algorithms." category = "dev" optional = false @@ -662,7 +692,7 @@ resolved_reference = "d3acc59d34e47a4f36773b3df86f0842089f65cd" [[package]] name = "threadpoolctl" -version = "3.0.0" +version = "3.1.0" description = "threadpoolctl" category = "main" optional = false @@ -676,17 +706,41 @@ category = "dev" optional = false python-versions = ">=2.6, !=3.0.*, !=3.1.*, !=3.2.*" +[[package]] +name = "tomli" +version = "2.0.0" +description = "A lil' TOML parser" +category = "main" +optional = false +python-versions = ">=3.7" + +[[package]] +name = "tqdm" +version = "4.62.3" +description = "Fast, Extensible Progress Meter" +category = "main" +optional = false +python-versions = "!=3.0.*,!=3.1.*,!=3.2.*,!=3.3.*,>=2.7" + +[package.dependencies] +colorama = {version = "*", markers = "platform_system == \"Windows\""} + +[package.extras] +dev = ["py-make (>=0.1.0)", "twine", "wheel"] +notebook = ["ipywidgets (>=6)"] +telegram = ["requests"] + [[package]] name = "typing-extensions" -version = "3.10.0.2" -description = "Backported and Experimental Type Hints for Python 3.5+" +version = "4.0.1" +description = "Backported and Experimental Type Hints for Python 3.6+" category = "dev" optional = false -python-versions = "*" +python-versions = ">=3.6" [[package]] name = "urllib3" -version = "1.26.7" +version = "1.26.8" description = "HTTP library with thread-safe connection pooling, file post, and more." 
category = "main" optional = false @@ -699,40 +753,39 @@ socks = ["PySocks (>=1.5.6,!=1.5.7,<2.0)"] [[package]] name = "virtualenv" -version = "20.8.1" +version = "20.13.0" description = "Virtual Python Environment builder" category = "dev" optional = false python-versions = "!=3.0.*,!=3.1.*,!=3.2.*,!=3.3.*,!=3.4.*,>=2.7" [package.dependencies] -"backports.entry-points-selectable" = ">=1.0.4" distlib = ">=0.3.1,<1" -filelock = ">=3.0.0,<4" +filelock = ">=3.2,<4" importlib-metadata = {version = ">=0.12", markers = "python_version < \"3.8\""} platformdirs = ">=2,<3" six = ">=1.9.0,<2" [package.extras] -docs = ["proselint (>=0.10.2)", "sphinx (>=3)", "sphinx-argparse (>=0.2.5)", "sphinx-rtd-theme (>=0.4.3)", "towncrier (>=19.9.0rc1)"] +docs = ["proselint (>=0.10.2)", "sphinx (>=3)", "sphinx-argparse (>=0.2.5)", "sphinx-rtd-theme (>=0.4.3)", "towncrier (>=21.3)"] testing = ["coverage (>=4)", "coverage-enable-subprocess (>=1)", "flaky (>=3)", "pytest (>=4)", "pytest-env (>=0.6.2)", "pytest-freezegun (>=0.4.1)", "pytest-mock (>=2)", "pytest-randomly (>=1)", "pytest-timeout (>=1)", "packaging (>=20.0)"] [[package]] name = "zipp" -version = "3.6.0" +version = "3.7.0" description = "Backport of pathlib-compatible object wrapper for zip files" category = "dev" optional = false -python-versions = ">=3.6" +python-versions = ">=3.7" [package.extras] docs = ["sphinx", "jaraco.packaging (>=8.2)", "rst.linker (>=1.9)"] -testing = ["pytest (>=4.6)", "pytest-checkdocs (>=2.4)", "pytest-flake8", "pytest-cov", "pytest-enabler (>=1.0.1)", "jaraco.itertools", "func-timeout", "pytest-black (>=0.3.7)", "pytest-mypy"] +testing = ["pytest (>=6)", "pytest-checkdocs (>=2.4)", "pytest-flake8", "pytest-cov", "pytest-enabler (>=1.0.1)", "jaraco.itertools", "func-timeout", "pytest-black (>=0.3.7)", "pytest-mypy"] [metadata] lock-version = "1.1" python-versions = "^3.7" -content-hash = "f4bff93c1e1ed8cabc50e24bad9ccc47927f1a446889e2a38e2e8213e5c666ce" +content-hash = "d8455fb5c62ccfe69f1688fcede1d8ecede6bc530ad318df77daf924ef47c112" [metadata.files] alabaster = [ @@ -747,10 +800,6 @@ babel = [ {file = "Babel-2.9.1-py2.py3-none-any.whl", hash = "sha256:ab49e12b91d937cd11f0b67cb259a57ab4ad2b59ac7a3b41d6c06c0ac5b0def9"}, {file = "Babel-2.9.1.tar.gz", hash = "sha256:bc0c176f9f6a994582230df350aa6e05ba2ebe4b3ac317eab29d9be5d2768da0"}, ] -"backports.entry-points-selectable" = [ - {file = "backports.entry_points_selectable-1.1.0-py2.py3-none-any.whl", hash = "sha256:a6d9a871cde5e15b4c4a53e3d43ba890cc6861ec1332c9c2428c92f977192acc"}, - {file = "backports.entry_points_selectable-1.1.0.tar.gz", hash = "sha256:988468260ec1c196dab6ae1149260e2f5472c9110334e5d51adcb77867361f6a"}, -] cached-property = [ {file = "cached-property-1.5.2.tar.gz", hash = "sha256:9fa5755838eecbb2d234c3aa390bd80fbd3ac6b6869109bfc1b499f7bd89a130"}, {file = "cached_property-1.5.2-py2.py3-none-any.whl", hash = "sha256:df4f613cf7ad9a588cc381aaf4a512d26265ecebd5eb9e1ba12f1319eb85a6a0"}, @@ -764,8 +813,8 @@ cfgv = [ {file = "cfgv-3.3.1.tar.gz", hash = "sha256:f5a830efb9ce7a445376bb66ec94c638a9787422f96264c98edc6bdeed8ab736"}, ] charset-normalizer = [ - {file = "charset-normalizer-2.0.7.tar.gz", hash = "sha256:e019de665e2bcf9c2b64e2e5aa025fa991da8720daa3c1138cadd2fd1856aed0"}, - {file = "charset_normalizer-2.0.7-py3-none-any.whl", hash = "sha256:f7af805c321bfa1ce6714c51f254e0d5bb5e5834039bc17db7ebe3a4cec9492b"}, + {file = "charset-normalizer-2.0.11.tar.gz", hash = "sha256:98398a9d69ee80548c762ba991a4728bfc3836768ed226b3945908d1a688371c"}, + {file = 
"charset_normalizer-2.0.11-py3-none-any.whl", hash = "sha256:2842d8f5e82a1f6aa437380934d5e1cd4fcf2003b06fed6940769c164a480a45"}, ] colorama = [ {file = "colorama-0.4.4-py2.py3-none-any.whl", hash = "sha256:9f47eda37229f68eee03b24b9748937c7dc3868f906e8ba69fbcbdd3bc5dc3e2"}, @@ -826,52 +875,62 @@ coverage = [ {file = "coverage-5.5.tar.gz", hash = "sha256:ebe78fe9a0e874362175b02371bdfbee64d8edc42a044253ddf4ee7d3c15212c"}, ] cycler = [ - {file = "cycler-0.10.0-py2.py3-none-any.whl", hash = "sha256:1d8a5ae1ff6c5cf9b93e8811e581232ad8920aeec647c37316ceac982b08cb2d"}, - {file = "cycler-0.10.0.tar.gz", hash = "sha256:cd7b2d1018258d7247a71425e9f26463dfb444d411c39569972f4ce586b0c9d8"}, + {file = "cycler-0.11.0-py3-none-any.whl", hash = "sha256:3a27e95f763a428a739d2add979fa7494c912a32c17c4c38c4d5f082cad165a3"}, + {file = "cycler-0.11.0.tar.gz", hash = "sha256:9c87405839a19696e837b3b818fed3f5f69f16f1eec1a1ad77e043dcea9c772f"}, ] distlib = [ - {file = "distlib-0.3.3-py2.py3-none-any.whl", hash = "sha256:c8b54e8454e5bf6237cc84c20e8264c3e991e824ef27e8f1e81049867d861e31"}, - {file = "distlib-0.3.3.zip", hash = "sha256:d982d0751ff6eaaab5e2ec8e691d949ee80eddf01a62eaa96ddb11531fe16b05"}, + {file = "distlib-0.3.4-py2.py3-none-any.whl", hash = "sha256:6564fe0a8f51e734df6333d08b8b94d4ea8ee6b99b5ed50613f731fd4089f34b"}, + {file = "distlib-0.3.4.zip", hash = "sha256:e4b58818180336dc9c529bfb9a0b58728ffc09ad92027a3f30b7cd91e3458579"}, ] docutils = [ {file = "docutils-0.16-py2.py3-none-any.whl", hash = "sha256:0c5b78adfbf7762415433f5515cd5c9e762339e23369dbe8000d84a4bf4ab3af"}, {file = "docutils-0.16.tar.gz", hash = "sha256:c2de3a60e9e7d07be26b7f2b00ca0309c207e06c100f9cc2a94931fc75a478fc"}, ] filelock = [ - {file = "filelock-3.3.1-py3-none-any.whl", hash = "sha256:2b5eb3589e7fdda14599e7eb1a50e09b4cc14f34ed98b8ba56d33bfaafcbef2f"}, - {file = "filelock-3.3.1.tar.gz", hash = "sha256:34a9f35f95c441e7b38209775d6e0337f9a3759f3565f6c5798f19618527c76f"}, + {file = "filelock-3.4.2-py3-none-any.whl", hash = "sha256:cf0fc6a2f8d26bd900f19bf33915ca70ba4dd8c56903eeb14e1e7a2fd7590146"}, + {file = "filelock-3.4.2.tar.gz", hash = "sha256:38b4f4c989f9d06d44524df1b24bd19e167d851f19b50bf3e3559952dddc5b80"}, +] +fonttools = [ + {file = "fonttools-4.29.1-py3-none-any.whl", hash = "sha256:1933415e0fbdf068815cb1baaa1f159e17830215f7e8624e5731122761627557"}, + {file = "fonttools-4.29.1.zip", hash = "sha256:2b18a172120e32128a80efee04cff487d5d140fe7d817deb648b2eee023a40e4"}, ] h5py = [ - {file = "h5py-3.4.0-cp37-cp37m-macosx_10_9_x86_64.whl", hash = "sha256:aa511bd05a9174c3008becdc93bd5785e254d34a6ab5f0425e6b2fbbc88afa6d"}, - {file = "h5py-3.4.0-cp37-cp37m-manylinux_2_12_x86_64.manylinux2010_x86_64.whl", hash = "sha256:708ddff49af12c01d77e0f9782bb1a0364d96459ec0d1f85d90baea6d203764b"}, - {file = "h5py-3.4.0-cp37-cp37m-win_amd64.whl", hash = "sha256:be2a545f09074546f73305e0db6d36aaf1fb6ea2fcf1add2ce306b9c7f78e55a"}, - {file = "h5py-3.4.0-cp38-cp38-macosx_10_9_x86_64.whl", hash = "sha256:0b0f002f5f341afe7d3d7e15198e80d9021da24a4d182d88068d79bfc91fba86"}, - {file = "h5py-3.4.0-cp38-cp38-manylinux_2_12_x86_64.manylinux2010_x86_64.whl", hash = "sha256:46917f20021dde02865572a5fd2bb620945f7b7cd268bdc8e3f5720c32b38140"}, - {file = "h5py-3.4.0-cp38-cp38-win_amd64.whl", hash = "sha256:8e809149f95d9a3a33b1279bfbf894c78635a5497e8d5ac37420fa5ec0cf4f29"}, - {file = "h5py-3.4.0-cp39-cp39-macosx_10_9_x86_64.whl", hash = "sha256:8745e5159830d7975a9cf38690455f22601509cda04de29b7e88b3fbdc747611"}, - {file = 
"h5py-3.4.0-cp39-cp39-manylinux_2_12_x86_64.manylinux2010_x86_64.whl", hash = "sha256:bb4ce46095e3b16c872aaf62adad33f40039fecae04674eb62c035386affcb91"}, - {file = "h5py-3.4.0-cp39-cp39-win_amd64.whl", hash = "sha256:1edf33e722d47c6eb3878d51173b23dd848939f006f41b498bafceff87fb4cbd"}, - {file = "h5py-3.4.0.tar.gz", hash = "sha256:ee1c683d91ab010d5e85cb61e8f9e7ee0d8eab545bf3dd50a9618f1d0e8f615e"}, + {file = "h5py-3.6.0-cp310-cp310-macosx_10_9_x86_64.whl", hash = "sha256:a5320837c60870911645e9a935099bdb2be6a786fcf0dac5c860f3b679e2de55"}, + {file = "h5py-3.6.0-cp310-cp310-manylinux_2_12_x86_64.manylinux2010_x86_64.whl", hash = "sha256:98646e659bf8591a2177e12a4461dced2cad72da0ba4247643fd118db88880d2"}, + {file = "h5py-3.6.0-cp310-cp310-win_amd64.whl", hash = "sha256:5996ff5adefd2d68c330a4265b6ef92e51b2fc674834a5990add5033bf109e20"}, + {file = "h5py-3.6.0-cp37-cp37m-macosx_10_9_x86_64.whl", hash = "sha256:c9a5529343a619fea777b7caa27d493595b28b5af8b005e8d1817559fcccf493"}, + {file = "h5py-3.6.0-cp37-cp37m-manylinux_2_12_x86_64.manylinux2010_x86_64.whl", hash = "sha256:e2b49c48df05e19bb20b400b7ff7dc6f1ee36b84dc717c3771c468b33697b466"}, + {file = "h5py-3.6.0-cp37-cp37m-manylinux_2_17_aarch64.manylinux2014_aarch64.whl", hash = "sha256:cd9447633b0bafaf82190d9a8d56f3cb2e8d30169483aee67d800816e028190a"}, + {file = "h5py-3.6.0-cp37-cp37m-win_amd64.whl", hash = "sha256:1c5acc660c458421e88c4c5fe092ce15923adfac4c732af1ac4fced683a5ea97"}, + {file = "h5py-3.6.0-cp38-cp38-macosx_10_9_x86_64.whl", hash = "sha256:35ab552c6f0a93365b3cb5664a5305f3920daa0a43deb5b2c547c52815ec46b9"}, + {file = "h5py-3.6.0-cp38-cp38-manylinux_2_12_x86_64.manylinux2010_x86_64.whl", hash = "sha256:542781d50e1182b8fb619b1265dfe1c765e18215f818b0ab28b2983c28471325"}, + {file = "h5py-3.6.0-cp38-cp38-manylinux_2_17_aarch64.manylinux2014_aarch64.whl", hash = "sha256:9f39242960b8d7f86f3056cc2546aa3047ff4835985f6483229af8f029e9c8db"}, + {file = "h5py-3.6.0-cp38-cp38-win_amd64.whl", hash = "sha256:8ecedf16c613973622a334701f67edcc0249469f9daa0576e994fb20ac0405db"}, + {file = "h5py-3.6.0-cp39-cp39-macosx_10_9_x86_64.whl", hash = "sha256:d8cacad89aa7daf3626fce106f7f2662ac35b14849df22d252d0d8fab9dc1c0b"}, + {file = "h5py-3.6.0-cp39-cp39-manylinux_2_12_x86_64.manylinux2010_x86_64.whl", hash = "sha256:dbaa1ed9768bf9ff04af0919acc55746e62b28333644f0251f38768313f31745"}, + {file = "h5py-3.6.0-cp39-cp39-manylinux_2_17_aarch64.manylinux2014_aarch64.whl", hash = "sha256:954c5c39a09b5302f69f752c3bbf165d368a65c8d200f7d5655e0fa6368a75e6"}, + {file = "h5py-3.6.0-cp39-cp39-win_amd64.whl", hash = "sha256:9fd8a14236fdd092a20c0bdf25c3aba3777718d266fabb0fdded4fcf252d1630"}, + {file = "h5py-3.6.0.tar.gz", hash = "sha256:8752d2814a92aba4e2b2a5922d2782d0029102d99caaf3c201a566bc0b40db29"}, ] identify = [ - {file = "identify-2.3.0-py2.py3-none-any.whl", hash = "sha256:d1e82c83d063571bb88087676f81261a4eae913c492dafde184067c584bc7c05"}, - {file = "identify-2.3.0.tar.gz", hash = "sha256:fd08c97f23ceee72784081f1ce5125c8f53a02d3f2716dde79a6ab8f1039fea5"}, + {file = "identify-2.4.7-py2.py3-none-any.whl", hash = "sha256:e64210654dfbca6ced33230eb1b137591a0981425e1a60b4c6c36309f787bbd5"}, + {file = "identify-2.4.7.tar.gz", hash = "sha256:8408f01e0be25492017346d7dffe7e7711b762b23375c775d24d3bc38618fabc"}, ] idna = [ {file = "idna-3.3-py3-none-any.whl", hash = "sha256:84d9dd047ffa80596e0f246e2eab0b391788b0503584e8945f2368256d2735ff"}, {file = "idna-3.3.tar.gz", hash = "sha256:9d643ff0a55b762d5cdb124b8eaa99c66322e2157b69160bc32796e824360e6d"}, ] imagesize = [ - {file = 
"imagesize-1.2.0-py2.py3-none-any.whl", hash = "sha256:6965f19a6a2039c7d48bca7dba2473069ff854c36ae6f19d2cde309d998228a1"}, - {file = "imagesize-1.2.0.tar.gz", hash = "sha256:b1f6b5a4eab1f73479a50fb79fcf729514a900c341d8503d62a62dbc4127a2b1"}, + {file = "imagesize-1.3.0-py2.py3-none-any.whl", hash = "sha256:1db2f82529e53c3e929e8926a1fa9235aa82d0bd0c580359c67ec31b2fddaa8c"}, + {file = "imagesize-1.3.0.tar.gz", hash = "sha256:cd1750d452385ca327479d45b64d9c7729ecf0b3969a58148298c77092261f9d"}, ] importlib-metadata = [ - {file = "importlib_metadata-4.8.1-py3-none-any.whl", hash = "sha256:b618b6d2d5ffa2f16add5697cf57a46c76a56229b0ed1c438322e4e95645bd15"}, - {file = "importlib_metadata-4.8.1.tar.gz", hash = "sha256:f284b3e11256ad1e5d03ab86bb2ccd6f5339688ff17a4d797a0fe7df326f23b1"}, + {file = "importlib_metadata-4.10.1-py3-none-any.whl", hash = "sha256:899e2a40a8c4a1aec681feef45733de8a6c58f3f6a0dbed2eb6574b4387a77b6"}, + {file = "importlib_metadata-4.10.1.tar.gz", hash = "sha256:951f0d8a5b7260e9db5e41d429285b5f451e928479f19d80818878527d36e95e"}, ] jinja2 = [ - {file = "Jinja2-3.0.2-py3-none-any.whl", hash = "sha256:8569982d3f0889eed11dd620c706d39b60c36d6d25843961f33f77fb6bc6b20c"}, - {file = "Jinja2-3.0.2.tar.gz", hash = "sha256:827a0e32839ab1600d4eb1c4c33ec5a8edfbc5cb42dafa13b81f182f97784b45"}, + {file = "Jinja2-3.0.3-py3-none-any.whl", hash = "sha256:077ce6014f7b40d03b47d1f1ca4b0fc8328a692bd284016f806ed0eaca390ad8"}, + {file = "Jinja2-3.0.3.tar.gz", hash = "sha256:611bb273cd68f3b993fabdc4064fc858c5b47a973cb5aa7999ec1ba405c87cd7"}, ] joblib = [ {file = "joblib-1.1.0-py2.py3-none-any.whl", hash = "sha256:f21f109b3c7ff9d95f8387f752d0d9c34a02aa2f7060c2135f465da0e5160ff6"}, @@ -999,35 +1058,49 @@ markupsafe = [ {file = "MarkupSafe-2.0.1.tar.gz", hash = "sha256:594c67807fb16238b30c44bdf74f36c02cdf22d1c8cda91ef8a0ed8dabf5620a"}, ] matplotlib = [ - {file = "matplotlib-3.4.3-cp37-cp37m-macosx_10_9_x86_64.whl", hash = "sha256:5c988bb43414c7c2b0a31bd5187b4d27fd625c080371b463a6d422047df78913"}, - {file = "matplotlib-3.4.3-cp37-cp37m-manylinux1_i686.whl", hash = "sha256:f1c5efc278d996af8a251b2ce0b07bbeccb821f25c8c9846bdcb00ffc7f158aa"}, - {file = "matplotlib-3.4.3-cp37-cp37m-manylinux1_x86_64.whl", hash = "sha256:eeb1859efe7754b1460e1d4991bbd4a60a56f366bc422ef3a9c5ae05f0bc70b5"}, - {file = "matplotlib-3.4.3-cp37-cp37m-manylinux2014_aarch64.whl", hash = "sha256:844a7b0233e4ff7fba57e90b8799edaa40b9e31e300b8d5efc350937fa8b1bea"}, - {file = "matplotlib-3.4.3-cp37-cp37m-win32.whl", hash = "sha256:85f0c9cf724715e75243a7b3087cf4a3de056b55e05d4d76cc58d610d62894f3"}, - {file = "matplotlib-3.4.3-cp37-cp37m-win_amd64.whl", hash = "sha256:c70b6311dda3e27672f1bf48851a0de816d1ca6aaf3d49365fbdd8e959b33d2b"}, - {file = "matplotlib-3.4.3-cp38-cp38-macosx_10_9_x86_64.whl", hash = "sha256:b884715a59fec9ad3b6048ecf3860f3b2ce965e676ef52593d6fa29abcf7d330"}, - {file = "matplotlib-3.4.3-cp38-cp38-manylinux1_i686.whl", hash = "sha256:a78a3b51f29448c7f4d4575e561f6b0dbb8d01c13c2046ab6c5220eb25c06506"}, - {file = "matplotlib-3.4.3-cp38-cp38-manylinux1_x86_64.whl", hash = "sha256:6a724e3a48a54b8b6e7c4ae38cd3d07084508fa47c410c8757e9db9791421838"}, - {file = "matplotlib-3.4.3-cp38-cp38-manylinux2014_aarch64.whl", hash = "sha256:48e1e0859b54d5f2e29bb78ca179fd59b971c6ceb29977fb52735bfd280eb0f5"}, - {file = "matplotlib-3.4.3-cp38-cp38-win32.whl", hash = "sha256:01c9de93a2ca0d128c9064f23709362e7fefb34910c7c9e0b8ab0de8258d5eda"}, - {file = "matplotlib-3.4.3-cp38-cp38-win_amd64.whl", hash = 
"sha256:ebfb01a65c3f5d53a8c2a8133fec2b5221281c053d944ae81ff5822a68266617"}, - {file = "matplotlib-3.4.3-cp39-cp39-macosx_10_9_x86_64.whl", hash = "sha256:b8b53f336a4688cfce615887505d7e41fd79b3594bf21dd300531a4f5b4f746a"}, - {file = "matplotlib-3.4.3-cp39-cp39-manylinux1_i686.whl", hash = "sha256:fcd6f1954943c0c192bfbebbac263f839d7055409f1173f80d8b11a224d236da"}, - {file = "matplotlib-3.4.3-cp39-cp39-manylinux1_x86_64.whl", hash = "sha256:6be8df61b1626e1a142c57e065405e869e9429b4a6dab4a324757d0dc4d42235"}, - {file = "matplotlib-3.4.3-cp39-cp39-manylinux2014_aarch64.whl", hash = "sha256:41b6e307458988891fcdea2d8ecf84a8c92d53f84190aa32da65f9505546e684"}, - {file = "matplotlib-3.4.3-cp39-cp39-win32.whl", hash = "sha256:f72657f1596199dc1e4e7a10f52a4784ead8a711f4e5b59bea95bdb97cf0e4fd"}, - {file = "matplotlib-3.4.3-cp39-cp39-win_amd64.whl", hash = "sha256:f15edcb0629a0801738925fe27070480f446fcaa15de65946ff946ad99a59a40"}, - {file = "matplotlib-3.4.3-pp37-pypy37_pp73-macosx_10_9_x86_64.whl", hash = "sha256:556965514b259204637c360d213de28d43a1f4aed1eca15596ce83f768c5a56f"}, - {file = "matplotlib-3.4.3-pp37-pypy37_pp73-manylinux2010_x86_64.whl", hash = "sha256:54a026055d5f8614f184e588f6e29064019a0aa8448450214c0b60926d62d919"}, - {file = "matplotlib-3.4.3.tar.gz", hash = "sha256:fc4f526dfdb31c9bd6b8ca06bf9fab663ca12f3ec9cdf4496fb44bc680140318"}, + {file = "matplotlib-3.5.1-cp310-cp310-macosx_10_9_universal2.whl", hash = "sha256:456cc8334f6d1124e8ff856b42d2cc1c84335375a16448189999496549f7182b"}, + {file = "matplotlib-3.5.1-cp310-cp310-macosx_10_9_x86_64.whl", hash = "sha256:8a77906dc2ef9b67407cec0bdbf08e3971141e535db888974a915be5e1e3efc6"}, + {file = "matplotlib-3.5.1-cp310-cp310-macosx_11_0_arm64.whl", hash = "sha256:8e70ae6475cfd0fad3816dcbf6cac536dc6f100f7474be58d59fa306e6e768a4"}, + {file = "matplotlib-3.5.1-cp310-cp310-manylinux_2_17_aarch64.manylinux2014_aarch64.whl", hash = "sha256:53273c5487d1c19c3bc03b9eb82adaf8456f243b97ed79d09dded747abaf1235"}, + {file = "matplotlib-3.5.1-cp310-cp310-manylinux_2_17_i686.manylinux2014_i686.whl", hash = "sha256:e3b6f3fd0d8ca37861c31e9a7cab71a0ef14c639b4c95654ea1dd153158bf0df"}, + {file = "matplotlib-3.5.1-cp310-cp310-manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:e8c87cdaf06fd7b2477f68909838ff4176f105064a72ca9d24d3f2a29f73d393"}, + {file = "matplotlib-3.5.1-cp310-cp310-win32.whl", hash = "sha256:e2f28a07b4f82abb40267864ad7b3a4ed76f1b1663e81c7efc84a9b9248f672f"}, + {file = "matplotlib-3.5.1-cp310-cp310-win_amd64.whl", hash = "sha256:d70a32ee1f8b55eed3fd4e892f0286df8cccc7e0475c11d33b5d0a148f5c7599"}, + {file = "matplotlib-3.5.1-cp37-cp37m-macosx_10_9_x86_64.whl", hash = "sha256:68fa30cec89b6139dc559ed6ef226c53fd80396da1919a1b5ef672c911aaa767"}, + {file = "matplotlib-3.5.1-cp37-cp37m-manylinux_2_17_aarch64.manylinux2014_aarch64.whl", hash = "sha256:2e3484d8455af3fdb0424eae1789af61f6a79da0c80079125112fd5c1b604218"}, + {file = "matplotlib-3.5.1-cp37-cp37m-manylinux_2_5_i686.manylinux1_i686.whl", hash = "sha256:e293b16cf303fe82995e41700d172a58a15efc5331125d08246b520843ef21ee"}, + {file = "matplotlib-3.5.1-cp37-cp37m-manylinux_2_5_x86_64.manylinux1_x86_64.whl", hash = "sha256:e3520a274a0e054e919f5b3279ee5dbccf5311833819ccf3399dab7c83e90a25"}, + {file = "matplotlib-3.5.1-cp37-cp37m-win32.whl", hash = "sha256:2252bfac85cec7af4a67e494bfccf9080bcba8a0299701eab075f48847cca907"}, + {file = "matplotlib-3.5.1-cp37-cp37m-win_amd64.whl", hash = "sha256:abf67e05a1b7f86583f6ebd01f69b693b9c535276f4e943292e444855870a1b8"}, + {file = 
"matplotlib-3.5.1-cp38-cp38-macosx_10_9_universal2.whl", hash = "sha256:6c094e4bfecd2fa7f9adffd03d8abceed7157c928c2976899de282f3600f0a3d"}, + {file = "matplotlib-3.5.1-cp38-cp38-macosx_10_9_x86_64.whl", hash = "sha256:506b210cc6e66a0d1c2bb765d055f4f6bc2745070fb1129203b67e85bbfa5c18"}, + {file = "matplotlib-3.5.1-cp38-cp38-macosx_11_0_arm64.whl", hash = "sha256:b04fc29bcef04d4e2d626af28d9d892be6aba94856cb46ed52bcb219ceac8943"}, + {file = "matplotlib-3.5.1-cp38-cp38-manylinux_2_17_aarch64.manylinux2014_aarch64.whl", hash = "sha256:577ed20ec9a18d6bdedb4616f5e9e957b4c08563a9f985563a31fd5b10564d2a"}, + {file = "matplotlib-3.5.1-cp38-cp38-manylinux_2_5_i686.manylinux1_i686.whl", hash = "sha256:e486f60db0cd1c8d68464d9484fd2a94011c1ac8593d765d0211f9daba2bd535"}, + {file = "matplotlib-3.5.1-cp38-cp38-manylinux_2_5_x86_64.manylinux1_x86_64.whl", hash = "sha256:b71f3a7ca935fc759f2aed7cec06cfe10bc3100fadb5dbd9c435b04e557971e1"}, + {file = "matplotlib-3.5.1-cp38-cp38-win32.whl", hash = "sha256:d24e5bb8028541ce25e59390122f5e48c8506b7e35587e5135efcb6471b4ac6c"}, + {file = "matplotlib-3.5.1-cp38-cp38-win_amd64.whl", hash = "sha256:778d398c4866d8e36ee3bf833779c940b5f57192fa0a549b3ad67bc4c822771b"}, + {file = "matplotlib-3.5.1-cp39-cp39-macosx_10_9_universal2.whl", hash = "sha256:bb1c613908f11bac270bc7494d68b1ef6e7c224b7a4204d5dacf3522a41e2bc3"}, + {file = "matplotlib-3.5.1-cp39-cp39-macosx_10_9_x86_64.whl", hash = "sha256:edf5e4e1d5fb22c18820e8586fb867455de3b109c309cb4fce3aaed85d9468d1"}, + {file = "matplotlib-3.5.1-cp39-cp39-macosx_11_0_arm64.whl", hash = "sha256:40e0d7df05e8efe60397c69b467fc8f87a2affeb4d562fe92b72ff8937a2b511"}, + {file = "matplotlib-3.5.1-cp39-cp39-manylinux_2_17_aarch64.manylinux2014_aarch64.whl", hash = "sha256:7a350ca685d9f594123f652ba796ee37219bf72c8e0fc4b471473d87121d6d34"}, + {file = "matplotlib-3.5.1-cp39-cp39-manylinux_2_5_i686.manylinux1_i686.whl", hash = "sha256:3e66497cd990b1a130e21919b004da2f1dc112132c01ac78011a90a0f9229778"}, + {file = "matplotlib-3.5.1-cp39-cp39-manylinux_2_5_x86_64.manylinux1_x86_64.whl", hash = "sha256:87900c67c0f1728e6db17c6809ec05c025c6624dcf96a8020326ea15378fe8e7"}, + {file = "matplotlib-3.5.1-cp39-cp39-win32.whl", hash = "sha256:b8a4fb2a0c5afbe9604f8a91d7d0f27b1832c3e0b5e365f95a13015822b4cd65"}, + {file = "matplotlib-3.5.1-cp39-cp39-win_amd64.whl", hash = "sha256:fe8d40c434a8e2c68d64c6d6a04e77f21791a93ff6afe0dce169597c110d3079"}, + {file = "matplotlib-3.5.1-pp37-pypy37_pp73-macosx_10_9_x86_64.whl", hash = "sha256:34a1fc29f8f96e78ec57a5eff5e8d8b53d3298c3be6df61e7aa9efba26929522"}, + {file = "matplotlib-3.5.1-pp37-pypy37_pp73-manylinux_2_12_i686.manylinux2010_i686.whl", hash = "sha256:b19a761b948e939a9e20173aaae76070025f0024fc8f7ba08bef22a5c8573afc"}, + {file = "matplotlib-3.5.1-pp37-pypy37_pp73-manylinux_2_12_x86_64.manylinux2010_x86_64.whl", hash = "sha256:6803299cbf4665eca14428d9e886de62e24f4223ac31ab9c5d6d5339a39782c7"}, + {file = "matplotlib-3.5.1-pp37-pypy37_pp73-win_amd64.whl", hash = "sha256:14334b9902ec776461c4b8c6516e26b450f7ebe0b3ef8703bf5cdfbbaecf774a"}, + {file = "matplotlib-3.5.1.tar.gz", hash = "sha256:b2e9810e09c3a47b73ce9cab5a72243a1258f61e7900969097a817232246ce1c"}, ] mistune = [ {file = "mistune-0.8.4-py2.py3-none-any.whl", hash = "sha256:88a1051873018da288eee8538d476dffe1262495144b33ecb586c4ab266bb8d4"}, {file = "mistune-0.8.4.tar.gz", hash = "sha256:59a3429db53c50b5c6bcc8a07f8848cb00d7dc8bdb431a4ab41920d201d4756e"}, ] mne = [ - {file = "mne-0.23.4-py3-none-any.whl", hash = 
"sha256:90f2c0e1182f42a3c5572ee023a94e347adf6cf53ebe619e8e1e4d09bb189ffa"}, - {file = "mne-0.23.4.tar.gz", hash = "sha256:ecace5caacf10961ebb74cc5e0ead4d4dbc55fed006eab1e644da144092354e9"}, + {file = "mne-0.24.1-py3-none-any.whl", hash = "sha256:26f75fc0b468ed078f35a86a26fa75bab60887b914b90707000cbc0cb8b11e78"}, + {file = "mne-0.24.1.tar.gz", hash = "sha256:38cbffd03a6ad0e83ef4a964ac9910a37d164c37fcc84894e39ed0cdf805300d"}, ] nodeenv = [ {file = "nodeenv-1.6.0-py2.py3-none-any.whl", hash = "sha256:621e6b7076565ddcacd2db0294c0381e01fd28945ab36bcf00f41c5daf63bef7"}, @@ -1064,12 +1137,12 @@ numpy = [ {file = "numpy-1.21.1.zip", hash = "sha256:dff4af63638afcc57a3dfb9e4b26d434a7a602d225b42d746ea7fe2edf1342fd"}, ] numpydoc = [ - {file = "numpydoc-1.1.0-py3-none-any.whl", hash = "sha256:c53d6311190b9e3b9285bc979390ba0257ba9acde5eca1a7065fc8dfca9d46e8"}, - {file = "numpydoc-1.1.0.tar.gz", hash = "sha256:c36fd6cb7ffdc9b4e165a43f67bf6271a7b024d0bb6b00ac468c9e2bfc76448e"}, + {file = "numpydoc-1.2-py3-none-any.whl", hash = "sha256:3ecbb9feae080031714b63128912988ebdfd4c582a085d25b8d9f7ac23c2d9ef"}, + {file = "numpydoc-1.2.tar.gz", hash = "sha256:0cec233740c6b125913005d16e8a9996e060528afcb8b7cad3f2706629dfd6f7"}, ] packaging = [ - {file = "packaging-21.0-py3-none-any.whl", hash = "sha256:c86254f9220d55e31cc94d69bade760f0847da8000def4dfe1c6b872fd14ff14"}, - {file = "packaging-21.0.tar.gz", hash = "sha256:7dc96269f53a4ccec5c0670940a4281106dd0bb343f47b7471f779df49c2fbe7"}, + {file = "packaging-21.3-py3-none-any.whl", hash = "sha256:ef103e05f519cdc783ae24ea4e2e0f508a9c99b2d4969652eed6a2e1ea5bd522"}, + {file = "packaging-21.3.tar.gz", hash = "sha256:dd47c42927d89ab911e606518907cc2d3a1f38bbd026385970643f9c5b8ecfeb"}, ] pandas = [ {file = "pandas-1.1.5-cp36-cp36m-macosx_10_9_x86_64.whl", hash = "sha256:bf23a3b54d128b50f4f9d4675b3c1857a688cc6731a32f931837d72effb2698d"}, @@ -1132,24 +1205,24 @@ pillow = [ {file = "Pillow-9.0.0.tar.gz", hash = "sha256:ee6e2963e92762923956fe5d3479b1fdc3b76c83f290aad131a2f98c3df0593e"}, ] platformdirs = [ - {file = "platformdirs-2.4.0-py3-none-any.whl", hash = "sha256:8868bbe3c3c80d42f20156f22e7131d2fb321f5bc86a2a345375c6481a67021d"}, - {file = "platformdirs-2.4.0.tar.gz", hash = "sha256:367a5e80b3d04d2428ffa76d33f124cf11e8fff2acdaa9b43d545f5c7d661ef2"}, + {file = "platformdirs-2.4.1-py3-none-any.whl", hash = "sha256:1d7385c7db91728b83efd0ca99a5afb296cab9d0ed8313a45ed8ba17967ecfca"}, + {file = "platformdirs-2.4.1.tar.gz", hash = "sha256:440633ddfebcc36264232365d7840a970e75e1018d15b4327d11f91909045fda"}, ] pooch = [ - {file = "pooch-1.5.2-py3-none-any.whl", hash = "sha256:debb159655de9eeccc366deb111fe1e33e76efac19724436b6878c09deca4293"}, - {file = "pooch-1.5.2.tar.gz", hash = "sha256:5969b2f1defbdc405df932767e05e0b536e2771c27f1f95d7f260bc99bf13581"}, + {file = "pooch-1.6.0-py3-none-any.whl", hash = "sha256:3bf0e20027096836b8dbce0152dbb785a269abeb621618eb4bdd275ff1e23c9c"}, + {file = "pooch-1.6.0.tar.gz", hash = "sha256:57d20ec4b10dd694d2b05bb64bc6b109c6e85a6c1405794ce87ed8b341ab3f44"}, ] pre-commit = [ - {file = "pre_commit-2.15.0-py2.py3-none-any.whl", hash = "sha256:a4ed01000afcb484d9eb8d504272e642c4c4099bbad3a6b27e519bd6a3e928a6"}, - {file = "pre_commit-2.15.0.tar.gz", hash = "sha256:3c25add78dbdfb6a28a651780d5c311ac40dd17f160eb3954a0c59da40a505a7"}, + {file = "pre_commit-2.17.0-py2.py3-none-any.whl", hash = "sha256:725fa7459782d7bec5ead072810e47351de01709be838c2ce1726b9591dad616"}, + {file = "pre_commit-2.17.0.tar.gz", hash = 
"sha256:c1a8040ff15ad3d648c70cc3e55b93e4d2d5b687320955505587fd79bbaed06a"}, ] pygments = [ - {file = "Pygments-2.10.0-py3-none-any.whl", hash = "sha256:b8e67fe6af78f492b3c4b3e2970c0624cbf08beb1e493b2c99b9fa1b67a20380"}, - {file = "Pygments-2.10.0.tar.gz", hash = "sha256:f398865f7eb6874156579fdf36bc840a03cab64d1cde9e93d68f46a425ec52c6"}, + {file = "Pygments-2.11.2-py3-none-any.whl", hash = "sha256:44238f1b60a76d78fc8ca0528ee429702aae011c265fe6a8dd8b63049ae41c65"}, + {file = "Pygments-2.11.2.tar.gz", hash = "sha256:4e426f72023d88d03b2fa258de560726ce890ff3b630f88c21cbb8b2503b8c6a"}, ] pyparsing = [ - {file = "pyparsing-2.4.7-py2.py3-none-any.whl", hash = "sha256:ef9d7589ef3c200abe66653d3f1ab1033c3c419ae9b9bdb1240a85b024efc88b"}, - {file = "pyparsing-2.4.7.tar.gz", hash = "sha256:c203ec8783bf771a155b207279b9bccb8dea02d8f0c9e5f8ead507bc3246ecc1"}, + {file = "pyparsing-3.0.7-py3-none-any.whl", hash = "sha256:a6c06a88f252e6c322f65faf8f418b16213b51bdfaece0524c1c1bc30c63c484"}, + {file = "pyparsing-3.0.7.tar.gz", hash = "sha256:18ee9022775d270c55187733956460083db60b37d0d0fb357445f3094eed3eea"}, ] pyriemann = [ {file = "pyriemann-0.2.7.tar.gz", hash = "sha256:1feed8f72d94414bdc9ca4485333711a3f91f9742ae8794874a7399f4166758a"}, @@ -1194,35 +1267,42 @@ pyyaml = [ {file = "PyYAML-5.4.1.tar.gz", hash = "sha256:607774cbba28732bfa802b54baa7484215f530991055bb562efbed5b2f20a45e"}, ] requests = [ - {file = "requests-2.26.0-py2.py3-none-any.whl", hash = "sha256:6c1246513ecd5ecd4528a0906f910e8f0f9c6b8ec72030dc9fd154dc1a6efd24"}, - {file = "requests-2.26.0.tar.gz", hash = "sha256:b8aa58f8cf793ffd8782d3d8cb19e66ef36f7aba4353eec859e74678b01b07a7"}, + {file = "requests-2.27.1-py2.py3-none-any.whl", hash = "sha256:f22fa1e554c9ddfd16e6e41ac79759e17be9e492b3587efa038054674760e72d"}, + {file = "requests-2.27.1.tar.gz", hash = "sha256:68d7c56fd5a8999887728ef304a6d12edc7be74f1cfa47714fc8b414525c9a61"}, ] scikit-learn = [ - {file = "scikit-learn-1.0.tar.gz", hash = "sha256:776800194e757cd212b47cd05907e0eb67a554ad333fe76776060dbb729e3427"}, - {file = "scikit_learn-1.0-cp37-cp37m-macosx_10_13_x86_64.whl", hash = "sha256:e8a6074f7d505bbfd30bcc1c57dc7cb150cc9c021459c2e2729854be1aefb5f7"}, - {file = "scikit_learn-1.0-cp37-cp37m-manylinux_2_12_i686.manylinux2010_i686.whl", hash = "sha256:56ab58978c7aa181856a42f8f491be953b755105040aeb070ebd6b180896f146"}, - {file = "scikit_learn-1.0-cp37-cp37m-manylinux_2_12_x86_64.manylinux2010_x86_64.whl", hash = "sha256:b1df4d1151dd6d945324583125e6449bb74ec7cd91ffd7f850015cdb75f151b5"}, - {file = "scikit_learn-1.0-cp37-cp37m-manylinux_2_17_aarch64.manylinux2014_aarch64.whl", hash = "sha256:b9f10b85dcd9ce80f738e33f55a32b3a538b47409dc1a59eec30b46ea96759db"}, - {file = "scikit_learn-1.0-cp37-cp37m-manylinux_2_5_i686.manylinux1_i686.whl", hash = "sha256:663a6aaad92e5690b03d931f849016c9718beaa654e9a15f08bfcac750241036"}, - {file = "scikit_learn-1.0-cp37-cp37m-manylinux_2_5_x86_64.manylinux1_x86_64.whl", hash = "sha256:190c178028f9073d9f61cd30a19c685993236b9b2df884f16608cbb3ff03800b"}, - {file = "scikit_learn-1.0-cp37-cp37m-win32.whl", hash = "sha256:555f4b4c10d3bef9e3cda63c3b45670a091fb50328fccd54948cd8a7cf887198"}, - {file = "scikit_learn-1.0-cp37-cp37m-win_amd64.whl", hash = "sha256:9f103cd6d7e15fa537a844c1a85c9beeeee8ec38357287c9efd3ee4bb8354e1d"}, - {file = "scikit_learn-1.0-cp38-cp38-macosx_10_13_x86_64.whl", hash = "sha256:121f78d6564000dc5e968394f45aac87981fcaaf2be40cfcd8f07b2baa1e1829"}, - {file = "scikit_learn-1.0-cp38-cp38-manylinux_2_12_i686.manylinux2010_i686.whl", hash = 
"sha256:83ab0d0447b8de8450c554952a8399791544605caf274fc3c904e247e1584ced"}, - {file = "scikit_learn-1.0-cp38-cp38-manylinux_2_12_x86_64.manylinux2010_x86_64.whl", hash = "sha256:f8aecb3edc443e5625725ae1ef8f500fa78ce7cb0e864115864bb9f234d18290"}, - {file = "scikit_learn-1.0-cp38-cp38-manylinux_2_17_aarch64.manylinux2014_aarch64.whl", hash = "sha256:c1f710bba72925aa96e60828df5d2a4872f5d4a4ad7bb4a4c9a6a41c9ce9a198"}, - {file = "scikit_learn-1.0-cp38-cp38-manylinux_2_5_i686.manylinux1_i686.whl", hash = "sha256:4cb5ccb2b63c617ead48c6d92001273ad1b0e8e2bd4a4857edb58749a88b6d82"}, - {file = "scikit_learn-1.0-cp38-cp38-manylinux_2_5_x86_64.manylinux1_x86_64.whl", hash = "sha256:29559c207616604bbaa664bf98eed81b32d9f3d4c975065a206a5e2b268fe784"}, - {file = "scikit_learn-1.0-cp38-cp38-win32.whl", hash = "sha256:c9c329ec195cdea6a4dee3cebdb1602f4e0f69351c63bc58a4812f3c8a9f4f2d"}, - {file = "scikit_learn-1.0-cp38-cp38-win_amd64.whl", hash = "sha256:14bd46639b2149b3ed613adc095511313a0db62ba9fa31117bdcb5c23722e93b"}, - {file = "scikit_learn-1.0-cp39-cp39-macosx_10_13_x86_64.whl", hash = "sha256:efeac34d0ce6bf9404d268545867cbde9d6ecadd0e9bd7e6b468e5f4e2349875"}, - {file = "scikit_learn-1.0-cp39-cp39-manylinux_2_12_i686.manylinux2010_i686.whl", hash = "sha256:af94b89a8f7759603c696b320e86e57f4b2bb4911e02bf2bae33c714ac498fb8"}, - {file = "scikit_learn-1.0-cp39-cp39-manylinux_2_12_x86_64.manylinux2010_x86_64.whl", hash = "sha256:6d8bdacde73f5f484325179f466ce2011f79360e9a152100179c3dafb88f2a35"}, - {file = "scikit_learn-1.0-cp39-cp39-manylinux_2_17_aarch64.manylinux2014_aarch64.whl", hash = "sha256:f7053801ceb7c51ce674c6a8e37a18fcc221c292f66ef7da84744ecf13b4a0c0"}, - {file = "scikit_learn-1.0-cp39-cp39-manylinux_2_5_i686.manylinux1_i686.whl", hash = "sha256:e35135657b7103a70298cf557e4fad06af97607cb0780d8f44a2f91ca7769458"}, - {file = "scikit_learn-1.0-cp39-cp39-manylinux_2_5_x86_64.manylinux1_x86_64.whl", hash = "sha256:9d8caf7fa58791b6b26e912e44d5056818b7bb3142bfa7806f54bde47c189078"}, - {file = "scikit_learn-1.0-cp39-cp39-win32.whl", hash = "sha256:6a056637f7f9876e4c9db9b5434d340e0c97e25f00c4c04458f0ff906e82488e"}, - {file = "scikit_learn-1.0-cp39-cp39-win_amd64.whl", hash = "sha256:eed33b7ca2bf3fdd585339db42838ab0b641952e064564bff6e9a10573ea665c"}, + {file = "scikit-learn-1.0.2.tar.gz", hash = "sha256:b5870959a5484b614f26d31ca4c17524b1b0317522199dc985c3b4256e030767"}, + {file = "scikit_learn-1.0.2-cp310-cp310-macosx_10_13_x86_64.whl", hash = "sha256:da3c84694ff693b5b3194d8752ccf935a665b8b5edc33a283122f4273ca3e687"}, + {file = "scikit_learn-1.0.2-cp310-cp310-macosx_12_0_arm64.whl", hash = "sha256:75307d9ea39236cad7eea87143155eea24d48f93f3a2f9389c817f7019f00705"}, + {file = "scikit_learn-1.0.2-cp310-cp310-manylinux_2_17_aarch64.manylinux2014_aarch64.whl", hash = "sha256:f14517e174bd7332f1cca2c959e704696a5e0ba246eb8763e6c24876d8710049"}, + {file = "scikit_learn-1.0.2-cp310-cp310-manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:d9aac97e57c196206179f674f09bc6bffcd0284e2ba95b7fe0b402ac3f986023"}, + {file = "scikit_learn-1.0.2-cp310-cp310-win_amd64.whl", hash = "sha256:d93d4c28370aea8a7cbf6015e8a669cd5d69f856cc2aa44e7a590fb805bb5583"}, + {file = "scikit_learn-1.0.2-cp37-cp37m-macosx_10_13_x86_64.whl", hash = "sha256:85260fb430b795d806251dd3bb05e6f48cdc777ac31f2bcf2bc8bbed3270a8f5"}, + {file = "scikit_learn-1.0.2-cp37-cp37m-manylinux_2_12_i686.manylinux2010_i686.whl", hash = "sha256:a053a6a527c87c5c4fa7bf1ab2556fa16d8345cf99b6c5a19030a4a7cd8fd2c0"}, + {file = 
"scikit_learn-1.0.2-cp37-cp37m-manylinux_2_12_x86_64.manylinux2010_x86_64.whl", hash = "sha256:245c9b5a67445f6f044411e16a93a554edc1efdcce94d3fc0bc6a4b9ac30b752"}, + {file = "scikit_learn-1.0.2-cp37-cp37m-manylinux_2_17_aarch64.manylinux2014_aarch64.whl", hash = "sha256:158faf30684c92a78e12da19c73feff9641a928a8024b4fa5ec11d583f3d8a87"}, + {file = "scikit_learn-1.0.2-cp37-cp37m-manylinux_2_17_i686.manylinux2014_i686.whl", hash = "sha256:08ef968f6b72033c16c479c966bf37ccd49b06ea91b765e1cc27afefe723920b"}, + {file = "scikit_learn-1.0.2-cp37-cp37m-manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:16455ace947d8d9e5391435c2977178d0ff03a261571e67f627c8fee0f9d431a"}, + {file = "scikit_learn-1.0.2-cp37-cp37m-win32.whl", hash = "sha256:2f3b453e0b149898577e301d27e098dfe1a36943f7bb0ad704d1e548efc3b448"}, + {file = "scikit_learn-1.0.2-cp37-cp37m-win_amd64.whl", hash = "sha256:46f431ec59dead665e1370314dbebc99ead05e1c0a9df42f22d6a0e00044820f"}, + {file = "scikit_learn-1.0.2-cp38-cp38-macosx_10_13_x86_64.whl", hash = "sha256:ff3fa8ea0e09e38677762afc6e14cad77b5e125b0ea70c9bba1992f02c93b028"}, + {file = "scikit_learn-1.0.2-cp38-cp38-macosx_12_0_arm64.whl", hash = "sha256:9369b030e155f8188743eb4893ac17a27f81d28a884af460870c7c072f114243"}, + {file = "scikit_learn-1.0.2-cp38-cp38-manylinux_2_12_i686.manylinux2010_i686.whl", hash = "sha256:7d6b2475f1c23a698b48515217eb26b45a6598c7b1840ba23b3c5acece658dbb"}, + {file = "scikit_learn-1.0.2-cp38-cp38-manylinux_2_12_x86_64.manylinux2010_x86_64.whl", hash = "sha256:285db0352e635b9e3392b0b426bc48c3b485512d3b4ac3c7a44ec2a2ba061e66"}, + {file = "scikit_learn-1.0.2-cp38-cp38-manylinux_2_17_aarch64.manylinux2014_aarch64.whl", hash = "sha256:5cb33fe1dc6f73dc19e67b264dbb5dde2a0539b986435fdd78ed978c14654830"}, + {file = "scikit_learn-1.0.2-cp38-cp38-manylinux_2_17_i686.manylinux2014_i686.whl", hash = "sha256:b1391d1a6e2268485a63c3073111fe3ba6ec5145fc957481cfd0652be571226d"}, + {file = "scikit_learn-1.0.2-cp38-cp38-manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:bc3744dabc56b50bec73624aeca02e0def06b03cb287de26836e730659c5d29c"}, + {file = "scikit_learn-1.0.2-cp38-cp38-win32.whl", hash = "sha256:a999c9f02ff9570c783069f1074f06fe7386ec65b84c983db5aeb8144356a355"}, + {file = "scikit_learn-1.0.2-cp38-cp38-win_amd64.whl", hash = "sha256:7626a34eabbf370a638f32d1a3ad50526844ba58d63e3ab81ba91e2a7c6d037e"}, + {file = "scikit_learn-1.0.2-cp39-cp39-macosx_10_13_x86_64.whl", hash = "sha256:a90b60048f9ffdd962d2ad2fb16367a87ac34d76e02550968719eb7b5716fd10"}, + {file = "scikit_learn-1.0.2-cp39-cp39-macosx_12_0_arm64.whl", hash = "sha256:7a93c1292799620df90348800d5ac06f3794c1316ca247525fa31169f6d25855"}, + {file = "scikit_learn-1.0.2-cp39-cp39-manylinux_2_12_i686.manylinux2010_i686.whl", hash = "sha256:eabceab574f471de0b0eb3f2ecf2eee9f10b3106570481d007ed1c84ebf6d6a1"}, + {file = "scikit_learn-1.0.2-cp39-cp39-manylinux_2_12_x86_64.manylinux2010_x86_64.whl", hash = "sha256:55f2f3a8414e14fbee03782f9fe16cca0f141d639d2b1c1a36779fa069e1db57"}, + {file = "scikit_learn-1.0.2-cp39-cp39-manylinux_2_17_aarch64.manylinux2014_aarch64.whl", hash = "sha256:80095a1e4b93bd33261ef03b9bc86d6db649f988ea4dbcf7110d0cded8d7213d"}, + {file = "scikit_learn-1.0.2-cp39-cp39-manylinux_2_17_i686.manylinux2014_i686.whl", hash = "sha256:fa38a1b9b38ae1fad2863eff5e0d69608567453fdfc850c992e6e47eb764e846"}, + {file = "scikit_learn-1.0.2-cp39-cp39-manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:ff746a69ff2ef25f62b36338c615dd15954ddc3ab8e73530237dd73235e76d62"}, + {file = 
"scikit_learn-1.0.2-cp39-cp39-win32.whl", hash = "sha256:e174242caecb11e4abf169342641778f68e1bfaba80cd18acd6bc84286b9a534"}, + {file = "scikit_learn-1.0.2-cp39-cp39-win_amd64.whl", hash = "sha256:b54a62c6e318ddbfa7d22c383466d38d2ee770ebdb5ddb668d56a099f6eaf75f"}, ] scipy = [ {file = "scipy-1.6.1-cp37-cp37m-macosx_10_9_x86_64.whl", hash = "sha256:a15a1f3fc0abff33e792d6049161b7795909b40b97c6cc2934ed54384017ab76"}, @@ -1249,13 +1329,17 @@ seaborn = [ {file = "seaborn-0.11.2-py3-none-any.whl", hash = "sha256:85a6baa9b55f81a0623abddc4a26b334653ff4c6b18c418361de19dbba0ef283"}, {file = "seaborn-0.11.2.tar.gz", hash = "sha256:cf45e9286d40826864be0e3c066f98536982baf701a7caa386511792d61ff4f6"}, ] +setuptools-scm = [ + {file = "setuptools_scm-6.4.2-py3-none-any.whl", hash = "sha256:acea13255093849de7ccb11af9e1fb8bde7067783450cee9ef7a93139bddf6d4"}, + {file = "setuptools_scm-6.4.2.tar.gz", hash = "sha256:6833ac65c6ed9711a4d5d2266f8024cfa07c533a0e55f4c12f6eff280a5a9e30"}, +] six = [ {file = "six-1.16.0-py2.py3-none-any.whl", hash = "sha256:8abb2f1d86890a2dfb989f9a77cfcfd3e47c2a354b01111771326f8aa26e0254"}, {file = "six-1.16.0.tar.gz", hash = "sha256:1e61c37477a1626458e36f7b1d82aa5c9b094fa4802892072e49de9c60c4c926"}, ] snowballstemmer = [ - {file = "snowballstemmer-2.1.0-py2.py3-none-any.whl", hash = "sha256:b51b447bea85f9968c13b650126a888aabd4cb4463fca868ec596826325dedc2"}, - {file = "snowballstemmer-2.1.0.tar.gz", hash = "sha256:e997baa4f2e9139951b6f4c631bad912dfd3c792467e2f03d7239464af90e914"}, + {file = "snowballstemmer-2.2.0-py2.py3-none-any.whl", hash = "sha256:c8e1716e83cc398ae16824e5572ae04e0d9fc2c6b985fb0f900f5f0c96ecba1a"}, + {file = "snowballstemmer-2.2.0.tar.gz", hash = "sha256:09b16deb8547d3412ad7b590689584cd0fe25ec8db3be37788be3810cbf19cb1"}, ] sphinx = [ {file = "Sphinx-3.5.4-py3-none-any.whl", hash = "sha256:2320d4e994a191f4b4be27da514e46b3d6b420f2ff895d064f52415d342461e8"}, @@ -1294,27 +1378,34 @@ sphinxcontrib-serializinghtml = [ ] tdlda = [] threadpoolctl = [ - {file = "threadpoolctl-3.0.0-py3-none-any.whl", hash = "sha256:4fade5b3b48ae4b1c30f200b28f39180371104fccc642e039e0f2435ec8cc211"}, - {file = "threadpoolctl-3.0.0.tar.gz", hash = "sha256:d03115321233d0be715f0d3a5ad1d6c065fe425ddc2d671ca8e45e9fd5d7a52a"}, + {file = "threadpoolctl-3.1.0-py3-none-any.whl", hash = "sha256:8b99adda265feb6773280df41eece7b2e6561b772d21ffd52e372f999024907b"}, + {file = "threadpoolctl-3.1.0.tar.gz", hash = "sha256:a335baacfaa4400ae1f0d8e3a58d6674d2f8828e3716bb2802c44955ad391380"}, ] toml = [ {file = "toml-0.10.2-py2.py3-none-any.whl", hash = "sha256:806143ae5bfb6a3c6e736a764057db0e6a0e05e338b5630894a5f779cabb4f9b"}, {file = "toml-0.10.2.tar.gz", hash = "sha256:b3bda1d108d5dd99f4a20d24d9c348e91c4db7ab1b749200bded2f839ccbe68f"}, ] +tomli = [ + {file = "tomli-2.0.0-py3-none-any.whl", hash = "sha256:b5bde28da1fed24b9bd1d4d2b8cba62300bfb4ec9a6187a957e8ddb9434c5224"}, + {file = "tomli-2.0.0.tar.gz", hash = "sha256:c292c34f58502a1eb2bbb9f5bbc9a5ebc37bee10ffb8c2d6bbdfa8eb13cc14e1"}, +] +tqdm = [ + {file = "tqdm-4.62.3-py2.py3-none-any.whl", hash = "sha256:8dd278a422499cd6b727e6ae4061c40b48fce8b76d1ccbf5d34fca9b7f925b0c"}, + {file = "tqdm-4.62.3.tar.gz", hash = "sha256:d359de7217506c9851b7869f3708d8ee53ed70a1b8edbba4dbcb47442592920d"}, +] typing-extensions = [ - {file = "typing_extensions-3.10.0.2-py2-none-any.whl", hash = "sha256:d8226d10bc02a29bcc81df19a26e56a9647f8b0a6d4a83924139f4a8b01f17b7"}, - {file = "typing_extensions-3.10.0.2-py3-none-any.whl", hash = 
"sha256:f1d25edafde516b146ecd0613dabcc61409817af4766fbbcfb8d1ad4ec441a34"}, - {file = "typing_extensions-3.10.0.2.tar.gz", hash = "sha256:49f75d16ff11f1cd258e1b988ccff82a3ca5570217d7ad8c5f48205dd99a677e"}, + {file = "typing_extensions-4.0.1-py3-none-any.whl", hash = "sha256:7f001e5ac290a0c0401508864c7ec868be4e701886d5b573a9528ed3973d9d3b"}, + {file = "typing_extensions-4.0.1.tar.gz", hash = "sha256:4ca091dea149f945ec56afb48dae714f21e8692ef22a395223bcd328961b6a0e"}, ] urllib3 = [ - {file = "urllib3-1.26.7-py2.py3-none-any.whl", hash = "sha256:c4fdf4019605b6e5423637e01bc9fe4daef873709a7973e195ceba0a62bbc844"}, - {file = "urllib3-1.26.7.tar.gz", hash = "sha256:4987c65554f7a2dbf30c18fd48778ef124af6fab771a377103da0585e2336ece"}, + {file = "urllib3-1.26.8-py2.py3-none-any.whl", hash = "sha256:000ca7f471a233c2251c6c7023ee85305721bfdf18621ebff4fd17a8653427ed"}, + {file = "urllib3-1.26.8.tar.gz", hash = "sha256:0e7c33d9a63e7ddfcb86780aac87befc2fbddf46c58dbb487e0855f7ceec283c"}, ] virtualenv = [ - {file = "virtualenv-20.8.1-py2.py3-none-any.whl", hash = "sha256:10062e34c204b5e4ec5f62e6ef2473f8ba76513a9a617e873f1f8fb4a519d300"}, - {file = "virtualenv-20.8.1.tar.gz", hash = "sha256:bcc17f0b3a29670dd777d6f0755a4c04f28815395bca279cdcb213b97199a6b8"}, + {file = "virtualenv-20.13.0-py2.py3-none-any.whl", hash = "sha256:339f16c4a86b44240ba7223d0f93a7887c3ca04b5f9c8129da7958447d079b09"}, + {file = "virtualenv-20.13.0.tar.gz", hash = "sha256:d8458cf8d59d0ea495ad9b34c2599487f8a7772d796f9910858376d1600dd2dd"}, ] zipp = [ - {file = "zipp-3.6.0-py3-none-any.whl", hash = "sha256:9fe5ea21568a0a70e50f273397638d39b03353731e6cbbb3fd8502a33fec40bc"}, - {file = "zipp-3.6.0.tar.gz", hash = "sha256:71c644c5369f4a6e07636f0aa966270449561fcea2e3d6747b8d23efaa9d7832"}, + {file = "zipp-3.7.0-py3-none-any.whl", hash = "sha256:b47250dd24f92b7dd6a0a8fc5244da14608f3ca90a5efcd37a3b1642fac9a375"}, + {file = "zipp-3.7.0.tar.gz", hash = "sha256:9f50f446828eb9d45b267433fd3e9da8d801f614129124863f9c51ebceafb87d"}, ] diff --git a/pyproject.toml b/pyproject.toml index 26316e386..a21e45e83 100644 --- a/pyproject.toml +++ b/pyproject.toml @@ -22,9 +22,10 @@ pyriemann = ">=0.2.6" matplotlib = "^3.0" seaborn = ">=0.9" PyYAML = "^5.0" -pooch = "^1.3" +pooch = "^1.6" requests = "^2.15.1" coverage = "^5.5" +tqdm = "^4.62" [tool.poetry.dev-dependencies] Sphinx = "^3.3" @@ -32,6 +33,7 @@ sphinx-gallery = "^0.8.2" sphinx-bootstrap-theme = "^0.8" numpydoc = "^1.1.0" m2r2 = "^0.2.7" +mistune = "<2" pre-commit = "^2.11.1" tdlda = {git = "https://github.com/jsosulski/tdlda.git", rev = "0.1.0"} From fe095ddeee6dec469e9d8ad469739ed861b5037f Mon Sep 17 00:00:00 2001 From: Sylvain Chevallier Date: Wed, 9 Feb 2022 19:03:40 +0100 Subject: [PATCH 07/19] fix doc url in readme (#262) * fix doc url in readme * correct links in the docs --- README.md | 4 ++-- docs/source/README.md | 5 ++--- 2 files changed, 4 insertions(+), 5 deletions(-) diff --git a/README.md b/README.md index af3c142e7..119239a35 100644 --- a/README.md +++ b/README.md @@ -129,7 +129,7 @@ can upgrade your pip version using: `pip install -U pip` before installing `moab ## Supported datasets The list of supported datasets can be found here : -http://moabb.neurotechx.com/docs/datasets.html +https://neurotechx.github.io/moabb/datasets.html ### Submit a new dataset @@ -256,6 +256,6 @@ BCI algorithms applied on an extensive list of freely available EEG datasets. 
[link_sylvain]: https://sylvchev.github.io/ [link_neurotechx_signup]: https://neurotechx.com/ [link_gitter]: https://gitter.im/moabb_dev/community -[link_moabb_docs]: http://moabb.neurotechx.com/docs/index.html +[link_moabb_docs]: https://neurotechx.github.io/moabb/ [link_arxiv]: https://arxiv.org/abs/1805.06427 [link_jne]: http://iopscience.iop.org/article/10.1088/1741-2552/aadea0/meta diff --git a/docs/source/README.md b/docs/source/README.md index 09a1e7331..20c8a7f0a 100644 --- a/docs/source/README.md +++ b/docs/source/README.md @@ -128,8 +128,7 @@ can upgrade your pip version using: `pip install -U pip` before installing `moab ## Supported datasets -The list of supported datasets can be found here : -http://moabb.neurotechx.com/docs/datasets.html +The list of supported datasets can be found here : https://neurotechx.github.io/moabb/ ### Submit a new dataset @@ -258,6 +257,6 @@ BCI algorithms applied on an extensive list of freely available EEG datasets. [link_sylvain]: https://sylvchev.github.io/ [link_neurotechx_signup]: https://neurotechx.com/ [link_gitter]: https://gitter.im/moabb_dev/community -[link_moabb_docs]: http://moabb.neurotechx.com/docs/index.html +[link_moabb_docs]: https://neurotechx.github.io/moabb/ [link_arxiv]: https://arxiv.org/abs/1805.06427 [link_jne]: http://iopscience.iop.org/article/10.1088/1741-2552/aadea0/meta From 2c05b9b58a05360aad0e07803eef447cf1715445 Mon Sep 17 00:00:00 2001 From: robintibor Date: Mon, 14 Feb 2022 15:02:22 +0100 Subject: [PATCH 08/19] Schirrmeister2017 High-Gamma Dataset from EDF (#265) * loading Schirrmeister2017 High-Gamma Dataset from EDF * remove commented import of requests module * rename to session_0 --- moabb/datasets/schirrmeister2017.py | 190 +++------------------------- 1 file changed, 15 insertions(+), 175 deletions(-) diff --git a/moabb/datasets/schirrmeister2017.py b/moabb/datasets/schirrmeister2017.py index db57957d4..3b2521d65 100644 --- a/moabb/datasets/schirrmeister2017.py +++ b/moabb/datasets/schirrmeister2017.py @@ -1,11 +1,7 @@ import logging -import re -import h5py import mne - -# import requests -import numpy as np +from mne.channels import make_standard_montage from moabb.datasets import download as dl from moabb.datasets.base import BaseDataset @@ -77,7 +73,7 @@ def data_path( raise (ValueError("Invalid subject number")) def _url(prefix): - return "/".join([GIN_URL, prefix, "{:d}.mat".format(subject)]) + return "/".join([GIN_URL, prefix, "{:d}.edf".format(subject)]) return [ dl.data_dl(_url(t), "SCHIRRMEISTER2017", path, force_update, verbose) @@ -85,174 +81,18 @@ def _url(prefix): ] def _get_single_subject_data(self, subject): - train, test = [BBCIDataset(path) for path in self.data_path(subject)] - sessions = {} - sessions["session_0"] = {"run_0": train.load(), "run_1": test.load()} - return sessions - - -class BBCIDataset(object): - """ - Loader class for files created by saving BBCI files in matlab (make - sure to save with '-v7.3' in matlab, see - https://de.mathworks.com/help/matlab/import_export/mat-file-versions.html#buk6i87 - ) - Parameters - ---------- - filename: str - load_sensor_names: list of str, optional - Also speeds up loading if you only load some sensors. - None means load all sensors. 
- - Copyright Robin Schirrmeister, 2017 - Altered by Vinay Jayaram, 2018 - """ - - def __init__(self, filename, load_sensor_names=None): - self.__dict__.update(locals()) - del self.self - - def load(self): - cnt = self._load_continuous_signal() - cnt = self._add_markers(cnt) - return cnt - - def _load_continuous_signal(self): - wanted_chan_inds, wanted_sensor_names = self._determine_sensors() - fs = self._determine_samplingrate() - with h5py.File(self.filename, "r") as h5file: - samples = int(h5file["nfo"]["T"][0, 0]) - cnt_signal_shape = (samples, len(wanted_chan_inds)) - continuous_signal = np.ones(cnt_signal_shape, dtype=np.float32) * np.nan - for chan_ind_arr, chan_ind_set in enumerate(wanted_chan_inds): - # + 1 because matlab/this hdf5-naming logic - # has 1-based indexing - # i.e ch1,ch2,.... - chan_set_name = "ch" + str(chan_ind_set + 1) - # first 0 to unpack into vector, before it is 1xN matrix - chan_signal = h5file[chan_set_name][ - : - ].squeeze() # already load into memory - continuous_signal[:, chan_ind_arr] = chan_signal - assert not np.any(np.isnan(continuous_signal)), "No NaNs expected in signal" - - # Assume we cant know channel type here automatically - ch_types = ["eeg"] * len(wanted_chan_inds) - info = mne.create_info(ch_names=wanted_sensor_names, sfreq=fs, ch_types=ch_types) - # Scale to volts from microvolts, (VJ 19.6.18) - continuous_signal = continuous_signal * 1e-6 - cnt = mne.io.RawArray(continuous_signal.T, info) - return cnt - - def _determine_sensors(self): - all_sensor_names = self.get_all_sensors(self.filename, pattern=None) - if self.load_sensor_names is None: - - # if no sensor names given, take all EEG-chans - eeg_sensor_names = all_sensor_names - eeg_sensor_names = filter(lambda s: not s.startswith("BIP"), eeg_sensor_names) - eeg_sensor_names = filter(lambda s: not s.startswith("E"), eeg_sensor_names) - eeg_sensor_names = filter( - lambda s: not s.startswith("Microphone"), eeg_sensor_names - ) - eeg_sensor_names = filter( - lambda s: not s.startswith("Breath"), eeg_sensor_names - ) - eeg_sensor_names = filter(lambda s: not s.startswith("GSR"), eeg_sensor_names) - eeg_sensor_names = list(eeg_sensor_names) - assert len(eeg_sensor_names) in set( - [128, 64, 32, 16] - ), "check this code if you have different sensors..." # noqa - self.load_sensor_names = eeg_sensor_names - chan_inds = self._determine_chan_inds(all_sensor_names, self.load_sensor_names) - return chan_inds, self.load_sensor_names - - def _determine_samplingrate(self): - with h5py.File(self.filename, "r") as h5file: - fs = h5file["nfo"]["fs"][0, 0] - assert isinstance(fs, int) or fs.is_integer() - fs = int(fs) - return fs - - @staticmethod - def _determine_chan_inds(all_sensor_names, sensor_names): - assert sensor_names is not None - chan_inds = [all_sensor_names.index(s) for s in sensor_names] - assert len(chan_inds) == len(sensor_names), "All" "sensors" "should be there." - # TODO: is it possible for this to fail? the list - # comp fails first right? - assert len(set(chan_inds)) == len(chan_inds), "No" "duplicated sensors" "wanted." - return chan_inds - - @staticmethod - def get_all_sensors(filename, pattern=None): - """ - Get all sensors that exist in the given file. - - Parameters - ---------- - filename: str - pattern: str, optional - Only return those sensor names that match the given pattern. - Returns - ------- - sensor_names: list of str - Sensor names that match the pattern or all - sensor names in the file. 
- """ - with h5py.File(filename, "r") as h5file: - clab_set = h5file["nfo"]["clab"][:].squeeze() - all_sensor_names = [ - "".join(chr(c.squeeze()) for c in h5file[obj_ref]) for obj_ref in clab_set - ] - if pattern is not None: - all_sensor_names = filter( - lambda sname: re.search(pattern, sname), all_sensor_names - ) - return all_sensor_names - - def _add_markers(self, cnt): - with h5py.File(self.filename, "r") as h5file: - event_times_in_ms = h5file["mrk"]["time"][:].squeeze() - event_classes = h5file["mrk"]["event"]["desc"][:].squeeze().astype(np.int64) - - # Check whether class names known and correct order - # class_name_set = h5file['nfo']['className'][:].squeeze() - # all_class_names = [''.join(chr(c) for c in h5file[obj_ref]) - # for obj_ref in class_name_set] - - event_times_in_samples = event_times_in_ms * cnt.info["sfreq"] / 1000.0 - event_times_in_samples = np.uint32(np.round(event_times_in_samples)) - - # Check if there are markers at the same time - previous_i_sample = -1 - for i_event, (i_sample, _) in enumerate( - zip(event_times_in_samples, event_classes) - ): - if i_sample == previous_i_sample: - info = "{:d}: ({:.0f} and {:.0f}).\n".format( - i_sample, event_classes[i_event - 1], event_classes[i_event] - ) - log.warning( - f"Same sample has at least two markers.\n{info}Marker codes will be summed." - ) - previous_i_sample = i_sample - - # Now create stim chan - stim_chan = np.zeros_like(cnt.get_data()[0]) - for i_sample, id_class in zip(event_times_in_samples, event_classes): - stim_chan[i_sample] += id_class - info = mne.create_info( - ch_names=["STI 014"], sfreq=cnt.info["sfreq"], ch_types=["stim"] - ) - stim_cnt = mne.io.RawArray(stim_chan[None], info, verbose="WARNING") - cnt = cnt.add_channels([stim_cnt]) - event_arr = [ - event_times_in_samples, - [0] * len(event_times_in_samples), - event_classes, + train_raw, test_raw = [ + mne.io.read_raw_edf(path, infer_types=True) + for path in self.data_path(subject) ] - cnt.info["events"] = [ - dict(list=np.array(event_arr).T, channels=None), + + # Select only EEG sensors (remove EOG, EMG), + # and also set montage for visualizations + montage = make_standard_montage("standard_1005") + train_raw, test_raw = [ + raw.pick_types(eeg=True).set_montage(montage) for raw in (train_raw, test_raw) ] - return cnt + sessions = { + "session_0": {"train": train_raw, "test": test_raw}, + } + return sessions From 3e9d1d7fc90314214ea6a8db4e770f5a9ac3fd67 Mon Sep 17 00:00:00 2001 From: Jan Sosulski Date: Wed, 16 Feb 2022 11:27:10 +0100 Subject: [PATCH 09/19] added 13 + 12 subjects speller datasets by huebner (#260) * added 13 + 12 subjects speller datasets by huebner * clean up legacy run splitting code * added use_blocks_as_sessions parameter for data Co-authored-by: Sylvain Chevallier --- moabb/datasets/__init__.py | 1 + moabb/datasets/huebner_llp.py | 352 ++++++++++++++++++++++++++++++++++ 2 files changed, 353 insertions(+) create mode 100644 moabb/datasets/huebner_llp.py diff --git a/moabb/datasets/__init__.py b/moabb/datasets/__init__.py index ffeb62a3e..024652182 100644 --- a/moabb/datasets/__init__.py +++ b/moabb/datasets/__init__.py @@ -20,6 +20,7 @@ from .braininvaders import bi2013a from .epfl import EPFLP300 from .gigadb import Cho2017 +from .huebner_llp import Huebner2017, Huebner2018 from .Lee2019 import Lee2019_ERP, Lee2019_MI, Lee2019_SSVEP from .mpi_mi import MunichMI from .neiry import DemonsP300 diff --git a/moabb/datasets/huebner_llp.py b/moabb/datasets/huebner_llp.py new file mode 100644 index 000000000..ea75109af --- 
/dev/null
+++ b/moabb/datasets/huebner_llp.py
@@ -0,0 +1,352 @@
+import glob
+import os
+import re
+import zipfile
+from abc import ABC
+
+import mne
+import numpy as np
+
+from moabb.datasets import download as dl
+from moabb.datasets.base import BaseDataset
+
+
+VSPELL_BASE_URL = "https://zenodo.org/record/"
+VISUAL_SPELLER_LLP_URL = VSPELL_BASE_URL + "5831826/files/"
+VISUAL_SPELLER_MIX_URL = VSPELL_BASE_URL + "5831879/files/"
+OPTICAL_MARKER_CODE = 500
+
+
+class _BaseVisualMatrixSpellerDataset(BaseDataset, ABC):
+    def __init__(
+        self, src_url, n_subjects, raw_slice_offset, use_blocks_as_sessions=True, **kwargs
+    ):
+
+        self.n_channels = 31  # all channels except the five x_* channels and EOGvu
+        if kwargs["interval"] is None:
+            # "Epochs were windowed to [−200, 700] ms relative to the stimulus onset [...]."
+            kwargs["interval"] = [-0.2, 0.7]
+
+        super().__init__(
+            events=dict(Target=10002, NonTarget=10001),
+            paradigm="p300",
+            subjects=(np.arange(n_subjects) + 1).tolist(),
+            **kwargs,
+        )
+
+        self.raw_slice_offset = 2_000 if raw_slice_offset is None else raw_slice_offset
+        self._src_url = src_url
+        self.use_blocks_as_sessions = use_blocks_as_sessions
+
+    @staticmethod
+    def _filename_trial_info_extraction(vhdr_file_path):
+        vhdr_file_name = os.path.basename(vhdr_file_path)
+        run_file_pattern = "^matrixSpeller_Block([0-9]+)_Run([0-9]+)\\.vhdr$"
+        vhdr_file_pattern_match = re.match(run_file_pattern, vhdr_file_name)
+
+        if not vhdr_file_pattern_match:
+            raise ValueError(f"Unexpected file name, cannot parse: {vhdr_file_path}")
+
+        session_name = os.path.basename(os.path.dirname(vhdr_file_path))
+        block_idx = vhdr_file_pattern_match.group(1)
+        run_idx = vhdr_file_pattern_match.group(2)
+        return session_name, block_idx, run_idx
+
+    def _get_single_subject_data(self, subject):
+        subject_data_vhdr_files = self.data_path(subject)
+        sessions = dict()
+
+        for _file_idx, subject_data_vhdr_file in enumerate(subject_data_vhdr_files):
+            (
+                session_name,
+                block_idx,
+                run_idx,
+            ) = self._filename_trial_info_extraction(subject_data_vhdr_file)
+
+            raw_bvr_list = _read_raw_llp_study_data(
+                vhdr_fname=subject_data_vhdr_file,
+                raw_slice_offset=self.raw_slice_offset,
+                verbose=None,
+            )
+
+            if self.use_blocks_as_sessions:
+                session_name = f"{session_name}_block_{block_idx}"
+            if session_name not in sessions:
+                sessions[session_name] = dict()
+            sessions[session_name][run_idx] = raw_bvr_list[0]
+
+        return sessions
+
+    def data_path(
+        self, subject, path=None, force_update=False, update_path=None, verbose=None
+    ):
+        url = f"{self._src_url}subject{subject:02d}.zip"
+        data_archive_path = dl.data_path(url, "llp")
+        data_dir_extracted_path = os.path.dirname(data_archive_path)
+
+        subject_dir_path = os.path.join(data_dir_extracted_path, f"subject{subject:02d}")
+
+        data_extracted = os.path.isdir(subject_dir_path)
+        if not data_extracted:
+            # TODO: log the extraction step properly (respecting verbose)
+            zipfile_path = glob.glob(
+                os.path.join(data_dir_extracted_path, data_archive_path, "*.zip")
+            )[0]
+            _BaseVisualMatrixSpellerDataset._extract_data(
+                data_dir_extracted_path, zipfile_path
+            )
+
+        run_glob_pattern = os.path.join(
+            data_dir_extracted_path,
+            f"subject{subject:02d}",
+            "matrixSpeller_Block*_Run*.vhdr",
+        )
+        subject_paths = glob.glob(run_glob_pattern)
+        return sorted(subject_paths)
+
+    @staticmethod
+    def _extract_data(data_dir_extracted_path, data_archive_path):
+        with zipfile.ZipFile(data_archive_path, "r") as zip_ref:
+            zip_ref.extractall(data_dir_extracted_path)
+
+
+class Huebner2017(_BaseVisualMatrixSpellerDataset):
+    """
+    Learning from label proportions for a visual matrix speller (ERP) dataset from Hübner et al 2017 [1]_.
+
+
+    **Dataset description**
+
+    The subjects were asked to spell the sentence: “Franzy jagt im komplett verwahrlosten Taxi quer durch Freiburg”.
+    The sentence was chosen because it contains each letter used in German at least once. Each subject spelled this
+    sentence three times. The stimulus onset asynchrony (SOA) was 250 ms (corresponding to 15 frames on the LCD screen
+    utilized) while the stimulus duration was 100 ms (corresponding to 6 frames on the LCD screen utilized). For each
+    character, 68 highlighting events occurred and a total of 63 characters were spelled three times. This resulted in
+    a total of 68 ⋅ 63 ⋅ 3 = 12852 EEG epochs per subject. Spelling one character took around 25 s including 4 s for
+    cueing the current symbol, 17 s for highlighting and 4 s to provide feedback to the user. Assuming a perfect
+    decoding, these timing constraints would allow for a maximum spelling speed of 2.4 characters per minute. Fig 1 of
+    the original publication shows the complete experimental structure and how LLP is used to reconstruct average
+    target and non-target ERP responses.
+
+    Subjects were placed in a chair at 80 cm distance from a 24-inch flat screen. EEG signals from 31 passive Ag/AgCl
+    electrodes (EasyCap) were recorded, which were placed approximately equidistantly according to the extended
+    10–20 system, and whose impedances were kept below 20 kΩ. All channels were referenced against the nose and the
+    ground was at FCz. The signals were registered by multichannel EEG amplifiers (BrainAmp DC, Brain Products) at a
+    sampling rate of 1 kHz. To control for vertical ocular movements and eye blinks, we recorded with an EOG electrode
+    placed below the right eye and referenced against the EEG channel Fp2 above the eye. In addition, pulse and
+    breathing activity were recorded.
+
+    Parameters
+    ----------
+    interval: array_like
+        range/interval in seconds in which the brain response/activity relative to an event/stimulus onset lies.
+        Defaults to [-0.2, 0.7].
+    raw_slice_offset: int, None
+        defines the crop offset in milliseconds before the first and after the last event (target or non-target) onset.
+        Defaults to None, which crops with an offset of 2,000 ms.
+
+    References
+    ----------
+    .. [1] Hübner, D., Verhoeven, T., Schmid, K., Müller, K. R., Tangermann, M., & Kindermans, P. J. (2017)
+       Learning from label proportions in brain-computer interfaces: Online unsupervised learning with guarantees.
+       PLOS ONE 12(4): e0175856.
+       https://doi.org/10.1371/journal.pone.0175856
+    """
+
+    def __init__(self, interval=None, raw_slice_offset=None, use_blocks_as_sessions=True):
+        llp_speller_paper_doi = "10.1371/journal.pone.0175856"
+        super().__init__(
+            src_url=VISUAL_SPELLER_LLP_URL,
+            raw_slice_offset=raw_slice_offset,
+            n_subjects=13,
+            sessions_per_subject=1,  # if varying, take minimum
+            code="Visual Speller LLP",
+            interval=interval,
+            doi=llp_speller_paper_doi,
+            use_blocks_as_sessions=use_blocks_as_sessions,
+        )
+
+
+class Huebner2018(_BaseVisualMatrixSpellerDataset):
+    """
+    Mixture of LLP and EM for a visual matrix speller (ERP) dataset from Hübner et al 2018 [1]_.
+
+    Within a single session, a subject was asked to spell the beginning of a sentence in each of three blocks. The text
+    consists of the 35 symbols “Franzy jagt im Taxi quer durch das ”. In each block, one of the three decoding
+    algorithms (EM, LLP, MIX) was used to guess the attended symbol. The order of the blocks was
+    pseudo-randomized over subjects, such that each possible order of the three decoding algorithms was used twice.
+    This randomization should reduce systematic biases by order effects or temporal effects, e.g., due to fatigue or
+    task-learning.
+
+    A trial describes the process of spelling one character. Each of the 35 trials per block contained 68 highlighting
+    events. The stimulus onset asynchrony (SOA) was 250 ms and the stimulus duration was 100 ms, leading to an
+    interstimulus interval (ISI) of 150 ms.
+
+    Parameters
+    ----------
+    interval: array_like
+        range/interval in seconds in which the brain response/activity relative to an event/stimulus onset lies.
+        Defaults to [-0.2, 0.7].
+    raw_slice_offset: int, None
+        defines the crop offset in milliseconds before the first and after the last event (target or non-target) onset.
+        Defaults to None, which crops with an offset of 2,000 ms.
+
+    References
+    ----------
+    .. [1] Huebner, D., Verhoeven, T., Mueller, K. R., Kindermans, P. J., & Tangermann, M. (2018).
+       Unsupervised learning for brain-computer interfaces based on event-related potentials: Review and online comparison [research frontier].
+       IEEE Computational Intelligence Magazine, 13(2), 66-77.
+       https://doi.org/10.1109/MCI.2018.2807039
+    """
+
+    def __init__(self, interval=None, raw_slice_offset=None, use_blocks_as_sessions=True):
+        mix_speller_paper_doi = "10.1109/MCI.2018.2807039"
+        super().__init__(
+            src_url=VISUAL_SPELLER_MIX_URL,
+            raw_slice_offset=raw_slice_offset,
+            n_subjects=12,
+            sessions_per_subject=1,  # if varying, take minimum
+            code="Visual Speller MIX",
+            interval=interval,
+            doi=mix_speller_paper_doi,
+            use_blocks_as_sessions=use_blocks_as_sessions,
+        )
+
+
+def _read_raw_llp_study_data(vhdr_fname, raw_slice_offset, verbose=None):
+    """
+    Read an LLP BVR recording file. The different sequence lengths are ignored; each event is simply tagged as
+    target or non-target, depending on whether or not it contains a target.
+
+    Parameters
+    ----------
+    vhdr_fname: str
+        Path to the EEG header file.
+    verbose : bool, int, None
+        specify the loglevel.
+
+    Returns
+    -------
+    raw_objects: list of mne.io.Raw
+        the loaded (annotated and cropped) BVR raw objects.
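+
+    Notes
+    -----
+    ``raw_slice_offset`` is given in milliseconds (see the dataset class
+    docstrings above): it controls how much signal is kept before the first
+    and after the last event onset when each recording is cropped.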
+ """ + non_scalp_channels = ["EOGvu", "x_EMGl", "x_GSR", "x_Respi", "x_Pulse", "x_Optic"] + raw_bvr = mne.io.read_raw_brainvision( + vhdr_fname=vhdr_fname, # eog='EOGvu', + misc=non_scalp_channels, + preload=True, + verbose=verbose, + ) # type: mne.io.Raw + raw_bvr.set_montage("standard_1020") + + events = _parse_events(raw_bvr) + + onset_arr_list, marker_arr_list = _extract_target_non_target_description(events) + + def annotate_and_crop_raw(onset_arr, marker_arr): + raw = raw_bvr + + raw_annotated = raw.set_annotations( + _create_annotations_from(marker_arr, onset_arr, raw) + ) + + tmin = max((onset_arr[0] - raw_slice_offset) / 1e3, 0) + tmax = min((onset_arr[-1] + raw_slice_offset) / 1e3, raw.times[-1]) + return raw_annotated.crop(tmin=tmin, tmax=tmax, include_tmax=True) + + return list(map(annotate_and_crop_raw, onset_arr_list, marker_arr_list)) + + +def _create_annotations_from(marker_arr, onset_arr, raw_bvr): + default_bvr_marker_duration = raw_bvr.annotations[0]["duration"] + + onset = onset_arr / 1e3 # convert onset in seconds to ms + durations = np.repeat(default_bvr_marker_duration, len(marker_arr)) + description = list(map(lambda m: f"Stimulus/S {m:3}", marker_arr)) + orig_time = raw_bvr.annotations[0]["orig_time"] + return mne.Annotations( + onset=onset, duration=durations, description=description, orig_time=orig_time + ) + + +def _parse_events(raw_bvr): + stimulus_pattern = re.compile("(Stimulus/S|Optic/O) *([0-9]+)") + + def parse_marker(desc): + match = stimulus_pattern.match(desc) + if match is None: + return None + if match.group(1) == "Optic/O": + return OPTICAL_MARKER_CODE + + return int(match.group(2)) + + events, _ = mne.events_from_annotations( + raw=raw_bvr, event_id=parse_marker, verbose=None + ) + return events + + +def _find_single_trial_start_end_idx(events): + trial_start_end_markers = [21, 22, 10] + return np.where(np.isin(events[:, 2], trial_start_end_markers))[0] + + +def _extract_target_non_target_description(events): + single_trial_start_end_idx = _find_single_trial_start_end_idx(events) + + n_events = single_trial_start_end_idx.size - 1 + + onset_arr = np.empty((n_events,), dtype=np.int64) + marker_arr = np.empty((n_events,), dtype=np.int64) + + broken_events_idx = list() + for epoch_idx in range(n_events): + epoch_start_idx = single_trial_start_end_idx[epoch_idx] + epoch_end_idx = single_trial_start_end_idx[epoch_idx + 1] + + epoch_events = events[epoch_start_idx:epoch_end_idx] + + onset_ms = _find_epoch_onset(epoch_events) + if onset_ms == -1: + broken_events_idx.append(epoch_idx) + continue + + onset_arr[epoch_idx] = onset_ms + marker_arr[epoch_idx] = int( + _single_trial_contains_target(epoch_events) + ) # 1/true if single trial has target + + return [np.delete(onset_arr, broken_events_idx)], [ + np.delete(marker_arr, broken_events_idx) + ] + + +def _find_epoch_onset(epoch_events): + optical_idx = epoch_events[:, 2] == OPTICAL_MARKER_CODE + stimulus_onset_time = epoch_events[optical_idx, 0] + + def second_optical_is_feedback(): + if stimulus_onset_time.size != 2: + return False + + stimulus_prior_second_optical_marker = epoch_events[ + np.where(optical_idx)[0][1] - 1, 2 + ] + return stimulus_prior_second_optical_marker in [50, 51, 11] + + if stimulus_onset_time.size == 1 or second_optical_is_feedback(): + return stimulus_onset_time[0] + + # broken epoch: no true onset found.. 
+ return -1 + + +def _single_trial_contains_target(trial_events): + trial_markers = trial_events[:, 2] + return np.any((trial_markers > 100) & (trial_markers <= 142)) From 8a42108c41517ba5cb29816060c6f6be260e8052 Mon Sep 17 00:00:00 2001 From: Jan Sosulski Date: Thu, 17 Feb 2022 09:23:23 +0100 Subject: [PATCH 10/19] added Spot Auditory oddball dataset (#266) * added Spot Auditory oddball dataset * replaced usage of deprecated dl.data_path Co-authored-by: Sylvain Chevallier --- moabb/datasets/__init__.py | 1 + moabb/datasets/huebner_llp.py | 2 +- moabb/datasets/sosulski2019.py | 175 +++++++++++++++++++++++++++++++++ 3 files changed, 177 insertions(+), 1 deletion(-) create mode 100644 moabb/datasets/sosulski2019.py diff --git a/moabb/datasets/__init__.py b/moabb/datasets/__init__.py index 024652182..010254c58 100644 --- a/moabb/datasets/__init__.py +++ b/moabb/datasets/__init__.py @@ -26,6 +26,7 @@ from .neiry import DemonsP300 from .physionet_mi import PhysionetMI from .schirrmeister2017 import Schirrmeister2017 +from .sosulski2019 import Sosulski2019 from .ssvep_exo import SSVEPExo from .ssvep_mamem import MAMEM1, MAMEM2, MAMEM3 from .ssvep_nakanishi import Nakanishi2015 diff --git a/moabb/datasets/huebner_llp.py b/moabb/datasets/huebner_llp.py index ea75109af..6bb33671b 100644 --- a/moabb/datasets/huebner_llp.py +++ b/moabb/datasets/huebner_llp.py @@ -84,7 +84,7 @@ def data_path( self, subject, path=None, force_update=False, update_path=None, verbose=None ): url = f"{self._src_url}subject{subject:02d}.zip" - data_archive_path = dl.data_path(url, "llp") + data_archive_path = dl.data_dl(url, "llp") data_dir_extracted_path = os.path.dirname(data_archive_path) # else: # raise ValueError(f'URL or data path must be given but both are None.') diff --git a/moabb/datasets/sosulski2019.py b/moabb/datasets/sosulski2019.py new file mode 100644 index 000000000..24fceba4a --- /dev/null +++ b/moabb/datasets/sosulski2019.py @@ -0,0 +1,175 @@ +import glob +import os +import re +import zipfile + +import mne + +from moabb.datasets import download as dl +from moabb.datasets.base import BaseDataset + + +SPOT_PILOT_P300_URL = ( + "https://freidok.uni-freiburg.de/fedora/objects/freidok:154576/datastreams" +) + + +class Sosulski2019(BaseDataset): + """P300 dataset from initial spot study. + + Dataset [1], study on spatial transfer between SOAs [2], actual paradigm / online optimization [3]. + + **Dataset Description** + + This dataset contains multiple small trials of an auditory oddball paradigm. The paradigm presented two different + sinusoidal tones. A low-pitched (500 Hz, 40 ms duration) non-target tone and a high-pitched (1000 Hz, + 40 ms duration) target tone. Subjects were instructed to attend to the high-pitched target tones and ignore the + low-pitched tones. + + One trial (= one file) consisted of 90 tones, 15 targets and 75 non-targets. The order was pseudo-randomized in a + way that at least two non-target tones occur between two target tones. Additionally, if you split the 90 tones of + one trial into consecutive sets of six tones, there will always be exactly one target and five non-target tones + in each set. + + In the first part of the experiment (run 1), each subject performed 50-70 trials with various different stimulus + onset asynchronies (SOAs) -- i.e. the time between the onset of successive tones -- for each trial. In the second + part (run 2), 4-5 SOAs were played, with blocks of 5 trials having the same SOA. All SOAs were in the range of 60 + ms to 600 ms. 
Regardless of the experiment part, after a set of five trials, subjects were given the opportunity
+    to take a short break, e.g., to drink something.
+
+    Finally, before and after each run, resting data was recorded. One minute with eyes open and one minute with eyes
+    closed, i.e. in total four minutes of resting data are available for each subject.
+
+    Data was recorded using a BrainAmp DC (BrainVision) amplifier and an EasyCap with 31 passive electrodes. The cap was
+    placed according to the extended 10-20 electrode layout. The reference electrode was placed on the nose. Before
+    recording, the cap was prepared such that impedances on all electrodes were around 20 kOhm. The EEG signal was
+    recorded at 1000 Hz.
+
+    The data contains 31 scalp channels, one EOG channel and five miscellaneous non-EEG signal channels. However,
+    only the scalp EEG and the EOG channel are available for all subjects. The markers in the marker file indicate the
+    onset of target tones (21) and non-target tones (1).
+
+    WARNING: Note that this wrapper currently only loads the second part of the experiment and uses pseudo-sessions
+    to handle the different conditions in MOABB. As a result, the statistical testing
+    features of MOABB cannot be used for this dataset.
+
+    References
+    ----------
+
+    .. [1] Sosulski, J., Tangermann, M.: Electroencephalogram signals recorded from 13 healthy subjects during an
+    auditory oddball paradigm under different stimulus onset asynchrony conditions. Dataset. DOI: 10.6094/UNIFR/154576
+    .. [2] Sosulski, J., Tangermann, M.: Spatial filters for auditory evoked potentials transfer between different
+    experimental conditions. Graz BCI Conference. 2019.
+    .. [3] Sosulski, J., Hübner, D., Klein, A., Tangermann, M.: Online Optimization of Stimulation Speed in an Auditory Brain-Computer
+    Interface under Time Constraints. arXiv preprint. 2021.
+    """
+
+    def __init__(
+        self,
+        use_soas_as_sessions=True,
+        load_soa_60=False,
+        reject_non_iid=False,
+        interval=None,
+    ):
+        """
+        :param use_soas_as_sessions: 1800 epochs were recorded at each of the different SOAs. Depending on
+        the subject, there are 3 to 4 of them (4 to 5 if SOA 60 is loaded). Training classifiers on mixtures
+        of SOAs is rarely useful, so setting this to True loads each SOA as an individual session, e.g. for
+        WithinSessionEvaluation.
+        :param load_soa_60: whether to load SOA 60. Note that this was always recorded, but the
+        recorded ERP was extremely weak (as expected).
+        :param reject_non_iid: if True, removes the first 6 and last 6 epochs of each trial.
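+        :param interval: epoching interval in seconds, relative to stimulus onset;
+        if None, it defaults to [-0.2, 1] (see the assignment in the constructor body below).
+
+        A minimal usage sketch (illustrative only; it assumes the standard
+        BaseDataset.get_data API rather than anything specific to this file)::
+
+            from moabb.datasets import Sosulski2019
+
+            dataset = Sosulski2019(use_soas_as_sessions=True)
+            sessions = dataset.get_data(subjects=[1])  # dict: subject -> session -> run -> Raw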
+ """ + self.load_soa_60 = load_soa_60 + self.reject_non_iid = reject_non_iid + self.stimulus_modality = "tone_oddball" + self.n_channels = 31 + self.use_soas_as_sessions = use_soas_as_sessions + code = "Spot Pilot P300 dataset" + interval = [-0.2, 1] if interval is None else interval + super().__init__( + subjects=list(range(1, 13 + 1)), + sessions_per_subject=1, + events=dict(Target=21, NonTarget=1), + code=code, + interval=interval, + paradigm="p300", + doi="10.6094/UNIFR/154576", + ) + + @staticmethod + def _map_subject_to_filenumber(subject_number): + # The ordering of the uploaded files on freidok makes no sense, this function maps subject_numbers to corresponding files + mapping = [5, 2, 4, 6, 3, 1, 10, 7, 12, 9, 8, 11, 13] + return mapping[subject_number - 1] + + @staticmethod + def filename_trial_info_extraction(filepath): + info_pattern = "Oddball_Run_([0-9]+)_Trial_([0-9]+)_SOA_[0-9]\\.([0-9]+)\\.vhdr" + filename = filepath.split(os.path.sep)[-1] + trial_info = dict() + re_matches = re.match(info_pattern, filename) + trial_info["run"] = int(re_matches.group(1)) + trial_info["trial"] = int(re_matches.group(2)) + trial_info["soa"] = int(re_matches.group(3)) + return trial_info + + def _get_single_run_data(self, file_path): + non_scalp_channels = ["EOGvu", "x_EMGl", "x_GSR", "x_Respi", "x_Pulse", "x_Optic"] + raw = mne.io.read_raw_brainvision( + file_path, misc=non_scalp_channels, preload=True + ) + raw.set_montage("standard_1020") + if self.reject_non_iid: + raw.set_annotations(raw.annotations[7:85]) # non-iid rejection + return raw + + def _get_single_subject_data(self, subject): + """return data for a single subject""" + + file_path_list = self.data_path(subject) + sessions = {} + + for p_i, file_path in enumerate(file_path_list): + file_exp_info = Sosulski2019.filename_trial_info_extraction(file_path) + soa = file_exp_info["soa"] + # trial = file_exp_info["trial"] + if soa == 60 and not self.load_soa_60: + continue + if self.use_soas_as_sessions: + session_name = f"session_1_soa_{soa}" + else: + session_name = "session_1" + + if session_name not in sessions.keys(): + sessions[session_name] = {} + + run_name = f"run_{p_i + 1}_soa_{p_i}" + sessions[session_name][run_name] = self._get_single_run_data(file_path) + + return sessions + + def data_path( + self, subject, path=None, force_update=False, update_path=None, verbose=None + ): + + if subject not in self.subject_list: + raise (ValueError("Invalid subject number")) + + # check if has the .zip + file_number = Sosulski2019._map_subject_to_filenumber(subject) + url = f"{SPOT_PILOT_P300_URL}/FILE{file_number}/content" + path_zip = dl.data_dl(url, "spot") + path_folder = path_zip[:-8] + f"/subject{subject}" + + # check if has to unzip + if not (os.path.isdir(path_folder)): + print("unzip", path_zip) + zip_ref = zipfile.ZipFile(path_zip, "r") + zip_ref.extractall(path_zip[:-7]) + + # get the path to all files + # We only load data from the second run. 
The first run is a potpourri of SOAs + pattern = "/*Run_2*.vhdr" + subject_paths = glob.glob(path_folder + pattern) + return sorted(subject_paths) From fc11180083a0c17592312369c37a1dc8e66891b2 Mon Sep 17 00:00:00 2001 From: Jan Sosulski Date: Mon, 21 Feb 2022 20:09:52 +0100 Subject: [PATCH 11/19] Visualize all ERP datasets (#261) * Visualize all ERP datasets * * use paradigm.datasets instead of manual list * more verbose sanity check script * fix epo data leak + remove title bf * moved data visualization added disclaimer regarding data size Co-authored-by: Sylvain Chevallier --- scripts/README.md | 11 ++ scripts/data_visualization_p300.py | 224 +++++++++++++++++++++++++++++ 2 files changed, 235 insertions(+) create mode 100644 scripts/README.md create mode 100644 scripts/data_visualization_p300.py diff --git a/scripts/README.md b/scripts/README.md new file mode 100644 index 000000000..b0d197fb5 --- /dev/null +++ b/scripts/README.md @@ -0,0 +1,11 @@ +# Utility Scripts for MOABB + +**If you only want to use MOABB for benchmarking you probably will not need to use any of +these scripts.** + +## Overview + +- `data_visualization_*`: Scripts starting with data_visualization download ALL datasets + for a specific paradigm to create, e.g., descriptive statistics and plots. As datasets + should not change over time, most of these only need to be run once and the results + could be stored somewhere else. diff --git a/scripts/data_visualization_p300.py b/scripts/data_visualization_p300.py new file mode 100644 index 000000000..72042d597 --- /dev/null +++ b/scripts/data_visualization_p300.py @@ -0,0 +1,224 @@ +""" +=========================== +Script for visualization of ALL P300 datasets +=========================== + +This script will download ALL P300 datasets and create +descriptive plots for every single session. + +Total downloaded size will be (as of now) 120GB. 
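+
+Figures are written below the hard-coded FIGURES_PATH defined at module level
+(one folder per dataset, with subfolders per subject and session); adjust that
+path before running this script.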
+ +""" +# Authors: Jan Sosulski +# +# License: BSD (3-clause) +import warnings +from pathlib import Path + +import matplotlib +import mne +import numpy as np +import seaborn as sns +from matplotlib import pyplot as plt + +from moabb.paradigms import P300 + + +matplotlib.use("agg") +sns.set_style("whitegrid") +mne.set_log_level("WARNING") + + +def create_plot_overview(epo, plot_opts=None, path=None, description=""): + # Butterflyplot + suptitle = f"{description} ({epo_summary(epo)[1]})" + epo_t = epo["Target"] + epo_nt = epo["NonTarget"] + evkd_t = epo_t.average() + evkd_nt = epo_nt.average() + ix_t = epo.events[:, 2] == epo.event_id["Target"] + ix_nt = epo.events[:, 2] == epo.event_id["NonTarget"] + + fig0, ax = plt.subplots(1, 1, figsize=(10, 3), sharey="all", sharex="all") + ax.scatter( + epo.events[ix_t, 0], + np.ones((np.sum(ix_t),)), + color="r", + marker="|", + label="Target", + ) + ax.scatter( + epo.events[ix_nt, 0], + np.zeros((np.sum(ix_nt),)), + color="b", + marker="|", + label="NonTarget", + ) + ax.legend() + ax.set_title("Event timeline") + fig0.suptitle(suptitle) + fig0.tight_layout() + fig0.savefig(path / f"event_timeline.{plot_format}", dpi=plot_opts["dpi"]) + + fig1, axes = plt.subplots(2, 1, figsize=(6, 6), sharey="all", sharex="all") + evkd_t.plot(spatial_colors=True, show=False, axes=axes[0]) + axes[0].set_title("Target response") + evkd_nt.plot(spatial_colors=True, show=False, axes=axes[1]) + axes[1].set_title("NonTarget response") + fig1.suptitle(suptitle) + with warnings.catch_warnings(): + warnings.simplefilter("ignore") + fig1.tight_layout() + fig1.savefig(path / f"target_nontarget_erps.{plot_format}", dpi=plot_opts["dpi"]) + + # topomap + tp = plot_opts["topo"]["timepoints"] + tmin, tmax = plot_opts["topo"]["tmin"], plot_opts["topo"]["tmax"] + times = np.linspace(tmin, tmax, tp) + fig2 = evkd_t.plot_topomap(times=times, colorbar=True, show=False) + fig2.suptitle(suptitle) + fig2.savefig( + path / f"target_topomap_{tp}_timepoints.{plot_format}", + dpi=plot_opts["dpi"], + ) + + # jointmap + fig3 = evkd_t.plot_joint(show=False) + fig3.suptitle(suptitle) + fig3.savefig(path / f"target_erp_topo.{plot_format}", dpi=plot_opts["dpi"]) + + # sensorplot + fig4 = mne.viz.plot_compare_evokeds( + [evkd_t.crop(0, 0.6), evkd_nt.crop(0, 0.6)], axes="topo", show=False + ) + fig4[0].suptitle(suptitle) + fig4[0].savefig(path / f"sensorplot.{plot_format}", dpi=plot_opts["dpi"]) + + fig5, ax = plt.subplots(2, 1, figsize=(8, 6), sharex="all", sharey="all") + t_data = epo_t.get_data() * 1e6 + nt_data = epo_nt.get_data() * 1e6 + data = epo.get_data() * 1e6 + minmax = np.max(data, axis=2) - np.min(data, axis=2) + per_channel = np.mean(minmax, axis=0) + worst_ch = np.argsort(per_channel) + worst_ch = worst_ch[max(-8, -len(epo.ch_names)) :] + minmax_t = np.max(t_data, axis=2) - np.min(t_data, axis=2) + minmax_nt = np.max(nt_data, axis=2) - np.min(nt_data, axis=2) + ch = epo_t.ch_names + for i in range(minmax_nt.shape[1]): + lab = ch[i] if i in worst_ch else None + sns.kdeplot(minmax_t[:, i], ax=ax[0], label=lab, clip=(0, 300)) + sns.kdeplot(minmax_nt[:, i], ax=ax[1], label=lab, clip=(0, 300)) + ax[0].set_xlim(0, 200) + ax[0].set_title("Target minmax") + ax[1].set_title("NonTarget minmax") + ax[1].set_xlabel("Minmax in $\\mu$V") + ax[1].legend(title="Worst channels") + fig5.suptitle(suptitle) + fig5.tight_layout() + fig5.savefig(path / f"minmax.{plot_format}", dpi=plot_opts["dpi"]) + + with warnings.catch_warnings(): + warnings.simplefilter("ignore") + fig6 = epo.plot_psd(0, 20, bandwidth=1) 
+ fig6.suptitle(suptitle) + fig6.tight_layout() + fig6.savefig(path / f"spectrum.{plot_format}", dpi=plot_opts["dpi"]) + + plt.close("all") + + +def epo_summary(epos): + summary = dict() + summary["mne_string"] = repr(epos) + summary["n_channels"] = len(epos.ch_names) + summary["n_target"] = len(epos["Target"]) + summary["n_nontarget"] = len(epos["NonTarget"]) + info_str = ( + f"Ch:{len(epos.ch_names)},T:{len(epos['Target'])},NT:{len(epos['NonTarget'])}" + ) + return summary, info_str + + +FIGURES_PATH = Path("/home/jan/bci_data/figures/moabb_erps") + +# Changing this to False re-generates all plots even if they exist. Use with caution. +cache_plots = True + +plot_format = "png" +baseline = None +highpass = 0.5 +lowpass = 16 +sampling_rate = 100 + +paradigm = P300( + resample=sampling_rate, + fmin=highpass, + fmax=lowpass, + baseline=baseline, +) + +ival = [-0.3, 1] + +plot_opts = { + "dpi": 120, + "topo": { + "timepoints": 10, + "tmin": 0, + "tmax": 0.6, + }, +} + +plt.ioff() +# dsets = P300_DSETS +dsets = paradigm.datasets +for dset in dsets: + dset.interval = ival + dset_name = dset.__class__.__name__ + + print(f"Processing dataset: {dset_name}") + + data_path = FIGURES_PATH / dset_name # Path of the dataset folder + Path(data_path).mkdir(parents=True, exist_ok=True) + all_subjects_cached = True + for subject in dset.subject_list: + subject_path = data_path / f"subject_{subject}" + if cache_plots and subject_path.exists(): + continue + all_subjects_cached = False + print(f" Processing subject: {subject}") + + subject_path.mkdir(parents=True, exist_ok=True) + try: + epos, labels, meta = paradigm.get_data(dset, [subject], return_epochs=True) + except Exception: # catch all, dont stop processing pls + print(f"Failed to get data for {dset_name}-{subject}") + (subject_path / "processing_error").touch() + continue + + description = f"Dset: {dset_name}, Sub: {subject}, Ses: all" + + create_plot_overview( + epos, + plot_opts=plot_opts, + path=subject_path, + description=description, + ) + + if len(meta["session"].unique()) > 1: + for session in meta["session"].unique(): + session_path = subject_path / f"session_{session}" + session_path.mkdir(parents=True, exist_ok=True) + ix = meta.session == session + description = f"Dset: {dset_name}, Sub: {subject}, Ses: {session}" + create_plot_overview( + epos[ix], + plot_opts=plot_opts, + path=session_path, + description=description, + ) + + if all_subjects_cached: + print(" No plots necessary, every subject has output folder.") + +print("All datasets processed.") From c9ea033da10c159da38dead21d76f9ced002623f Mon Sep 17 00:00:00 2001 From: Sylvain Chevallier Date: Tue, 22 Feb 2022 11:44:45 +0100 Subject: [PATCH 12/19] update to v0.4.5 (#269) * update to v0.4.5 * update poetry and requirements --- docs/source/whats_new.rst | 32 +++- moabb/__init__.py | 2 +- moabb/datasets/huebner_llp.py | 4 + moabb/datasets/sosulski2019.py | 5 +- poetry.lock | 238 +++++++++++------------ pyproject.toml | 2 +- requirements.txt | 294 ++++++++++++++++------------- scripts/data_visualization_p300.py | 2 + 8 files changed, 307 insertions(+), 272 deletions(-) diff --git a/docs/source/whats_new.rst b/docs/source/whats_new.rst index 55fe9f2f9..ad255dca1 100644 --- a/docs/source/whats_new.rst +++ b/docs/source/whats_new.rst @@ -18,16 +18,12 @@ Develop branch Enhancements ~~~~~~~~~~~~ -- Progress bars, pooch, tqdm (:gh:`258` by `Divyesh Narayanan`_ and `Sylvain Chevallier`_) -- Adding Test and Example for set_download_dir (:gh:`249` by `Divyesh Narayanan`_) +- None Bugs ~~~~ -- 
Removing dependency on mne method for PhysionetMI data downloading, renaming runs (:gh:`257` by `Divyesh Narayanan`_)
-- Correcting events management in Schirrmeister2017, renaming session and run (:gh:`255` by `Pierre Guetschel`_ and `Sylvain Chevallier`_)
-- Switch session and runs in MAMEM1, 2 and 3 to avoid error in WithinSessionEvaluation (:gh:`256` by `Sylvain Chevallier`_)
-- Correct docstrings for the documentation, including Lee2017 (:gh:`256` by `Sylvain Chevallier`_)
+- None

 API changes
 ~~~~~~~~~~~
@@ -35,7 +31,29 @@ API changes

 - None

-Version - 0.4.4 (Stable - PyPi)
+Version - 0.4.5 (Stable - PyPi)
+---------------------------------
+
+Enhancements
+~~~~~~~~~~~~
+
+- Progress bars, pooch, tqdm (:gh:`258` by `Divyesh Narayanan`_ and `Sylvain Chevallier`_)
+- Adding test and example for set_download_dir (:gh:`249` by `Divyesh Narayanan`_)
+- Update to newer version of Schirrmeister2017 dataset (:gh:`265` by `Robin Schirrmeister`_)
+- Adding Huebner2017 and Huebner2018 P300 datasets (:gh:`260` by `Jan Sosulski`_)
+- Adding Sosulski2019 auditory P300 datasets (:gh:`266` by `Jan Sosulski`_)
+- New script to visualize ERP on all datasets, as a sanity check (:gh:`261` by `Jan Sosulski`_)
+
+Bugs
+~~~~
+
+- Removing dependency on mne method for PhysionetMI data downloading, renaming runs (:gh:`257` by `Divyesh Narayanan`_)
+- Correcting events management in Schirrmeister2017, renaming session and run (:gh:`255` by `Pierre Guetschel`_ and `Sylvain Chevallier`_)
+- Switch session and runs in MAMEM1, 2 and 3 to avoid error in WithinSessionEvaluation (:gh:`256` by `Sylvain Chevallier`_)
+- Correct docstrings for the documentation, including Lee2017 (:gh:`256` by `Sylvain Chevallier`_)
+
+
+Version - 0.4.4
 ---------------

 Enhancements
diff --git a/moabb/__init__.py b/moabb/__init__.py
index a1cd23b5f..d1ad9a8de 100644
--- a/moabb/__init__.py
+++ b/moabb/__init__.py
@@ -1,4 +1,4 @@
 # flake8: noqa
-__version__ = "0.4.4"
+__version__ = "0.4.5"

 from moabb.utils import set_log_level
diff --git a/moabb/datasets/huebner_llp.py b/moabb/datasets/huebner_llp.py
index 6bb33671b..a620187be 100644
--- a/moabb/datasets/huebner_llp.py
+++ b/moabb/datasets/huebner_llp.py
@@ -156,6 +156,8 @@ class Huebner2017(_BaseVisualMatrixSpellerDataset):
        Learning from label proportions in brain-computer interfaces: Online unsupervised learning with guarantees.
        PLOS ONE 12(4): e0175856.
        https://doi.org/10.1371/journal.pone.0175856
+
+    .. versionadded:: 0.4.5
     """

     def __init__(self, interval=None, raw_slice_offset=None, use_blocks_as_sessions=True):
@@ -202,6 +204,8 @@ class Huebner2018(_BaseVisualMatrixSpellerDataset):
        Unsupervised learning for brain-computer interfaces based on event-related potentials: Review and online comparison [research frontier].
        IEEE Computational Intelligence Magazine, 13(2), 66-77.
        https://doi.org/10.1109/MCI.2018.2807039
+
+    .. versionadded:: 0.4.5
     """

     def __init__(self, interval=None, raw_slice_offset=None, use_blocks_as_sessions=True):
diff --git a/moabb/datasets/sosulski2019.py b/moabb/datasets/sosulski2019.py
index 24fceba4a..1e41ad9c6 100644
--- a/moabb/datasets/sosulski2019.py
+++ b/moabb/datasets/sosulski2019.py
@@ -60,8 +60,9 @@ class Sosulski2019(BaseDataset):
    auditory oddball paradigm under different stimulus onset asynchrony conditions. Dataset. DOI: 10.6094/UNIFR/154576
    .. [2] Sosulski, J., Tangermann, M.: Spatial filters for auditory evoked potentials transfer between different
    experimental conditions. Graz BCI Conference. 2019.
-    ..
[3] Sosulski, J., Hübner, D., Klein, A., Tangermann, M.: Online Optimization of Stimulation Speed in an Auditory Brain-Computer - Interface under Time Constraints. arXiv preprint. 2021. + .. [3] Sosulski, J., Hübner, D., Klein, A., Tangermann, M.: Online Optimization of Stimulation Speed in an Auditory Brain-Computer Interface under Time Constraints. arXiv preprint. 2021. + + .. versionadded:: 0.4.5 """ def __init__( diff --git a/poetry.lock b/poetry.lock index 098a7c9b7..c6e0e1cff 100644 --- a/poetry.lock +++ b/poetry.lock @@ -51,7 +51,7 @@ python-versions = ">=3.6.1" [[package]] name = "charset-normalizer" -version = "2.0.11" +version = "2.0.12" description = "The Real First Universal Charset Detector. Open, modern and actively maintained alternative to Chardet." category = "main" optional = false @@ -105,7 +105,7 @@ python-versions = ">=2.7, !=3.0.*, !=3.1.*, !=3.2.*, !=3.3.*, !=3.4.*" [[package]] name = "filelock" -version = "3.4.2" +version = "3.6.0" description = "A platform independent file lock." category = "dev" optional = false @@ -150,7 +150,7 @@ numpy = ">=1.14.5" [[package]] name = "identify" -version = "2.4.7" +version = "2.4.10" description = "File identification library for Python" category = "dev" optional = false @@ -177,7 +177,7 @@ python-versions = ">=2.7, !=3.0.*, !=3.1.*, !=3.2.*, !=3.3.*" [[package]] name = "importlib-metadata" -version = "4.10.1" +version = "4.11.1" description = "Read metadata from Python packages" category = "dev" optional = false @@ -190,7 +190,7 @@ zipp = ">=0.5" [package.extras] docs = ["sphinx", "jaraco.packaging (>=8.2)", "rst.linker (>=1.9)"] perf = ["ipython"] -testing = ["pytest (>=6)", "pytest-checkdocs (>=2.4)", "pytest-flake8", "pytest-cov", "pytest-enabler (>=1.0.1)", "packaging", "pyfakefs", "flufl.flake8", "pytest-perf (>=0.9.2)", "pytest-black (>=0.3.7)", "pytest-mypy", "importlib-resources (>=1.3)"] +testing = ["pytest (>=6)", "pytest-checkdocs (>=2.4)", "pytest-flake8", "pytest-cov", "pytest-enabler (>=1.0.1)", "packaging", "pyfakefs", "flufl.flake8", "pytest-perf (>=0.9.2)", "pytest-black (>=0.3.7)", "pytest-mypy (>=0.9.1)", "importlib-resources (>=1.3)"] [[package]] name = "jinja2" @@ -236,11 +236,11 @@ mistune = "*" [[package]] name = "markupsafe" -version = "2.0.1" +version = "2.1.0" description = "Safely add untrusted strings to HTML/XML markup." category = "dev" optional = false -python-versions = ">=3.6" +python-versions = ">=3.7" [[package]] name = "matplotlib" @@ -345,7 +345,7 @@ test = ["pytest (>=4.0.2)", "pytest-xdist", "hypothesis (>=3.58)"] [[package]] name = "pillow" -version = "9.0.0" +version = "9.0.1" description = "Python Imaging Library (Fork)" category = "main" optional = false @@ -353,7 +353,7 @@ python-versions = ">=3.7" [[package]] name = "platformdirs" -version = "2.4.1" +version = "2.5.1" description = "A small Python module for determining appropriate platform-specific dirs, e.g. a \"user data dir\"." category = "dev" optional = false @@ -587,7 +587,7 @@ test = ["pytest", "pytest-cov", "html5lib", "cython", "typed-ast"] [[package]] name = "sphinx-bootstrap-theme" -version = "0.8.0" +version = "0.8.1" description = "Sphinx Bootstrap Theme." 
category = "dev" optional = false @@ -708,7 +708,7 @@ python-versions = ">=2.6, !=3.0.*, !=3.1.*, !=3.2.*" [[package]] name = "tomli" -version = "2.0.0" +version = "2.0.1" description = "A lil' TOML parser" category = "main" optional = false @@ -732,7 +732,7 @@ telegram = ["requests"] [[package]] name = "typing-extensions" -version = "4.0.1" +version = "4.1.1" description = "Backported and Experimental Type Hints for Python 3.6+" category = "dev" optional = false @@ -753,7 +753,7 @@ socks = ["PySocks (>=1.5.6,!=1.5.7,<2.0)"] [[package]] name = "virtualenv" -version = "20.13.0" +version = "20.13.1" description = "Virtual Python Environment builder" category = "dev" optional = false @@ -813,8 +813,8 @@ cfgv = [ {file = "cfgv-3.3.1.tar.gz", hash = "sha256:f5a830efb9ce7a445376bb66ec94c638a9787422f96264c98edc6bdeed8ab736"}, ] charset-normalizer = [ - {file = "charset-normalizer-2.0.11.tar.gz", hash = "sha256:98398a9d69ee80548c762ba991a4728bfc3836768ed226b3945908d1a688371c"}, - {file = "charset_normalizer-2.0.11-py3-none-any.whl", hash = "sha256:2842d8f5e82a1f6aa437380934d5e1cd4fcf2003b06fed6940769c164a480a45"}, + {file = "charset-normalizer-2.0.12.tar.gz", hash = "sha256:2857e29ff0d34db842cd7ca3230549d1a697f96ee6d3fb071cfa6c7393832597"}, + {file = "charset_normalizer-2.0.12-py3-none-any.whl", hash = "sha256:6881edbebdb17b39b4eaaa821b438bf6eddffb4468cf344f09f89def34a8b1df"}, ] colorama = [ {file = "colorama-0.4.4-py2.py3-none-any.whl", hash = "sha256:9f47eda37229f68eee03b24b9748937c7dc3868f906e8ba69fbcbdd3bc5dc3e2"}, @@ -887,8 +887,8 @@ docutils = [ {file = "docutils-0.16.tar.gz", hash = "sha256:c2de3a60e9e7d07be26b7f2b00ca0309c207e06c100f9cc2a94931fc75a478fc"}, ] filelock = [ - {file = "filelock-3.4.2-py3-none-any.whl", hash = "sha256:cf0fc6a2f8d26bd900f19bf33915ca70ba4dd8c56903eeb14e1e7a2fd7590146"}, - {file = "filelock-3.4.2.tar.gz", hash = "sha256:38b4f4c989f9d06d44524df1b24bd19e167d851f19b50bf3e3559952dddc5b80"}, + {file = "filelock-3.6.0-py3-none-any.whl", hash = "sha256:f8314284bfffbdcfa0ff3d7992b023d4c628ced6feb957351d4c48d059f56bc0"}, + {file = "filelock-3.6.0.tar.gz", hash = "sha256:9cd540a9352e432c7246a48fe4e8712b10acb1df2ad1f30e8c070b82ae1fed85"}, ] fonttools = [ {file = "fonttools-4.29.1-py3-none-any.whl", hash = "sha256:1933415e0fbdf068815cb1baaa1f159e17830215f7e8624e5731122761627557"}, @@ -913,8 +913,8 @@ h5py = [ {file = "h5py-3.6.0.tar.gz", hash = "sha256:8752d2814a92aba4e2b2a5922d2782d0029102d99caaf3c201a566bc0b40db29"}, ] identify = [ - {file = "identify-2.4.7-py2.py3-none-any.whl", hash = "sha256:e64210654dfbca6ced33230eb1b137591a0981425e1a60b4c6c36309f787bbd5"}, - {file = "identify-2.4.7.tar.gz", hash = "sha256:8408f01e0be25492017346d7dffe7e7711b762b23375c775d24d3bc38618fabc"}, + {file = "identify-2.4.10-py2.py3-none-any.whl", hash = "sha256:7d10baf6ba6f1912a0a49f4c1c2c49fa1718765c3a37d72d13b07779567c5b85"}, + {file = "identify-2.4.10.tar.gz", hash = "sha256:e12b2aea3cf108de73ae055c2260783bde6601de09718f6768cf8e9f6f6322a6"}, ] idna = [ {file = "idna-3.3-py3-none-any.whl", hash = "sha256:84d9dd047ffa80596e0f246e2eab0b391788b0503584e8945f2368256d2735ff"}, @@ -925,8 +925,8 @@ imagesize = [ {file = "imagesize-1.3.0.tar.gz", hash = "sha256:cd1750d452385ca327479d45b64d9c7729ecf0b3969a58148298c77092261f9d"}, ] importlib-metadata = [ - {file = "importlib_metadata-4.10.1-py3-none-any.whl", hash = "sha256:899e2a40a8c4a1aec681feef45733de8a6c58f3f6a0dbed2eb6574b4387a77b6"}, - {file = "importlib_metadata-4.10.1.tar.gz", hash = 
"sha256:951f0d8a5b7260e9db5e41d429285b5f451e928479f19d80818878527d36e95e"}, + {file = "importlib_metadata-4.11.1-py3-none-any.whl", hash = "sha256:e0bc84ff355328a4adfc5240c4f211e0ab386f80aa640d1b11f0618a1d282094"}, + {file = "importlib_metadata-4.11.1.tar.gz", hash = "sha256:175f4ee440a0317f6e8d81b7f8d4869f93316170a65ad2b007d2929186c8052c"}, ] jinja2 = [ {file = "Jinja2-3.0.3-py3-none-any.whl", hash = "sha256:077ce6014f7b40d03b47d1f1ca4b0fc8328a692bd284016f806ed0eaca390ad8"}, @@ -987,75 +987,46 @@ m2r2 = [ {file = "m2r2-0.2.8.tar.gz", hash = "sha256:ca39e1db74991818d667c7367e4fc2de13ecefd2a04d69d83b0ffa76d20d7e29"}, ] markupsafe = [ - {file = "MarkupSafe-2.0.1-cp310-cp310-macosx_10_9_universal2.whl", hash = "sha256:d8446c54dc28c01e5a2dbac5a25f071f6653e6e40f3a8818e8b45d790fe6ef53"}, - {file = "MarkupSafe-2.0.1-cp310-cp310-macosx_10_9_x86_64.whl", hash = "sha256:36bc903cbb393720fad60fc28c10de6acf10dc6cc883f3e24ee4012371399a38"}, - {file = "MarkupSafe-2.0.1-cp310-cp310-manylinux_2_17_aarch64.manylinux2014_aarch64.whl", hash = "sha256:2d7d807855b419fc2ed3e631034685db6079889a1f01d5d9dac950f764da3dad"}, - {file = "MarkupSafe-2.0.1-cp310-cp310-manylinux_2_5_i686.manylinux1_i686.manylinux_2_12_i686.manylinux2010_i686.whl", hash = "sha256:add36cb2dbb8b736611303cd3bfcee00afd96471b09cda130da3581cbdc56a6d"}, - {file = "MarkupSafe-2.0.1-cp310-cp310-manylinux_2_5_x86_64.manylinux1_x86_64.manylinux_2_12_x86_64.manylinux2010_x86_64.whl", hash = "sha256:168cd0a3642de83558a5153c8bd34f175a9a6e7f6dc6384b9655d2697312a646"}, - {file = "MarkupSafe-2.0.1-cp310-cp310-musllinux_1_1_aarch64.whl", hash = "sha256:4dc8f9fb58f7364b63fd9f85013b780ef83c11857ae79f2feda41e270468dd9b"}, - {file = "MarkupSafe-2.0.1-cp310-cp310-musllinux_1_1_i686.whl", hash = "sha256:20dca64a3ef2d6e4d5d615a3fd418ad3bde77a47ec8a23d984a12b5b4c74491a"}, - {file = "MarkupSafe-2.0.1-cp310-cp310-musllinux_1_1_x86_64.whl", hash = "sha256:cdfba22ea2f0029c9261a4bd07e830a8da012291fbe44dc794e488b6c9bb353a"}, - {file = "MarkupSafe-2.0.1-cp310-cp310-win32.whl", hash = "sha256:99df47edb6bda1249d3e80fdabb1dab8c08ef3975f69aed437cb69d0a5de1e28"}, - {file = "MarkupSafe-2.0.1-cp310-cp310-win_amd64.whl", hash = "sha256:e0f138900af21926a02425cf736db95be9f4af72ba1bb21453432a07f6082134"}, - {file = "MarkupSafe-2.0.1-cp36-cp36m-macosx_10_9_x86_64.whl", hash = "sha256:f9081981fe268bd86831e5c75f7de206ef275defcb82bc70740ae6dc507aee51"}, - {file = "MarkupSafe-2.0.1-cp36-cp36m-manylinux1_i686.whl", hash = "sha256:0955295dd5eec6cb6cc2fe1698f4c6d84af2e92de33fbcac4111913cd100a6ff"}, - {file = "MarkupSafe-2.0.1-cp36-cp36m-manylinux1_x86_64.whl", hash = "sha256:0446679737af14f45767963a1a9ef7620189912317d095f2d9ffa183a4d25d2b"}, - {file = "MarkupSafe-2.0.1-cp36-cp36m-manylinux2010_i686.whl", hash = "sha256:f826e31d18b516f653fe296d967d700fddad5901ae07c622bb3705955e1faa94"}, - {file = "MarkupSafe-2.0.1-cp36-cp36m-manylinux2010_x86_64.whl", hash = "sha256:fa130dd50c57d53368c9d59395cb5526eda596d3ffe36666cd81a44d56e48872"}, - {file = "MarkupSafe-2.0.1-cp36-cp36m-manylinux2014_aarch64.whl", hash = "sha256:905fec760bd2fa1388bb5b489ee8ee5f7291d692638ea5f67982d968366bef9f"}, - {file = "MarkupSafe-2.0.1-cp36-cp36m-manylinux_2_17_aarch64.manylinux2014_aarch64.whl", hash = "sha256:bf5d821ffabf0ef3533c39c518f3357b171a1651c1ff6827325e4489b0e46c3c"}, - {file = "MarkupSafe-2.0.1-cp36-cp36m-manylinux_2_5_i686.manylinux1_i686.manylinux_2_12_i686.manylinux2010_i686.whl", hash = "sha256:0d4b31cc67ab36e3392bbf3862cfbadac3db12bdd8b02a2731f509ed5b829724"}, - {file = 
"MarkupSafe-2.0.1-cp36-cp36m-manylinux_2_5_x86_64.manylinux1_x86_64.manylinux_2_12_x86_64.manylinux2010_x86_64.whl", hash = "sha256:baa1a4e8f868845af802979fcdbf0bb11f94f1cb7ced4c4b8a351bb60d108145"}, - {file = "MarkupSafe-2.0.1-cp36-cp36m-musllinux_1_1_aarch64.whl", hash = "sha256:deb993cacb280823246a026e3b2d81c493c53de6acfd5e6bfe31ab3402bb37dd"}, - {file = "MarkupSafe-2.0.1-cp36-cp36m-musllinux_1_1_i686.whl", hash = "sha256:63f3268ba69ace99cab4e3e3b5840b03340efed0948ab8f78d2fd87ee5442a4f"}, - {file = "MarkupSafe-2.0.1-cp36-cp36m-musllinux_1_1_x86_64.whl", hash = "sha256:8d206346619592c6200148b01a2142798c989edcb9c896f9ac9722a99d4e77e6"}, - {file = "MarkupSafe-2.0.1-cp36-cp36m-win32.whl", hash = "sha256:6c4ca60fa24e85fe25b912b01e62cb969d69a23a5d5867682dd3e80b5b02581d"}, - {file = "MarkupSafe-2.0.1-cp36-cp36m-win_amd64.whl", hash = "sha256:b2f4bf27480f5e5e8ce285a8c8fd176c0b03e93dcc6646477d4630e83440c6a9"}, - {file = "MarkupSafe-2.0.1-cp37-cp37m-macosx_10_9_x86_64.whl", hash = "sha256:0717a7390a68be14b8c793ba258e075c6f4ca819f15edfc2a3a027c823718567"}, - {file = "MarkupSafe-2.0.1-cp37-cp37m-manylinux1_i686.whl", hash = "sha256:6557b31b5e2c9ddf0de32a691f2312a32f77cd7681d8af66c2692efdbef84c18"}, - {file = "MarkupSafe-2.0.1-cp37-cp37m-manylinux1_x86_64.whl", hash = "sha256:49e3ceeabbfb9d66c3aef5af3a60cc43b85c33df25ce03d0031a608b0a8b2e3f"}, - {file = "MarkupSafe-2.0.1-cp37-cp37m-manylinux2010_i686.whl", hash = "sha256:d7f9850398e85aba693bb640262d3611788b1f29a79f0c93c565694658f4071f"}, - {file = "MarkupSafe-2.0.1-cp37-cp37m-manylinux2010_x86_64.whl", hash = "sha256:6a7fae0dd14cf60ad5ff42baa2e95727c3d81ded453457771d02b7d2b3f9c0c2"}, - {file = "MarkupSafe-2.0.1-cp37-cp37m-manylinux2014_aarch64.whl", hash = "sha256:b7f2d075102dc8c794cbde1947378051c4e5180d52d276987b8d28a3bd58c17d"}, - {file = "MarkupSafe-2.0.1-cp37-cp37m-manylinux_2_17_aarch64.manylinux2014_aarch64.whl", hash = "sha256:e9936f0b261d4df76ad22f8fee3ae83b60d7c3e871292cd42f40b81b70afae85"}, - {file = "MarkupSafe-2.0.1-cp37-cp37m-manylinux_2_5_i686.manylinux1_i686.manylinux_2_12_i686.manylinux2010_i686.whl", hash = "sha256:2a7d351cbd8cfeb19ca00de495e224dea7e7d919659c2841bbb7f420ad03e2d6"}, - {file = "MarkupSafe-2.0.1-cp37-cp37m-manylinux_2_5_x86_64.manylinux1_x86_64.manylinux_2_12_x86_64.manylinux2010_x86_64.whl", hash = "sha256:60bf42e36abfaf9aff1f50f52644b336d4f0a3fd6d8a60ca0d054ac9f713a864"}, - {file = "MarkupSafe-2.0.1-cp37-cp37m-musllinux_1_1_aarch64.whl", hash = "sha256:d6c7ebd4e944c85e2c3421e612a7057a2f48d478d79e61800d81468a8d842207"}, - {file = "MarkupSafe-2.0.1-cp37-cp37m-musllinux_1_1_i686.whl", hash = "sha256:f0567c4dc99f264f49fe27da5f735f414c4e7e7dd850cfd8e69f0862d7c74ea9"}, - {file = "MarkupSafe-2.0.1-cp37-cp37m-musllinux_1_1_x86_64.whl", hash = "sha256:89c687013cb1cd489a0f0ac24febe8c7a666e6e221b783e53ac50ebf68e45d86"}, - {file = "MarkupSafe-2.0.1-cp37-cp37m-win32.whl", hash = "sha256:a30e67a65b53ea0a5e62fe23682cfe22712e01f453b95233b25502f7c61cb415"}, - {file = "MarkupSafe-2.0.1-cp37-cp37m-win_amd64.whl", hash = "sha256:611d1ad9a4288cf3e3c16014564df047fe08410e628f89805e475368bd304914"}, - {file = "MarkupSafe-2.0.1-cp38-cp38-macosx_10_9_universal2.whl", hash = "sha256:5bb28c636d87e840583ee3adeb78172efc47c8b26127267f54a9c0ec251d41a9"}, - {file = "MarkupSafe-2.0.1-cp38-cp38-macosx_10_9_x86_64.whl", hash = "sha256:be98f628055368795d818ebf93da628541e10b75b41c559fdf36d104c5787066"}, - {file = "MarkupSafe-2.0.1-cp38-cp38-manylinux1_i686.whl", hash = "sha256:1d609f577dc6e1aa17d746f8bd3c31aa4d258f4070d61b2aa5c4166c1539de35"}, - {file = 
"MarkupSafe-2.0.1-cp38-cp38-manylinux1_x86_64.whl", hash = "sha256:7d91275b0245b1da4d4cfa07e0faedd5b0812efc15b702576d103293e252af1b"}, - {file = "MarkupSafe-2.0.1-cp38-cp38-manylinux2010_i686.whl", hash = "sha256:01a9b8ea66f1658938f65b93a85ebe8bc016e6769611be228d797c9d998dd298"}, - {file = "MarkupSafe-2.0.1-cp38-cp38-manylinux2010_x86_64.whl", hash = "sha256:47ab1e7b91c098ab893b828deafa1203de86d0bc6ab587b160f78fe6c4011f75"}, - {file = "MarkupSafe-2.0.1-cp38-cp38-manylinux2014_aarch64.whl", hash = "sha256:97383d78eb34da7e1fa37dd273c20ad4320929af65d156e35a5e2d89566d9dfb"}, - {file = "MarkupSafe-2.0.1-cp38-cp38-manylinux_2_17_aarch64.manylinux2014_aarch64.whl", hash = "sha256:6fcf051089389abe060c9cd7caa212c707e58153afa2c649f00346ce6d260f1b"}, - {file = "MarkupSafe-2.0.1-cp38-cp38-manylinux_2_5_i686.manylinux1_i686.manylinux_2_12_i686.manylinux2010_i686.whl", hash = "sha256:5855f8438a7d1d458206a2466bf82b0f104a3724bf96a1c781ab731e4201731a"}, - {file = "MarkupSafe-2.0.1-cp38-cp38-manylinux_2_5_x86_64.manylinux1_x86_64.manylinux_2_12_x86_64.manylinux2010_x86_64.whl", hash = "sha256:3dd007d54ee88b46be476e293f48c85048603f5f516008bee124ddd891398ed6"}, - {file = "MarkupSafe-2.0.1-cp38-cp38-musllinux_1_1_aarch64.whl", hash = "sha256:aca6377c0cb8a8253e493c6b451565ac77e98c2951c45f913e0b52facdcff83f"}, - {file = "MarkupSafe-2.0.1-cp38-cp38-musllinux_1_1_i686.whl", hash = "sha256:04635854b943835a6ea959e948d19dcd311762c5c0c6e1f0e16ee57022669194"}, - {file = "MarkupSafe-2.0.1-cp38-cp38-musllinux_1_1_x86_64.whl", hash = "sha256:6300b8454aa6930a24b9618fbb54b5a68135092bc666f7b06901f897fa5c2fee"}, - {file = "MarkupSafe-2.0.1-cp38-cp38-win32.whl", hash = "sha256:023cb26ec21ece8dc3907c0e8320058b2e0cb3c55cf9564da612bc325bed5e64"}, - {file = "MarkupSafe-2.0.1-cp38-cp38-win_amd64.whl", hash = "sha256:984d76483eb32f1bcb536dc27e4ad56bba4baa70be32fa87152832cdd9db0833"}, - {file = "MarkupSafe-2.0.1-cp39-cp39-macosx_10_9_universal2.whl", hash = "sha256:2ef54abee730b502252bcdf31b10dacb0a416229b72c18b19e24a4509f273d26"}, - {file = "MarkupSafe-2.0.1-cp39-cp39-macosx_10_9_x86_64.whl", hash = "sha256:3c112550557578c26af18a1ccc9e090bfe03832ae994343cfdacd287db6a6ae7"}, - {file = "MarkupSafe-2.0.1-cp39-cp39-manylinux1_i686.whl", hash = "sha256:53edb4da6925ad13c07b6d26c2a852bd81e364f95301c66e930ab2aef5b5ddd8"}, - {file = "MarkupSafe-2.0.1-cp39-cp39-manylinux1_x86_64.whl", hash = "sha256:f5653a225f31e113b152e56f154ccbe59eeb1c7487b39b9d9f9cdb58e6c79dc5"}, - {file = "MarkupSafe-2.0.1-cp39-cp39-manylinux2010_i686.whl", hash = "sha256:4efca8f86c54b22348a5467704e3fec767b2db12fc39c6d963168ab1d3fc9135"}, - {file = "MarkupSafe-2.0.1-cp39-cp39-manylinux2010_x86_64.whl", hash = "sha256:ab3ef638ace319fa26553db0624c4699e31a28bb2a835c5faca8f8acf6a5a902"}, - {file = "MarkupSafe-2.0.1-cp39-cp39-manylinux2014_aarch64.whl", hash = "sha256:f8ba0e8349a38d3001fae7eadded3f6606f0da5d748ee53cc1dab1d6527b9509"}, - {file = "MarkupSafe-2.0.1-cp39-cp39-manylinux_2_17_aarch64.manylinux2014_aarch64.whl", hash = "sha256:c47adbc92fc1bb2b3274c4b3a43ae0e4573d9fbff4f54cd484555edbf030baf1"}, - {file = "MarkupSafe-2.0.1-cp39-cp39-manylinux_2_5_i686.manylinux1_i686.manylinux_2_12_i686.manylinux2010_i686.whl", hash = "sha256:37205cac2a79194e3750b0af2a5720d95f786a55ce7df90c3af697bfa100eaac"}, - {file = "MarkupSafe-2.0.1-cp39-cp39-manylinux_2_5_x86_64.manylinux1_x86_64.manylinux_2_12_x86_64.manylinux2010_x86_64.whl", hash = "sha256:1f2ade76b9903f39aa442b4aadd2177decb66525062db244b35d71d0ee8599b6"}, - {file = "MarkupSafe-2.0.1-cp39-cp39-musllinux_1_1_aarch64.whl", hash = 
"sha256:4296f2b1ce8c86a6aea78613c34bb1a672ea0e3de9c6ba08a960efe0b0a09047"}, - {file = "MarkupSafe-2.0.1-cp39-cp39-musllinux_1_1_i686.whl", hash = "sha256:9f02365d4e99430a12647f09b6cc8bab61a6564363f313126f775eb4f6ef798e"}, - {file = "MarkupSafe-2.0.1-cp39-cp39-musllinux_1_1_x86_64.whl", hash = "sha256:5b6d930f030f8ed98e3e6c98ffa0652bdb82601e7a016ec2ab5d7ff23baa78d1"}, - {file = "MarkupSafe-2.0.1-cp39-cp39-win32.whl", hash = "sha256:10f82115e21dc0dfec9ab5c0223652f7197feb168c940f3ef61563fc2d6beb74"}, - {file = "MarkupSafe-2.0.1-cp39-cp39-win_amd64.whl", hash = "sha256:693ce3f9e70a6cf7d2fb9e6c9d8b204b6b39897a2c4a1aa65728d5ac97dcc1d8"}, - {file = "MarkupSafe-2.0.1.tar.gz", hash = "sha256:594c67807fb16238b30c44bdf74f36c02cdf22d1c8cda91ef8a0ed8dabf5620a"}, + {file = "MarkupSafe-2.1.0-cp310-cp310-macosx_10_9_universal2.whl", hash = "sha256:3028252424c72b2602a323f70fbf50aa80a5d3aa616ea6add4ba21ae9cc9da4c"}, + {file = "MarkupSafe-2.1.0-cp310-cp310-macosx_10_9_x86_64.whl", hash = "sha256:290b02bab3c9e216da57c1d11d2ba73a9f73a614bbdcc027d299a60cdfabb11a"}, + {file = "MarkupSafe-2.1.0-cp310-cp310-manylinux_2_17_aarch64.manylinux2014_aarch64.whl", hash = "sha256:6e104c0c2b4cd765b4e83909cde7ec61a1e313f8a75775897db321450e928cce"}, + {file = "MarkupSafe-2.1.0-cp310-cp310-manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:24c3be29abb6b34052fd26fc7a8e0a49b1ee9d282e3665e8ad09a0a68faee5b3"}, + {file = "MarkupSafe-2.1.0-cp310-cp310-manylinux_2_5_i686.manylinux1_i686.manylinux_2_17_i686.manylinux2014_i686.whl", hash = "sha256:204730fd5fe2fe3b1e9ccadb2bd18ba8712b111dcabce185af0b3b5285a7c989"}, + {file = "MarkupSafe-2.1.0-cp310-cp310-musllinux_1_1_aarch64.whl", hash = "sha256:d3b64c65328cb4cd252c94f83e66e3d7acf8891e60ebf588d7b493a55a1dbf26"}, + {file = "MarkupSafe-2.1.0-cp310-cp310-musllinux_1_1_i686.whl", hash = "sha256:96de1932237abe0a13ba68b63e94113678c379dca45afa040a17b6e1ad7ed076"}, + {file = "MarkupSafe-2.1.0-cp310-cp310-musllinux_1_1_x86_64.whl", hash = "sha256:75bb36f134883fdbe13d8e63b8675f5f12b80bb6627f7714c7d6c5becf22719f"}, + {file = "MarkupSafe-2.1.0-cp310-cp310-win32.whl", hash = "sha256:4056f752015dfa9828dce3140dbadd543b555afb3252507348c493def166d454"}, + {file = "MarkupSafe-2.1.0-cp310-cp310-win_amd64.whl", hash = "sha256:d4e702eea4a2903441f2735799d217f4ac1b55f7d8ad96ab7d4e25417cb0827c"}, + {file = "MarkupSafe-2.1.0-cp37-cp37m-macosx_10_9_x86_64.whl", hash = "sha256:f0eddfcabd6936558ec020130f932d479930581171368fd728efcfb6ef0dd357"}, + {file = "MarkupSafe-2.1.0-cp37-cp37m-manylinux_2_17_aarch64.manylinux2014_aarch64.whl", hash = "sha256:5ddea4c352a488b5e1069069f2f501006b1a4362cb906bee9a193ef1245a7a61"}, + {file = "MarkupSafe-2.1.0-cp37-cp37m-manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:09c86c9643cceb1d87ca08cdc30160d1b7ab49a8a21564868921959bd16441b8"}, + {file = "MarkupSafe-2.1.0-cp37-cp37m-manylinux_2_5_i686.manylinux1_i686.manylinux_2_17_i686.manylinux2014_i686.whl", hash = "sha256:a0a0abef2ca47b33fb615b491ce31b055ef2430de52c5b3fb19a4042dbc5cadb"}, + {file = "MarkupSafe-2.1.0-cp37-cp37m-musllinux_1_1_aarch64.whl", hash = "sha256:736895a020e31b428b3382a7887bfea96102c529530299f426bf2e636aacec9e"}, + {file = "MarkupSafe-2.1.0-cp37-cp37m-musllinux_1_1_i686.whl", hash = "sha256:679cbb78914ab212c49c67ba2c7396dc599a8479de51b9a87b174700abd9ea49"}, + {file = "MarkupSafe-2.1.0-cp37-cp37m-musllinux_1_1_x86_64.whl", hash = "sha256:84ad5e29bf8bab3ad70fd707d3c05524862bddc54dc040982b0dbcff36481de7"}, + {file = "MarkupSafe-2.1.0-cp37-cp37m-win32.whl", hash = 
"sha256:8da5924cb1f9064589767b0f3fc39d03e3d0fb5aa29e0cb21d43106519bd624a"}, + {file = "MarkupSafe-2.1.0-cp37-cp37m-win_amd64.whl", hash = "sha256:454ffc1cbb75227d15667c09f164a0099159da0c1f3d2636aa648f12675491ad"}, + {file = "MarkupSafe-2.1.0-cp38-cp38-macosx_10_9_universal2.whl", hash = "sha256:142119fb14a1ef6d758912b25c4e803c3ff66920635c44078666fe7cc3f8f759"}, + {file = "MarkupSafe-2.1.0-cp38-cp38-macosx_10_9_x86_64.whl", hash = "sha256:b2a5a856019d2833c56a3dcac1b80fe795c95f401818ea963594b345929dffa7"}, + {file = "MarkupSafe-2.1.0-cp38-cp38-manylinux_2_17_aarch64.manylinux2014_aarch64.whl", hash = "sha256:1d1fb9b2eec3c9714dd936860850300b51dbaa37404209c8d4cb66547884b7ed"}, + {file = "MarkupSafe-2.1.0-cp38-cp38-manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:62c0285e91414f5c8f621a17b69fc0088394ccdaa961ef469e833dbff64bd5ea"}, + {file = "MarkupSafe-2.1.0-cp38-cp38-manylinux_2_5_i686.manylinux1_i686.manylinux_2_17_i686.manylinux2014_i686.whl", hash = "sha256:fc3150f85e2dbcf99e65238c842d1cfe69d3e7649b19864c1cc043213d9cd730"}, + {file = "MarkupSafe-2.1.0-cp38-cp38-musllinux_1_1_aarch64.whl", hash = "sha256:f02cf7221d5cd915d7fa58ab64f7ee6dd0f6cddbb48683debf5d04ae9b1c2cc1"}, + {file = "MarkupSafe-2.1.0-cp38-cp38-musllinux_1_1_i686.whl", hash = "sha256:d5653619b3eb5cbd35bfba3c12d575db2a74d15e0e1c08bf1db788069d410ce8"}, + {file = "MarkupSafe-2.1.0-cp38-cp38-musllinux_1_1_x86_64.whl", hash = "sha256:7d2f5d97fcbd004c03df8d8fe2b973fe2b14e7bfeb2cfa012eaa8759ce9a762f"}, + {file = "MarkupSafe-2.1.0-cp38-cp38-win32.whl", hash = "sha256:3cace1837bc84e63b3fd2dfce37f08f8c18aeb81ef5cf6bb9b51f625cb4e6cd8"}, + {file = "MarkupSafe-2.1.0-cp38-cp38-win_amd64.whl", hash = "sha256:fabbe18087c3d33c5824cb145ffca52eccd053061df1d79d4b66dafa5ad2a5ea"}, + {file = "MarkupSafe-2.1.0-cp39-cp39-macosx_10_9_universal2.whl", hash = "sha256:023af8c54fe63530545f70dd2a2a7eed18d07a9a77b94e8bf1e2ff7f252db9a3"}, + {file = "MarkupSafe-2.1.0-cp39-cp39-macosx_10_9_x86_64.whl", hash = "sha256:d66624f04de4af8bbf1c7f21cc06649c1c69a7f84109179add573ce35e46d448"}, + {file = "MarkupSafe-2.1.0-cp39-cp39-manylinux_2_17_aarch64.manylinux2014_aarch64.whl", hash = "sha256:c532d5ab79be0199fa2658e24a02fce8542df196e60665dd322409a03db6a52c"}, + {file = "MarkupSafe-2.1.0-cp39-cp39-manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:e67ec74fada3841b8c5f4c4f197bea916025cb9aa3fe5abf7d52b655d042f956"}, + {file = "MarkupSafe-2.1.0-cp39-cp39-manylinux_2_5_i686.manylinux1_i686.manylinux_2_17_i686.manylinux2014_i686.whl", hash = "sha256:30c653fde75a6e5eb814d2a0a89378f83d1d3f502ab710904ee585c38888816c"}, + {file = "MarkupSafe-2.1.0-cp39-cp39-musllinux_1_1_aarch64.whl", hash = "sha256:961eb86e5be7d0973789f30ebcf6caab60b844203f4396ece27310295a6082c7"}, + {file = "MarkupSafe-2.1.0-cp39-cp39-musllinux_1_1_i686.whl", hash = "sha256:598b65d74615c021423bd45c2bc5e9b59539c875a9bdb7e5f2a6b92dfcfc268d"}, + {file = "MarkupSafe-2.1.0-cp39-cp39-musllinux_1_1_x86_64.whl", hash = "sha256:599941da468f2cf22bf90a84f6e2a65524e87be2fce844f96f2dd9a6c9d1e635"}, + {file = "MarkupSafe-2.1.0-cp39-cp39-win32.whl", hash = "sha256:e6f7f3f41faffaea6596da86ecc2389672fa949bd035251eab26dc6697451d05"}, + {file = "MarkupSafe-2.1.0-cp39-cp39-win_amd64.whl", hash = "sha256:b8811d48078d1cf2a6863dafb896e68406c5f513048451cd2ded0473133473c7"}, + {file = "MarkupSafe-2.1.0.tar.gz", hash = "sha256:80beaf63ddfbc64a0452b841d8036ca0611e049650e20afcb882f5d3c266d65f"}, ] matplotlib = [ {file = "matplotlib-3.5.1-cp310-cp310-macosx_10_9_universal2.whl", hash = 
"sha256:456cc8334f6d1124e8ff856b42d2cc1c84335375a16448189999496549f7182b"}, @@ -1171,42 +1142,45 @@ pandas = [ {file = "pandas-1.1.5.tar.gz", hash = "sha256:f10fc41ee3c75a474d3bdf68d396f10782d013d7f67db99c0efbfd0acb99701b"}, ] pillow = [ - {file = "Pillow-9.0.0-cp310-cp310-macosx_10_10_universal2.whl", hash = "sha256:113723312215b25c22df1fdf0e2da7a3b9c357a7d24a93ebbe80bfda4f37a8d4"}, - {file = "Pillow-9.0.0-cp310-cp310-macosx_11_0_arm64.whl", hash = "sha256:bb47a548cea95b86494a26c89d153fd31122ed65255db5dcbc421a2d28eb3379"}, - {file = "Pillow-9.0.0-cp310-cp310-manylinux_2_17_aarch64.manylinux2014_aarch64.whl", hash = "sha256:31b265496e603985fad54d52d11970383e317d11e18e856971bdbb86af7242a4"}, - {file = "Pillow-9.0.0-cp310-cp310-manylinux_2_17_i686.manylinux2014_i686.whl", hash = "sha256:d154ed971a4cc04b93a6d5b47f37948d1f621f25de3e8fa0c26b2d44f24e3e8f"}, - {file = "Pillow-9.0.0-cp310-cp310-manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:80fe92813d208ce8aa7d76da878bdc84b90809f79ccbad2a288e9bcbeac1d9bd"}, - {file = "Pillow-9.0.0-cp310-cp310-win32.whl", hash = "sha256:d5dcea1387331c905405b09cdbfb34611050cc52c865d71f2362f354faee1e9f"}, - {file = "Pillow-9.0.0-cp310-cp310-win_amd64.whl", hash = "sha256:52abae4c96b5da630a8b4247de5428f593465291e5b239f3f843a911a3cf0105"}, - {file = "Pillow-9.0.0-cp37-cp37m-macosx_10_10_x86_64.whl", hash = "sha256:72c3110228944019e5f27232296c5923398496b28be42535e3b2dc7297b6e8b6"}, - {file = "Pillow-9.0.0-cp37-cp37m-manylinux_2_17_aarch64.manylinux2014_aarch64.whl", hash = "sha256:97b6d21771da41497b81652d44191489296555b761684f82b7b544c49989110f"}, - {file = "Pillow-9.0.0-cp37-cp37m-manylinux_2_17_i686.manylinux2014_i686.whl", hash = "sha256:72f649d93d4cc4d8cf79c91ebc25137c358718ad75f99e99e043325ea7d56100"}, - {file = "Pillow-9.0.0-cp37-cp37m-manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:7aaf07085c756f6cb1c692ee0d5a86c531703b6e8c9cae581b31b562c16b98ce"}, - {file = "Pillow-9.0.0-cp37-cp37m-win32.whl", hash = "sha256:03b27b197deb4ee400ed57d8d4e572d2d8d80f825b6634daf6e2c18c3c6ccfa6"}, - {file = "Pillow-9.0.0-cp37-cp37m-win_amd64.whl", hash = "sha256:a09a9d4ec2b7887f7a088bbaacfd5c07160e746e3d47ec5e8050ae3b2a229e9f"}, - {file = "Pillow-9.0.0-cp38-cp38-macosx_10_10_x86_64.whl", hash = "sha256:490e52e99224858f154975db61c060686df8a6b3f0212a678e5d2e2ce24675c9"}, - {file = "Pillow-9.0.0-cp38-cp38-macosx_11_0_arm64.whl", hash = "sha256:500d397ddf4bbf2ca42e198399ac13e7841956c72645513e8ddf243b31ad2128"}, - {file = "Pillow-9.0.0-cp38-cp38-manylinux_2_17_aarch64.manylinux2014_aarch64.whl", hash = "sha256:0ebd8b9137630a7bbbff8c4b31e774ff05bbb90f7911d93ea2c9371e41039b52"}, - {file = "Pillow-9.0.0-cp38-cp38-manylinux_2_17_i686.manylinux2014_i686.whl", hash = "sha256:fd0e5062f11cb3e730450a7d9f323f4051b532781026395c4323b8ad055523c4"}, - {file = "Pillow-9.0.0-cp38-cp38-manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:9f3b4522148586d35e78313db4db0df4b759ddd7649ef70002b6c3767d0fdeb7"}, - {file = "Pillow-9.0.0-cp38-cp38-win32.whl", hash = "sha256:0b281fcadbb688607ea6ece7649c5d59d4bbd574e90db6cd030e9e85bde9fecc"}, - {file = "Pillow-9.0.0-cp38-cp38-win_amd64.whl", hash = "sha256:b5050d681bcf5c9f2570b93bee5d3ec8ae4cf23158812f91ed57f7126df91762"}, - {file = "Pillow-9.0.0-cp39-cp39-macosx_10_10_x86_64.whl", hash = "sha256:c2067b3bb0781f14059b112c9da5a91c80a600a97915b4f48b37f197895dd925"}, - {file = "Pillow-9.0.0-cp39-cp39-macosx_11_0_arm64.whl", hash = "sha256:2d16b6196fb7a54aff6b5e3ecd00f7c0bab1b56eee39214b2b223a9d938c50af"}, - {file = 
"Pillow-9.0.0-cp39-cp39-manylinux_2_17_aarch64.manylinux2014_aarch64.whl", hash = "sha256:98cb63ca63cb61f594511c06218ab4394bf80388b3d66cd61d0b1f63ee0ea69f"}, - {file = "Pillow-9.0.0-cp39-cp39-manylinux_2_17_i686.manylinux2014_i686.whl", hash = "sha256:bc462d24500ba707e9cbdef436c16e5c8cbf29908278af053008d9f689f56dee"}, - {file = "Pillow-9.0.0-cp39-cp39-manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:3586e12d874ce2f1bc875a3ffba98732ebb12e18fb6d97be482bd62b56803281"}, - {file = "Pillow-9.0.0-cp39-cp39-win32.whl", hash = "sha256:68e06f8b2248f6dc8b899c3e7ecf02c9f413aab622f4d6190df53a78b93d97a5"}, - {file = "Pillow-9.0.0-cp39-cp39-win_amd64.whl", hash = "sha256:6579f9ba84a3d4f1807c4aab4be06f373017fc65fff43498885ac50a9b47a553"}, - {file = "Pillow-9.0.0-pp37-pypy37_pp73-macosx_10_10_x86_64.whl", hash = "sha256:47f5cf60bcb9fbc46011f75c9b45a8b5ad077ca352a78185bd3e7f1d294b98bb"}, - {file = "Pillow-9.0.0-pp37-pypy37_pp73-manylinux_2_17_i686.manylinux2014_i686.whl", hash = "sha256:2fd8053e1f8ff1844419842fd474fc359676b2e2a2b66b11cc59f4fa0a301315"}, - {file = "Pillow-9.0.0-pp37-pypy37_pp73-manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:6c5439bfb35a89cac50e81c751317faea647b9a3ec11c039900cd6915831064d"}, - {file = "Pillow-9.0.0-pp38-pypy38_pp73-win_amd64.whl", hash = "sha256:95545137fc56ce8c10de646074d242001a112a92de169986abd8c88c27566a05"}, - {file = "Pillow-9.0.0.tar.gz", hash = "sha256:ee6e2963e92762923956fe5d3479b1fdc3b76c83f290aad131a2f98c3df0593e"}, + {file = "Pillow-9.0.1-1-cp310-cp310-macosx_11_0_arm64.whl", hash = "sha256:a5d24e1d674dd9d72c66ad3ea9131322819ff86250b30dc5821cbafcfa0b96b4"}, + {file = "Pillow-9.0.1-1-cp38-cp38-macosx_11_0_arm64.whl", hash = "sha256:2632d0f846b7c7600edf53c48f8f9f1e13e62f66a6dbc15191029d950bfed976"}, + {file = "Pillow-9.0.1-1-cp39-cp39-macosx_11_0_arm64.whl", hash = "sha256:b9618823bd237c0d2575283f2939655f54d51b4527ec3972907a927acbcc5bfc"}, + {file = "Pillow-9.0.1-cp310-cp310-macosx_10_10_universal2.whl", hash = "sha256:9bfdb82cdfeccec50aad441afc332faf8606dfa5e8efd18a6692b5d6e79f00fd"}, + {file = "Pillow-9.0.1-cp310-cp310-macosx_11_0_arm64.whl", hash = "sha256:5100b45a4638e3c00e4d2320d3193bdabb2d75e79793af7c3eb139e4f569f16f"}, + {file = "Pillow-9.0.1-cp310-cp310-manylinux_2_17_aarch64.manylinux2014_aarch64.whl", hash = "sha256:528a2a692c65dd5cafc130de286030af251d2ee0483a5bf50c9348aefe834e8a"}, + {file = "Pillow-9.0.1-cp310-cp310-manylinux_2_17_i686.manylinux2014_i686.whl", hash = "sha256:0f29d831e2151e0b7b39981756d201f7108d3d215896212ffe2e992d06bfe049"}, + {file = "Pillow-9.0.1-cp310-cp310-manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:855c583f268edde09474b081e3ddcd5cf3b20c12f26e0d434e1386cc5d318e7a"}, + {file = "Pillow-9.0.1-cp310-cp310-win32.whl", hash = "sha256:d9d7942b624b04b895cb95af03a23407f17646815495ce4547f0e60e0b06f58e"}, + {file = "Pillow-9.0.1-cp310-cp310-win_amd64.whl", hash = "sha256:81c4b81611e3a3cb30e59b0cf05b888c675f97e3adb2c8672c3154047980726b"}, + {file = "Pillow-9.0.1-cp37-cp37m-macosx_10_10_x86_64.whl", hash = "sha256:413ce0bbf9fc6278b2d63309dfeefe452835e1c78398efb431bab0672fe9274e"}, + {file = "Pillow-9.0.1-cp37-cp37m-manylinux_2_17_aarch64.manylinux2014_aarch64.whl", hash = "sha256:80fe64a6deb6fcfdf7b8386f2cf216d329be6f2781f7d90304351811fb591360"}, + {file = "Pillow-9.0.1-cp37-cp37m-manylinux_2_17_i686.manylinux2014_i686.whl", hash = "sha256:cef9c85ccbe9bee00909758936ea841ef12035296c748aaceee535969e27d31b"}, + {file = "Pillow-9.0.1-cp37-cp37m-manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = 
"sha256:1d19397351f73a88904ad1aee421e800fe4bbcd1aeee6435fb62d0a05ccd1030"}, + {file = "Pillow-9.0.1-cp37-cp37m-win32.whl", hash = "sha256:d21237d0cd37acded35154e29aec853e945950321dd2ffd1a7d86fe686814669"}, + {file = "Pillow-9.0.1-cp37-cp37m-win_amd64.whl", hash = "sha256:ede5af4a2702444a832a800b8eb7f0a7a1c0eed55b644642e049c98d589e5092"}, + {file = "Pillow-9.0.1-cp38-cp38-macosx_10_10_x86_64.whl", hash = "sha256:b5b3f092fe345c03bca1e0b687dfbb39364b21ebb8ba90e3fa707374b7915204"}, + {file = "Pillow-9.0.1-cp38-cp38-macosx_11_0_arm64.whl", hash = "sha256:335ace1a22325395c4ea88e00ba3dc89ca029bd66bd5a3c382d53e44f0ccd77e"}, + {file = "Pillow-9.0.1-cp38-cp38-manylinux_2_17_aarch64.manylinux2014_aarch64.whl", hash = "sha256:db6d9fac65bd08cea7f3540b899977c6dee9edad959fa4eaf305940d9cbd861c"}, + {file = "Pillow-9.0.1-cp38-cp38-manylinux_2_17_i686.manylinux2014_i686.whl", hash = "sha256:f154d173286a5d1863637a7dcd8c3437bb557520b01bddb0be0258dcb72696b5"}, + {file = "Pillow-9.0.1-cp38-cp38-manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:14d4b1341ac07ae07eb2cc682f459bec932a380c3b122f5540432d8977e64eae"}, + {file = "Pillow-9.0.1-cp38-cp38-win32.whl", hash = "sha256:effb7749713d5317478bb3acb3f81d9d7c7f86726d41c1facca068a04cf5bb4c"}, + {file = "Pillow-9.0.1-cp38-cp38-win_amd64.whl", hash = "sha256:7f7609a718b177bf171ac93cea9fd2ddc0e03e84d8fa4e887bdfc39671d46b00"}, + {file = "Pillow-9.0.1-cp39-cp39-macosx_10_10_x86_64.whl", hash = "sha256:80ca33961ced9c63358056bd08403ff866512038883e74f3a4bf88ad3eb66838"}, + {file = "Pillow-9.0.1-cp39-cp39-macosx_11_0_arm64.whl", hash = "sha256:1c3c33ac69cf059bbb9d1a71eeaba76781b450bc307e2291f8a4764d779a6b28"}, + {file = "Pillow-9.0.1-cp39-cp39-manylinux_2_17_aarch64.manylinux2014_aarch64.whl", hash = "sha256:12875d118f21cf35604176872447cdb57b07126750a33748bac15e77f90f1f9c"}, + {file = "Pillow-9.0.1-cp39-cp39-manylinux_2_17_i686.manylinux2014_i686.whl", hash = "sha256:514ceac913076feefbeaf89771fd6febde78b0c4c1b23aaeab082c41c694e81b"}, + {file = "Pillow-9.0.1-cp39-cp39-manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:d3c5c79ab7dfce6d88f1ba639b77e77a17ea33a01b07b99840d6ed08031cb2a7"}, + {file = "Pillow-9.0.1-cp39-cp39-win32.whl", hash = "sha256:718856856ba31f14f13ba885ff13874be7fefc53984d2832458f12c38205f7f7"}, + {file = "Pillow-9.0.1-cp39-cp39-win_amd64.whl", hash = "sha256:f25ed6e28ddf50de7e7ea99d7a976d6a9c415f03adcaac9c41ff6ff41b6d86ac"}, + {file = "Pillow-9.0.1-pp37-pypy37_pp73-macosx_10_10_x86_64.whl", hash = "sha256:011233e0c42a4a7836498e98c1acf5e744c96a67dd5032a6f666cc1fb97eab97"}, + {file = "Pillow-9.0.1-pp37-pypy37_pp73-manylinux_2_17_i686.manylinux2014_i686.whl", hash = "sha256:253e8a302a96df6927310a9d44e6103055e8fb96a6822f8b7f514bb7ef77de56"}, + {file = "Pillow-9.0.1-pp37-pypy37_pp73-manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:6295f6763749b89c994fcb6d8a7f7ce03c3992e695f89f00b741b4580b199b7e"}, + {file = "Pillow-9.0.1-pp38-pypy38_pp73-win_amd64.whl", hash = "sha256:a9f44cd7e162ac6191491d7249cceb02b8116b0f7e847ee33f739d7cb1ea1f70"}, + {file = "Pillow-9.0.1.tar.gz", hash = "sha256:6c8bc8238a7dfdaf7a75f5ec5a663f4173f8c367e5a39f87e720495e1eed75fa"}, ] platformdirs = [ - {file = "platformdirs-2.4.1-py3-none-any.whl", hash = "sha256:1d7385c7db91728b83efd0ca99a5afb296cab9d0ed8313a45ed8ba17967ecfca"}, - {file = "platformdirs-2.4.1.tar.gz", hash = "sha256:440633ddfebcc36264232365d7840a970e75e1018d15b4327d11f91909045fda"}, + {file = "platformdirs-2.5.1-py3-none-any.whl", hash = 
"sha256:bcae7cab893c2d310a711b70b24efb93334febe65f8de776ee320b517471e227"}, + {file = "platformdirs-2.5.1.tar.gz", hash = "sha256:7535e70dfa32e84d4b34996ea99c5e432fa29a708d0f4e394bbcb2a8faa4f16d"}, ] pooch = [ {file = "pooch-1.6.0-py3-none-any.whl", hash = "sha256:3bf0e20027096836b8dbce0152dbb785a269abeb621618eb4bdd275ff1e23c9c"}, @@ -1346,8 +1320,8 @@ sphinx = [ {file = "Sphinx-3.5.4.tar.gz", hash = "sha256:19010b7b9fa0dc7756a6e105b2aacd3a80f798af3c25c273be64d7beeb482cb1"}, ] sphinx-bootstrap-theme = [ - {file = "sphinx-bootstrap-theme-0.8.0.tar.gz", hash = "sha256:038ee7e89478e064b5dd7e614de6f3f4cec81d9f9efbebb06e105693d6a50924"}, - {file = "sphinx_bootstrap_theme-0.8.0-py2.py3-none-any.whl", hash = "sha256:8b648023a0587f1695460670554ca3fb493e344313189b74a87b0ba27168ca47"}, + {file = "sphinx-bootstrap-theme-0.8.1.tar.gz", hash = "sha256:683e3b735448dadd0149f76edecf95ff4bd9157787e9e77e0d048ca6f1d680df"}, + {file = "sphinx_bootstrap_theme-0.8.1-py2.py3-none-any.whl", hash = "sha256:6ef36206c211846ea6cbdb45bc85645578e7c62d0a883361181708f8b6ea743b"}, ] sphinx-gallery = [ {file = "sphinx-gallery-0.8.2.tar.gz", hash = "sha256:74feba1af9b88cc1674bb1c927f56683ba4b1c78326c97106716ff360ede4462"}, @@ -1386,24 +1360,24 @@ toml = [ {file = "toml-0.10.2.tar.gz", hash = "sha256:b3bda1d108d5dd99f4a20d24d9c348e91c4db7ab1b749200bded2f839ccbe68f"}, ] tomli = [ - {file = "tomli-2.0.0-py3-none-any.whl", hash = "sha256:b5bde28da1fed24b9bd1d4d2b8cba62300bfb4ec9a6187a957e8ddb9434c5224"}, - {file = "tomli-2.0.0.tar.gz", hash = "sha256:c292c34f58502a1eb2bbb9f5bbc9a5ebc37bee10ffb8c2d6bbdfa8eb13cc14e1"}, + {file = "tomli-2.0.1-py3-none-any.whl", hash = "sha256:939de3e7a6161af0c887ef91b7d41a53e7c5a1ca976325f429cb46ea9bc30ecc"}, + {file = "tomli-2.0.1.tar.gz", hash = "sha256:de526c12914f0c550d15924c62d72abc48d6fe7364aa87328337a31007fe8a4f"}, ] tqdm = [ {file = "tqdm-4.62.3-py2.py3-none-any.whl", hash = "sha256:8dd278a422499cd6b727e6ae4061c40b48fce8b76d1ccbf5d34fca9b7f925b0c"}, {file = "tqdm-4.62.3.tar.gz", hash = "sha256:d359de7217506c9851b7869f3708d8ee53ed70a1b8edbba4dbcb47442592920d"}, ] typing-extensions = [ - {file = "typing_extensions-4.0.1-py3-none-any.whl", hash = "sha256:7f001e5ac290a0c0401508864c7ec868be4e701886d5b573a9528ed3973d9d3b"}, - {file = "typing_extensions-4.0.1.tar.gz", hash = "sha256:4ca091dea149f945ec56afb48dae714f21e8692ef22a395223bcd328961b6a0e"}, + {file = "typing_extensions-4.1.1-py3-none-any.whl", hash = "sha256:21c85e0fe4b9a155d0799430b0ad741cdce7e359660ccbd8b530613e8df88ce2"}, + {file = "typing_extensions-4.1.1.tar.gz", hash = "sha256:1a9462dcc3347a79b1f1c0271fbe79e844580bb598bafa1ed208b94da3cdcd42"}, ] urllib3 = [ {file = "urllib3-1.26.8-py2.py3-none-any.whl", hash = "sha256:000ca7f471a233c2251c6c7023ee85305721bfdf18621ebff4fd17a8653427ed"}, {file = "urllib3-1.26.8.tar.gz", hash = "sha256:0e7c33d9a63e7ddfcb86780aac87befc2fbddf46c58dbb487e0855f7ceec283c"}, ] virtualenv = [ - {file = "virtualenv-20.13.0-py2.py3-none-any.whl", hash = "sha256:339f16c4a86b44240ba7223d0f93a7887c3ca04b5f9c8129da7958447d079b09"}, - {file = "virtualenv-20.13.0.tar.gz", hash = "sha256:d8458cf8d59d0ea495ad9b34c2599487f8a7772d796f9910858376d1600dd2dd"}, + {file = "virtualenv-20.13.1-py2.py3-none-any.whl", hash = "sha256:45e1d053cad4cd453181ae877c4ffc053546ae99e7dd049b9ff1d9be7491abf7"}, + {file = "virtualenv-20.13.1.tar.gz", hash = "sha256:e0621bcbf4160e4e1030f05065c8834b4e93f4fcc223255db2a823440aca9c14"}, ] zipp = [ {file = "zipp-3.7.0-py3-none-any.whl", hash = 
"sha256:b47250dd24f92b7dd6a0a8fc5244da14608f3ca90a5efcd37a3b1642fac9a375"}, diff --git a/pyproject.toml b/pyproject.toml index a21e45e83..e8f433c71 100644 --- a/pyproject.toml +++ b/pyproject.toml @@ -1,6 +1,6 @@ [tool.poetry] name = "moabb" -version = "0.4.4" +version = "0.4.5" description = "Mother of All BCI Benchmarks" authors = ["Alexandre Barachant", "Vinay Jayaram"] maintainers = ["Sylvain Chevallier "] diff --git a/requirements.txt b/requirements.txt index 5de722f3b..a1ade61aa 100644 --- a/requirements.txt +++ b/requirements.txt @@ -7,9 +7,12 @@ cached-property==1.5.2; python_version < "3.8" and python_version >= "3.7" \ certifi==2021.10.8; python_version >= "3.6" and python_full_version < "3.0.0" or python_full_version >= "3.6.0" and python_version >= "3.6" \ --hash=sha256:d62a0163eb4c2344ac042ab2bdf75399a71a2d8c7d47eac2e2ee91b9d6339569 \ --hash=sha256:78884e7c1d4b00ce3cea67b44566851c4343c120abd683433ce934a68ea58872 -charset-normalizer==2.0.7; python_full_version >= "3.6.0" and python_version >= "3.6" \ - --hash=sha256:e019de665e2bcf9c2b64e2e5aa025fa991da8720daa3c1138cadd2fd1856aed0 \ - --hash=sha256:f7af805c321bfa1ce6714c51f254e0d5bb5e5834039bc17db7ebe3a4cec9492b +charset-normalizer==2.0.12; python_full_version >= "3.6.0" and python_version >= "3.6" \ + --hash=sha256:2857e29ff0d34db842cd7ca3230549d1a697f96ee6d3fb071cfa6c7393832597 \ + --hash=sha256:6881edbebdb17b39b4eaaa821b438bf6eddffb4468cf344f09f89def34a8b1df +colorama==0.4.4; python_version >= "2.7" and python_full_version < "3.0.0" and platform_system == "Windows" or python_full_version >= "3.5.0" and platform_system == "Windows" \ + --hash=sha256:9f47eda37229f68eee03b24b9748937c7dc3868f906e8ba69fbcbdd3bc5dc3e2 \ + --hash=sha256:5941b2b48a20143d2267e95b1c2a7603ce057ee39fd88e7329b0c292aa16869b coverage==5.5; (python_version >= "2.7" and python_full_version < "3.0.0") or (python_full_version >= "3.5.0" and python_version < "4") \ --hash=sha256:b6d534e4b2ab35c9f93f46229363e17f63c53ad01330df9f2d6bd1187e5eaacf \ --hash=sha256:b7895207b4c843c76a25ab8c1e866261bcfe27bfaa20c192de5190121770672b \ @@ -63,20 +66,29 @@ coverage==5.5; (python_version >= "2.7" and python_full_version < "3.0.0") or (p --hash=sha256:f030f8873312a16414c0d8e1a1ddff2d3235655a2174e3648b4fa66b3f2f1079 \ --hash=sha256:2a3859cb82dcbda1cfd3e6f71c27081d18aa251d20a17d87d26d4cd216fb0af4 \ --hash=sha256:ebe78fe9a0e874362175b02371bdfbee64d8edc42a044253ddf4ee7d3c15212c -cycler==0.10.0; python_version >= "3.7" \ - --hash=sha256:1d8a5ae1ff6c5cf9b93e8811e581232ad8920aeec647c37316ceac982b08cb2d \ - --hash=sha256:cd7b2d1018258d7247a71425e9f26463dfb444d411c39569972f4ce586b0c9d8 -h5py==3.4.0; python_version >= "3.7" \ - --hash=sha256:aa511bd05a9174c3008becdc93bd5785e254d34a6ab5f0425e6b2fbbc88afa6d \ - --hash=sha256:708ddff49af12c01d77e0f9782bb1a0364d96459ec0d1f85d90baea6d203764b \ - --hash=sha256:be2a545f09074546f73305e0db6d36aaf1fb6ea2fcf1add2ce306b9c7f78e55a \ - --hash=sha256:0b0f002f5f341afe7d3d7e15198e80d9021da24a4d182d88068d79bfc91fba86 \ - --hash=sha256:46917f20021dde02865572a5fd2bb620945f7b7cd268bdc8e3f5720c32b38140 \ - --hash=sha256:8e809149f95d9a3a33b1279bfbf894c78635a5497e8d5ac37420fa5ec0cf4f29 \ - --hash=sha256:8745e5159830d7975a9cf38690455f22601509cda04de29b7e88b3fbdc747611 \ - --hash=sha256:bb4ce46095e3b16c872aaf62adad33f40039fecae04674eb62c035386affcb91 \ - --hash=sha256:1edf33e722d47c6eb3878d51173b23dd848939f006f41b498bafceff87fb4cbd \ - --hash=sha256:ee1c683d91ab010d5e85cb61e8f9e7ee0d8eab545bf3dd50a9618f1d0e8f615e +cycler==0.11.0; python_version >= "3.7" \ + 
--hash=sha256:3a27e95f763a428a739d2add979fa7494c912a32c17c4c38c4d5f082cad165a3 \ + --hash=sha256:9c87405839a19696e837b3b818fed3f5f69f16f1eec1a1ad77e043dcea9c772f +fonttools==4.29.1; python_version >= "3.7" \ + --hash=sha256:1933415e0fbdf068815cb1baaa1f159e17830215f7e8624e5731122761627557 \ + --hash=sha256:2b18a172120e32128a80efee04cff487d5d140fe7d817deb648b2eee023a40e4 +h5py==3.6.0; python_version >= "3.7" \ + --hash=sha256:a5320837c60870911645e9a935099bdb2be6a786fcf0dac5c860f3b679e2de55 \ + --hash=sha256:98646e659bf8591a2177e12a4461dced2cad72da0ba4247643fd118db88880d2 \ + --hash=sha256:5996ff5adefd2d68c330a4265b6ef92e51b2fc674834a5990add5033bf109e20 \ + --hash=sha256:c9a5529343a619fea777b7caa27d493595b28b5af8b005e8d1817559fcccf493 \ + --hash=sha256:e2b49c48df05e19bb20b400b7ff7dc6f1ee36b84dc717c3771c468b33697b466 \ + --hash=sha256:cd9447633b0bafaf82190d9a8d56f3cb2e8d30169483aee67d800816e028190a \ + --hash=sha256:1c5acc660c458421e88c4c5fe092ce15923adfac4c732af1ac4fced683a5ea97 \ + --hash=sha256:35ab552c6f0a93365b3cb5664a5305f3920daa0a43deb5b2c547c52815ec46b9 \ + --hash=sha256:542781d50e1182b8fb619b1265dfe1c765e18215f818b0ab28b2983c28471325 \ + --hash=sha256:9f39242960b8d7f86f3056cc2546aa3047ff4835985f6483229af8f029e9c8db \ + --hash=sha256:8ecedf16c613973622a334701f67edcc0249469f9daa0576e994fb20ac0405db \ + --hash=sha256:d8cacad89aa7daf3626fce106f7f2662ac35b14849df22d252d0d8fab9dc1c0b \ + --hash=sha256:dbaa1ed9768bf9ff04af0919acc55746e62b28333644f0251f38768313f31745 \ + --hash=sha256:954c5c39a09b5302f69f752c3bbf165d368a65c8d200f7d5655e0fa6368a75e6 \ + --hash=sha256:9fd8a14236fdd092a20c0bdf25c3aba3777718d266fabb0fdded4fcf252d1630 \ + --hash=sha256:8752d2814a92aba4e2b2a5922d2782d0029102d99caaf3c201a566bc0b40db29 idna==3.3; python_version >= "3.6" and python_full_version < "3.0.0" or python_full_version >= "3.6.0" and python_version >= "3.6" \ --hash=sha256:84d9dd047ffa80596e0f246e2eab0b391788b0503584e8945f2368256d2735ff \ --hash=sha256:9d643ff0a55b762d5cdb124b8eaa99c66322e2157b69160bc32796e824360e6d @@ -128,31 +140,45 @@ kiwisolver==1.3.2; python_version >= "3.7" \ --hash=sha256:25405f88a37c5f5bcba01c6e350086d65e7465fd1caaf986333d2a045045a223 \ --hash=sha256:bcadb05c3d4794eb9eee1dddf1c24215c92fb7b55a80beae7a60530a91060560 \ --hash=sha256:fc4453705b81d03568d5b808ad8f09c77c47534f6ac2e72e733f9ca4714aa75c -matplotlib==3.4.3; python_version >= "3.7" \ - --hash=sha256:5c988bb43414c7c2b0a31bd5187b4d27fd625c080371b463a6d422047df78913 \ - --hash=sha256:f1c5efc278d996af8a251b2ce0b07bbeccb821f25c8c9846bdcb00ffc7f158aa \ - --hash=sha256:eeb1859efe7754b1460e1d4991bbd4a60a56f366bc422ef3a9c5ae05f0bc70b5 \ - --hash=sha256:844a7b0233e4ff7fba57e90b8799edaa40b9e31e300b8d5efc350937fa8b1bea \ - --hash=sha256:85f0c9cf724715e75243a7b3087cf4a3de056b55e05d4d76cc58d610d62894f3 \ - --hash=sha256:c70b6311dda3e27672f1bf48851a0de816d1ca6aaf3d49365fbdd8e959b33d2b \ - --hash=sha256:b884715a59fec9ad3b6048ecf3860f3b2ce965e676ef52593d6fa29abcf7d330 \ - --hash=sha256:a78a3b51f29448c7f4d4575e561f6b0dbb8d01c13c2046ab6c5220eb25c06506 \ - --hash=sha256:6a724e3a48a54b8b6e7c4ae38cd3d07084508fa47c410c8757e9db9791421838 \ - --hash=sha256:48e1e0859b54d5f2e29bb78ca179fd59b971c6ceb29977fb52735bfd280eb0f5 \ - --hash=sha256:01c9de93a2ca0d128c9064f23709362e7fefb34910c7c9e0b8ab0de8258d5eda \ - --hash=sha256:ebfb01a65c3f5d53a8c2a8133fec2b5221281c053d944ae81ff5822a68266617 \ - --hash=sha256:b8b53f336a4688cfce615887505d7e41fd79b3594bf21dd300531a4f5b4f746a \ - --hash=sha256:fcd6f1954943c0c192bfbebbac263f839d7055409f1173f80d8b11a224d236da \ - 
--hash=sha256:6be8df61b1626e1a142c57e065405e869e9429b4a6dab4a324757d0dc4d42235 \ - --hash=sha256:41b6e307458988891fcdea2d8ecf84a8c92d53f84190aa32da65f9505546e684 \ - --hash=sha256:f72657f1596199dc1e4e7a10f52a4784ead8a711f4e5b59bea95bdb97cf0e4fd \ - --hash=sha256:f15edcb0629a0801738925fe27070480f446fcaa15de65946ff946ad99a59a40 \ - --hash=sha256:556965514b259204637c360d213de28d43a1f4aed1eca15596ce83f768c5a56f \ - --hash=sha256:54a026055d5f8614f184e588f6e29064019a0aa8448450214c0b60926d62d919 \ - --hash=sha256:fc4f526dfdb31c9bd6b8ca06bf9fab663ca12f3ec9cdf4496fb44bc680140318 -mne==0.23.4; python_version >= "3.6" \ - --hash=sha256:90f2c0e1182f42a3c5572ee023a94e347adf6cf53ebe619e8e1e4d09bb189ffa \ - --hash=sha256:ecace5caacf10961ebb74cc5e0ead4d4dbc55fed006eab1e644da144092354e9 +matplotlib==3.5.1; python_version >= "3.7" \ + --hash=sha256:456cc8334f6d1124e8ff856b42d2cc1c84335375a16448189999496549f7182b \ + --hash=sha256:8a77906dc2ef9b67407cec0bdbf08e3971141e535db888974a915be5e1e3efc6 \ + --hash=sha256:8e70ae6475cfd0fad3816dcbf6cac536dc6f100f7474be58d59fa306e6e768a4 \ + --hash=sha256:53273c5487d1c19c3bc03b9eb82adaf8456f243b97ed79d09dded747abaf1235 \ + --hash=sha256:e3b6f3fd0d8ca37861c31e9a7cab71a0ef14c639b4c95654ea1dd153158bf0df \ + --hash=sha256:e8c87cdaf06fd7b2477f68909838ff4176f105064a72ca9d24d3f2a29f73d393 \ + --hash=sha256:e2f28a07b4f82abb40267864ad7b3a4ed76f1b1663e81c7efc84a9b9248f672f \ + --hash=sha256:d70a32ee1f8b55eed3fd4e892f0286df8cccc7e0475c11d33b5d0a148f5c7599 \ + --hash=sha256:68fa30cec89b6139dc559ed6ef226c53fd80396da1919a1b5ef672c911aaa767 \ + --hash=sha256:2e3484d8455af3fdb0424eae1789af61f6a79da0c80079125112fd5c1b604218 \ + --hash=sha256:e293b16cf303fe82995e41700d172a58a15efc5331125d08246b520843ef21ee \ + --hash=sha256:e3520a274a0e054e919f5b3279ee5dbccf5311833819ccf3399dab7c83e90a25 \ + --hash=sha256:2252bfac85cec7af4a67e494bfccf9080bcba8a0299701eab075f48847cca907 \ + --hash=sha256:abf67e05a1b7f86583f6ebd01f69b693b9c535276f4e943292e444855870a1b8 \ + --hash=sha256:6c094e4bfecd2fa7f9adffd03d8abceed7157c928c2976899de282f3600f0a3d \ + --hash=sha256:506b210cc6e66a0d1c2bb765d055f4f6bc2745070fb1129203b67e85bbfa5c18 \ + --hash=sha256:b04fc29bcef04d4e2d626af28d9d892be6aba94856cb46ed52bcb219ceac8943 \ + --hash=sha256:577ed20ec9a18d6bdedb4616f5e9e957b4c08563a9f985563a31fd5b10564d2a \ + --hash=sha256:e486f60db0cd1c8d68464d9484fd2a94011c1ac8593d765d0211f9daba2bd535 \ + --hash=sha256:b71f3a7ca935fc759f2aed7cec06cfe10bc3100fadb5dbd9c435b04e557971e1 \ + --hash=sha256:d24e5bb8028541ce25e59390122f5e48c8506b7e35587e5135efcb6471b4ac6c \ + --hash=sha256:778d398c4866d8e36ee3bf833779c940b5f57192fa0a549b3ad67bc4c822771b \ + --hash=sha256:bb1c613908f11bac270bc7494d68b1ef6e7c224b7a4204d5dacf3522a41e2bc3 \ + --hash=sha256:edf5e4e1d5fb22c18820e8586fb867455de3b109c309cb4fce3aaed85d9468d1 \ + --hash=sha256:40e0d7df05e8efe60397c69b467fc8f87a2affeb4d562fe92b72ff8937a2b511 \ + --hash=sha256:7a350ca685d9f594123f652ba796ee37219bf72c8e0fc4b471473d87121d6d34 \ + --hash=sha256:3e66497cd990b1a130e21919b004da2f1dc112132c01ac78011a90a0f9229778 \ + --hash=sha256:87900c67c0f1728e6db17c6809ec05c025c6624dcf96a8020326ea15378fe8e7 \ + --hash=sha256:b8a4fb2a0c5afbe9604f8a91d7d0f27b1832c3e0b5e365f95a13015822b4cd65 \ + --hash=sha256:fe8d40c434a8e2c68d64c6d6a04e77f21791a93ff6afe0dce169597c110d3079 \ + --hash=sha256:34a1fc29f8f96e78ec57a5eff5e8d8b53d3298c3be6df61e7aa9efba26929522 \ + --hash=sha256:b19a761b948e939a9e20173aaae76070025f0024fc8f7ba08bef22a5c8573afc \ + 
--hash=sha256:6803299cbf4665eca14428d9e886de62e24f4223ac31ab9c5d6d5339a39782c7 \ + --hash=sha256:14334b9902ec776461c4b8c6516e26b450f7ebe0b3ef8703bf5cdfbbaecf774a \ + --hash=sha256:b2e9810e09c3a47b73ce9cab5a72243a1258f61e7900969097a817232246ce1c +mne==0.24.1; python_version >= "3.7" \ + --hash=sha256:26f75fc0b468ed078f35a86a26fa75bab60887b914b90707000cbc0cb8b11e78 \ + --hash=sha256:38cbffd03a6ad0e83ef4a964ac9910a37d164c37fcc84894e39ed0cdf805300d numpy==1.21.1; python_version >= "3.7" \ --hash=sha256:38e8648f9449a549a7dfe8d8755a5979b45b3538520d1e735637ef28e8c2dc50 \ --hash=sha256:fd7d7409fa643a91d0a05c7554dd68aa9c9bb16e186f6ccfe40d6e003156e33a \ @@ -182,9 +208,9 @@ numpy==1.21.1; python_version >= "3.7" \ --hash=sha256:01721eefe70544d548425a07c80be8377096a54118070b8a62476866d5208e33 \ --hash=sha256:2d4d1de6e6fb3d28781c73fbde702ac97f03d79e4ffd6598b880b2d95d62ead4 \ --hash=sha256:dff4af63638afcc57a3dfb9e4b26d434a7a602d225b42d746ea7fe2edf1342fd -packaging==21.0; python_version >= "3.6" \ - --hash=sha256:c86254f9220d55e31cc94d69bade760f0847da8000def4dfe1c6b872fd14ff14 \ - --hash=sha256:7dc96269f53a4ccec5c0670940a4281106dd0bb343f47b7471f779df49c2fbe7 +packaging==21.3; python_version >= "3.7" \ + --hash=sha256:ef103e05f519cdc783ae24ea4e2e0f508a9c99b2d4969652eed6a2e1ea5bd522 \ + --hash=sha256:dd47c42927d89ab911e606518907cc2d3a1f38bbd026385970643f9c5b8ecfeb pandas==1.1.5; python_full_version >= "3.6.1" \ --hash=sha256:bf23a3b54d128b50f4f9d4675b3c1857a688cc6731a32f931837d72effb2698d \ --hash=sha256:5a780260afc88268a9d3ac3511d8f494fdcf637eece62fb9eb656a63d53eb7ca \ @@ -210,60 +236,54 @@ pandas==1.1.5; python_full_version >= "3.6.1" \ --hash=sha256:c94ff2780a1fd89f190390130d6d36173ca59fcfb3fe0ff596f9a56518191ccb \ --hash=sha256:edda9bacc3843dfbeebaf7a701763e68e741b08fccb889c003b0a52f0ee95782 \ --hash=sha256:f10fc41ee3c75a474d3bdf68d396f10782d013d7f67db99c0efbfd0acb99701b -pillow==8.4.0; python_version >= "3.7" \ - --hash=sha256:81f8d5c81e483a9442d72d182e1fb6dcb9723f289a57e8030811bac9ea3fef8d \ - --hash=sha256:3f97cfb1e5a392d75dd8b9fd274d205404729923840ca94ca45a0af57e13dbe6 \ - --hash=sha256:eb9fc393f3c61f9054e1ed26e6fe912c7321af2f41ff49d3f83d05bacf22cc78 \ - --hash=sha256:d82cdb63100ef5eedb8391732375e6d05993b765f72cb34311fab92103314649 \ - --hash=sha256:62cc1afda735a8d109007164714e73771b499768b9bb5afcbbee9d0ff374b43f \ - --hash=sha256:e3dacecfbeec9a33e932f00c6cd7996e62f53ad46fbe677577394aaa90ee419a \ - --hash=sha256:620582db2a85b2df5f8a82ddeb52116560d7e5e6b055095f04ad828d1b0baa39 \ - --hash=sha256:1bc723b434fbc4ab50bb68e11e93ce5fb69866ad621e3c2c9bdb0cd70e345f55 \ - --hash=sha256:72cbcfd54df6caf85cc35264c77ede902452d6df41166010262374155947460c \ - --hash=sha256:70ad9e5c6cb9b8487280a02c0ad8a51581dcbbe8484ce058477692a27c151c0a \ - --hash=sha256:25a49dc2e2f74e65efaa32b153527fc5ac98508d502fa46e74fa4fd678ed6645 \ - --hash=sha256:93ce9e955cc95959df98505e4608ad98281fff037350d8c2671c9aa86bcf10a9 \ - --hash=sha256:2e4440b8f00f504ee4b53fe30f4e381aae30b0568193be305256b1462216feff \ - --hash=sha256:8c803ac3c28bbc53763e6825746f05cc407b20e4a69d0122e526a582e3b5e153 \ - --hash=sha256:c8a17b5d948f4ceeceb66384727dde11b240736fddeda54ca740b9b8b1556b29 \ - --hash=sha256:1394a6ad5abc838c5cd8a92c5a07535648cdf6d09e8e2d6df916dfa9ea86ead8 \ - --hash=sha256:792e5c12376594bfcb986ebf3855aa4b7c225754e9a9521298e460e92fb4a488 \ - --hash=sha256:d99ec152570e4196772e7a8e4ba5320d2d27bf22fdf11743dd882936ed64305b \ - --hash=sha256:7b7017b61bbcdd7f6363aeceb881e23c46583739cb69a3ab39cb384f6ec82e5b \ - 
--hash=sha256:d89363f02658e253dbd171f7c3716a5d340a24ee82d38aab9183f7fdf0cdca49 \ - --hash=sha256:0a0956fdc5defc34462bb1c765ee88d933239f9a94bc37d132004775241a7585 \ - --hash=sha256:5b7bb9de00197fb4261825c15551adf7605cf14a80badf1761d61e59da347779 \ - --hash=sha256:72b9e656e340447f827885b8d7a15fc8c4e68d410dc2297ef6787eec0f0ea409 \ - --hash=sha256:a5a4532a12314149d8b4e4ad8ff09dde7427731fcfa5917ff16d0291f13609df \ - --hash=sha256:82aafa8d5eb68c8463b6e9baeb4f19043bb31fefc03eb7b216b51e6a9981ae09 \ - --hash=sha256:066f3999cb3b070a95c3652712cffa1a748cd02d60ad7b4e485c3748a04d9d76 \ - --hash=sha256:5503c86916d27c2e101b7f71c2ae2cddba01a2cf55b8395b0255fd33fa4d1f1a \ - --hash=sha256:4acc0985ddf39d1bc969a9220b51d94ed51695d455c228d8ac29fcdb25810e6e \ - --hash=sha256:0b052a619a8bfcf26bd8b3f48f45283f9e977890263e4571f2393ed8898d331b \ - --hash=sha256:493cb4e415f44cd601fcec11c99836f707bb714ab03f5ed46ac25713baf0ff20 \ - --hash=sha256:b8831cb7332eda5dc89b21a7bce7ef6ad305548820595033a4b03cf3091235ed \ - --hash=sha256:5e9ac5f66616b87d4da618a20ab0a38324dbe88d8a39b55be8964eb520021e02 \ - --hash=sha256:3eb1ce5f65908556c2d8685a8f0a6e989d887ec4057326f6c22b24e8a172c66b \ - --hash=sha256:ddc4d832a0f0b4c52fff973a0d44b6c99839a9d016fe4e6a1cb8f3eea96479c2 \ - --hash=sha256:9a3e5ddc44c14042f0844b8cf7d2cd455f6cc80fd7f5eefbe657292cf601d9ad \ - --hash=sha256:c70e94281588ef053ae8998039610dbd71bc509e4acbc77ab59d7d2937b10698 \ - --hash=sha256:3862b7256046fcd950618ed22d1d60b842e3a40a48236a5498746f21189afbbc \ - --hash=sha256:a4901622493f88b1a29bd30ec1a2f683782e57c3c16a2dbc7f2595ba01f639df \ - --hash=sha256:84c471a734240653a0ec91dec0996696eea227eafe72a33bd06c92697728046b \ - --hash=sha256:244cf3b97802c34c41905d22810846802a3329ddcb93ccc432870243211c79fc \ - --hash=sha256:b8e2f83c56e141920c39464b852de3719dfbfb6e3c99a2d8da0edf4fb33176ed -pooch==1.5.2; python_version >= "3.6" \ - --hash=sha256:debb159655de9eeccc366deb111fe1e33e76efac19724436b6878c09deca4293 \ - --hash=sha256:5969b2f1defbdc405df932767e05e0b536e2771c27f1f95d7f260bc99bf13581 -pyparsing==2.4.7; python_version >= "3.7" and python_full_version < "3.0.0" or python_full_version >= "3.3.0" and python_version >= "3.7" \ - --hash=sha256:ef9d7589ef3c200abe66653d3f1ab1033c3c419ae9b9bdb1240a85b024efc88b \ - --hash=sha256:c203ec8783bf771a155b207279b9bccb8dea02d8f0c9e5f8ead507bc3246ecc1 +pillow==9.0.1; python_version >= "3.7" \ + --hash=sha256:a5d24e1d674dd9d72c66ad3ea9131322819ff86250b30dc5821cbafcfa0b96b4 \ + --hash=sha256:2632d0f846b7c7600edf53c48f8f9f1e13e62f66a6dbc15191029d950bfed976 \ + --hash=sha256:b9618823bd237c0d2575283f2939655f54d51b4527ec3972907a927acbcc5bfc \ + --hash=sha256:9bfdb82cdfeccec50aad441afc332faf8606dfa5e8efd18a6692b5d6e79f00fd \ + --hash=sha256:5100b45a4638e3c00e4d2320d3193bdabb2d75e79793af7c3eb139e4f569f16f \ + --hash=sha256:528a2a692c65dd5cafc130de286030af251d2ee0483a5bf50c9348aefe834e8a \ + --hash=sha256:0f29d831e2151e0b7b39981756d201f7108d3d215896212ffe2e992d06bfe049 \ + --hash=sha256:855c583f268edde09474b081e3ddcd5cf3b20c12f26e0d434e1386cc5d318e7a \ + --hash=sha256:d9d7942b624b04b895cb95af03a23407f17646815495ce4547f0e60e0b06f58e \ + --hash=sha256:81c4b81611e3a3cb30e59b0cf05b888c675f97e3adb2c8672c3154047980726b \ + --hash=sha256:413ce0bbf9fc6278b2d63309dfeefe452835e1c78398efb431bab0672fe9274e \ + --hash=sha256:80fe64a6deb6fcfdf7b8386f2cf216d329be6f2781f7d90304351811fb591360 \ + --hash=sha256:cef9c85ccbe9bee00909758936ea841ef12035296c748aaceee535969e27d31b \ + --hash=sha256:1d19397351f73a88904ad1aee421e800fe4bbcd1aeee6435fb62d0a05ccd1030 \ + 
--hash=sha256:d21237d0cd37acded35154e29aec853e945950321dd2ffd1a7d86fe686814669 \ + --hash=sha256:ede5af4a2702444a832a800b8eb7f0a7a1c0eed55b644642e049c98d589e5092 \ + --hash=sha256:b5b3f092fe345c03bca1e0b687dfbb39364b21ebb8ba90e3fa707374b7915204 \ + --hash=sha256:335ace1a22325395c4ea88e00ba3dc89ca029bd66bd5a3c382d53e44f0ccd77e \ + --hash=sha256:db6d9fac65bd08cea7f3540b899977c6dee9edad959fa4eaf305940d9cbd861c \ + --hash=sha256:f154d173286a5d1863637a7dcd8c3437bb557520b01bddb0be0258dcb72696b5 \ + --hash=sha256:14d4b1341ac07ae07eb2cc682f459bec932a380c3b122f5540432d8977e64eae \ + --hash=sha256:effb7749713d5317478bb3acb3f81d9d7c7f86726d41c1facca068a04cf5bb4c \ + --hash=sha256:7f7609a718b177bf171ac93cea9fd2ddc0e03e84d8fa4e887bdfc39671d46b00 \ + --hash=sha256:80ca33961ced9c63358056bd08403ff866512038883e74f3a4bf88ad3eb66838 \ + --hash=sha256:1c3c33ac69cf059bbb9d1a71eeaba76781b450bc307e2291f8a4764d779a6b28 \ + --hash=sha256:12875d118f21cf35604176872447cdb57b07126750a33748bac15e77f90f1f9c \ + --hash=sha256:514ceac913076feefbeaf89771fd6febde78b0c4c1b23aaeab082c41c694e81b \ + --hash=sha256:d3c5c79ab7dfce6d88f1ba639b77e77a17ea33a01b07b99840d6ed08031cb2a7 \ + --hash=sha256:718856856ba31f14f13ba885ff13874be7fefc53984d2832458f12c38205f7f7 \ + --hash=sha256:f25ed6e28ddf50de7e7ea99d7a976d6a9c415f03adcaac9c41ff6ff41b6d86ac \ + --hash=sha256:011233e0c42a4a7836498e98c1acf5e744c96a67dd5032a6f666cc1fb97eab97 \ + --hash=sha256:253e8a302a96df6927310a9d44e6103055e8fb96a6822f8b7f514bb7ef77de56 \ + --hash=sha256:6295f6763749b89c994fcb6d8a7f7ce03c3992e695f89f00b741b4580b199b7e \ + --hash=sha256:a9f44cd7e162ac6191491d7249cceb02b8116b0f7e847ee33f739d7cb1ea1f70 \ + --hash=sha256:6c8bc8238a7dfdaf7a75f5ec5a663f4173f8c367e5a39f87e720495e1eed75fa +pooch==1.6.0; python_version >= "3.6" \ + --hash=sha256:3bf0e20027096836b8dbce0152dbb785a269abeb621618eb4bdd275ff1e23c9c \ + --hash=sha256:57d20ec4b10dd694d2b05bb64bc6b109c6e85a6c1405794ce87ed8b341ab3f44 +pyparsing==3.0.7; python_version >= "3.7" \ + --hash=sha256:a6c06a88f252e6c322f65faf8f418b16213b51bdfaece0524c1c1bc30c63c484 \ + --hash=sha256:18ee9022775d270c55187733956460083db60b37d0d0fb357445f3094eed3eea pyriemann==0.2.7 \ --hash=sha256:1feed8f72d94414bdc9ca4485333711a3f91f9742ae8794874a7399f4166758a python-dateutil==2.8.2; python_full_version >= "3.6.1" and python_version >= "3.7" and (python_version >= "3.7" and python_full_version < "3.0.0" or python_full_version >= "3.3.0" and python_version >= "3.7") \ --hash=sha256:0123cacc1627ae19ddf3c27a5de5bd67ee4586fbdd6440d9748f8abb483d3e86 \ --hash=sha256:961d03dc3453ebbc59dbdea9e4e11c5651520a876d0f4db161e8674aae935da9 -pytz==2021.3; python_full_version >= "3.6.1" \ +pytz==2021.3; python_full_version >= "3.6.1" and python_version >= "3.6" \ --hash=sha256:3672058bc3453457b622aab7a1c3bfd5ab0bdae451512f6cf25f64ed37f5b87c \ --hash=sha256:acad2d8b20a1af07d4e4c9d2e9285c5ed9104354062f275f3fcd88dcef4f1326 pyyaml==5.4.1; (python_version >= "2.7" and python_full_version < "3.0.0") or (python_full_version >= "3.6.0") \ @@ -296,35 +316,42 @@ pyyaml==5.4.1; (python_version >= "2.7" and python_full_version < "3.0.0") or (p --hash=sha256:49d4cdd9065b9b6e206d0595fee27a96b5dd22618e7520c33204a4a3239d5b10 \ --hash=sha256:c20cfa2d49991c8b4147af39859b167664f2ad4561704ee74c1de03318e898db \ --hash=sha256:607774cbba28732bfa802b54baa7484215f530991055bb562efbed5b2f20a45e -requests==2.26.0; (python_version >= "2.7" and python_full_version < "3.0.0") or (python_full_version >= "3.6.0") \ - 
--hash=sha256:6c1246513ecd5ecd4528a0906f910e8f0f9c6b8ec72030dc9fd154dc1a6efd24 \ - --hash=sha256:b8aa58f8cf793ffd8782d3d8cb19e66ef36f7aba4353eec859e74678b01b07a7 -scikit-learn==1.0; python_version >= "3.7" \ - --hash=sha256:776800194e757cd212b47cd05907e0eb67a554ad333fe76776060dbb729e3427 \ - --hash=sha256:e8a6074f7d505bbfd30bcc1c57dc7cb150cc9c021459c2e2729854be1aefb5f7 \ - --hash=sha256:56ab58978c7aa181856a42f8f491be953b755105040aeb070ebd6b180896f146 \ - --hash=sha256:b1df4d1151dd6d945324583125e6449bb74ec7cd91ffd7f850015cdb75f151b5 \ - --hash=sha256:b9f10b85dcd9ce80f738e33f55a32b3a538b47409dc1a59eec30b46ea96759db \ - --hash=sha256:663a6aaad92e5690b03d931f849016c9718beaa654e9a15f08bfcac750241036 \ - --hash=sha256:190c178028f9073d9f61cd30a19c685993236b9b2df884f16608cbb3ff03800b \ - --hash=sha256:555f4b4c10d3bef9e3cda63c3b45670a091fb50328fccd54948cd8a7cf887198 \ - --hash=sha256:9f103cd6d7e15fa537a844c1a85c9beeeee8ec38357287c9efd3ee4bb8354e1d \ - --hash=sha256:121f78d6564000dc5e968394f45aac87981fcaaf2be40cfcd8f07b2baa1e1829 \ - --hash=sha256:83ab0d0447b8de8450c554952a8399791544605caf274fc3c904e247e1584ced \ - --hash=sha256:f8aecb3edc443e5625725ae1ef8f500fa78ce7cb0e864115864bb9f234d18290 \ - --hash=sha256:c1f710bba72925aa96e60828df5d2a4872f5d4a4ad7bb4a4c9a6a41c9ce9a198 \ - --hash=sha256:4cb5ccb2b63c617ead48c6d92001273ad1b0e8e2bd4a4857edb58749a88b6d82 \ - --hash=sha256:29559c207616604bbaa664bf98eed81b32d9f3d4c975065a206a5e2b268fe784 \ - --hash=sha256:c9c329ec195cdea6a4dee3cebdb1602f4e0f69351c63bc58a4812f3c8a9f4f2d \ - --hash=sha256:14bd46639b2149b3ed613adc095511313a0db62ba9fa31117bdcb5c23722e93b \ - --hash=sha256:efeac34d0ce6bf9404d268545867cbde9d6ecadd0e9bd7e6b468e5f4e2349875 \ - --hash=sha256:af94b89a8f7759603c696b320e86e57f4b2bb4911e02bf2bae33c714ac498fb8 \ - --hash=sha256:6d8bdacde73f5f484325179f466ce2011f79360e9a152100179c3dafb88f2a35 \ - --hash=sha256:f7053801ceb7c51ce674c6a8e37a18fcc221c292f66ef7da84744ecf13b4a0c0 \ - --hash=sha256:e35135657b7103a70298cf557e4fad06af97607cb0780d8f44a2f91ca7769458 \ - --hash=sha256:9d8caf7fa58791b6b26e912e44d5056818b7bb3142bfa7806f54bde47c189078 \ - --hash=sha256:6a056637f7f9876e4c9db9b5434d340e0c97e25f00c4c04458f0ff906e82488e \ - --hash=sha256:eed33b7ca2bf3fdd585339db42838ab0b641952e064564bff6e9a10573ea665c +requests==2.27.1; (python_version >= "2.7" and python_full_version < "3.0.0") or (python_full_version >= "3.6.0") \ + --hash=sha256:f22fa1e554c9ddfd16e6e41ac79759e17be9e492b3587efa038054674760e72d \ + --hash=sha256:68d7c56fd5a8999887728ef304a6d12edc7be74f1cfa47714fc8b414525c9a61 +scikit-learn==1.0.2; python_version >= "3.7" \ + --hash=sha256:b5870959a5484b614f26d31ca4c17524b1b0317522199dc985c3b4256e030767 \ + --hash=sha256:da3c84694ff693b5b3194d8752ccf935a665b8b5edc33a283122f4273ca3e687 \ + --hash=sha256:75307d9ea39236cad7eea87143155eea24d48f93f3a2f9389c817f7019f00705 \ + --hash=sha256:f14517e174bd7332f1cca2c959e704696a5e0ba246eb8763e6c24876d8710049 \ + --hash=sha256:d9aac97e57c196206179f674f09bc6bffcd0284e2ba95b7fe0b402ac3f986023 \ + --hash=sha256:d93d4c28370aea8a7cbf6015e8a669cd5d69f856cc2aa44e7a590fb805bb5583 \ + --hash=sha256:85260fb430b795d806251dd3bb05e6f48cdc777ac31f2bcf2bc8bbed3270a8f5 \ + --hash=sha256:a053a6a527c87c5c4fa7bf1ab2556fa16d8345cf99b6c5a19030a4a7cd8fd2c0 \ + --hash=sha256:245c9b5a67445f6f044411e16a93a554edc1efdcce94d3fc0bc6a4b9ac30b752 \ + --hash=sha256:158faf30684c92a78e12da19c73feff9641a928a8024b4fa5ec11d583f3d8a87 \ + --hash=sha256:08ef968f6b72033c16c479c966bf37ccd49b06ea91b765e1cc27afefe723920b \ + 
--hash=sha256:16455ace947d8d9e5391435c2977178d0ff03a261571e67f627c8fee0f9d431a \ + --hash=sha256:2f3b453e0b149898577e301d27e098dfe1a36943f7bb0ad704d1e548efc3b448 \ + --hash=sha256:46f431ec59dead665e1370314dbebc99ead05e1c0a9df42f22d6a0e00044820f \ + --hash=sha256:ff3fa8ea0e09e38677762afc6e14cad77b5e125b0ea70c9bba1992f02c93b028 \ + --hash=sha256:9369b030e155f8188743eb4893ac17a27f81d28a884af460870c7c072f114243 \ + --hash=sha256:7d6b2475f1c23a698b48515217eb26b45a6598c7b1840ba23b3c5acece658dbb \ + --hash=sha256:285db0352e635b9e3392b0b426bc48c3b485512d3b4ac3c7a44ec2a2ba061e66 \ + --hash=sha256:5cb33fe1dc6f73dc19e67b264dbb5dde2a0539b986435fdd78ed978c14654830 \ + --hash=sha256:b1391d1a6e2268485a63c3073111fe3ba6ec5145fc957481cfd0652be571226d \ + --hash=sha256:bc3744dabc56b50bec73624aeca02e0def06b03cb287de26836e730659c5d29c \ + --hash=sha256:a999c9f02ff9570c783069f1074f06fe7386ec65b84c983db5aeb8144356a355 \ + --hash=sha256:7626a34eabbf370a638f32d1a3ad50526844ba58d63e3ab81ba91e2a7c6d037e \ + --hash=sha256:a90b60048f9ffdd962d2ad2fb16367a87ac34d76e02550968719eb7b5716fd10 \ + --hash=sha256:7a93c1292799620df90348800d5ac06f3794c1316ca247525fa31169f6d25855 \ + --hash=sha256:eabceab574f471de0b0eb3f2ecf2eee9f10b3106570481d007ed1c84ebf6d6a1 \ + --hash=sha256:55f2f3a8414e14fbee03782f9fe16cca0f141d639d2b1c1a36779fa069e1db57 \ + --hash=sha256:80095a1e4b93bd33261ef03b9bc86d6db649f988ea4dbcf7110d0cded8d7213d \ + --hash=sha256:fa38a1b9b38ae1fad2863eff5e0d69608567453fdfc850c992e6e47eb764e846 \ + --hash=sha256:ff746a69ff2ef25f62b36338c615dd15954ddc3ab8e73530237dd73235e76d62 \ + --hash=sha256:e174242caecb11e4abf169342641778f68e1bfaba80cd18acd6bc84286b9a534 \ + --hash=sha256:b54a62c6e318ddbfa7d22c383466d38d2ee770ebdb5ddb668d56a099f6eaf75f scipy==1.6.1; python_version >= "3.7" \ --hash=sha256:a15a1f3fc0abff33e792d6049161b7795909b40b97c6cc2934ed54384017ab76 \ --hash=sha256:e79570979ccdc3d165456dd62041d9556fb9733b86b4b6d818af7a0afc15f092 \ @@ -348,12 +375,21 @@ scipy==1.6.1; python_version >= "3.7" \ seaborn==0.11.2; python_version >= "3.6" \ --hash=sha256:85a6baa9b55f81a0623abddc4a26b334653ff4c6b18c418361de19dbba0ef283 \ --hash=sha256:cf45e9286d40826864be0e3c066f98536982baf701a7caa386511792d61ff4f6 +setuptools-scm==6.4.2; python_version >= "3.7" \ + --hash=sha256:acea13255093849de7ccb11af9e1fb8bde7067783450cee9ef7a93139bddf6d4 \ + --hash=sha256:6833ac65c6ed9711a4d5d2266f8024cfa07c533a0e55f4c12f6eff280a5a9e30 six==1.16.0; python_full_version >= "3.6.1" and python_version >= "3.7" \ --hash=sha256:8abb2f1d86890a2dfb989f9a77cfcfd3e47c2a354b01111771326f8aa26e0254 \ --hash=sha256:1e61c37477a1626458e36f7b1d82aa5c9b094fa4802892072e49de9c60c4c926 -threadpoolctl==3.0.0; python_version >= "3.7" \ - --hash=sha256:4fade5b3b48ae4b1c30f200b28f39180371104fccc642e039e0f2435ec8cc211 \ - --hash=sha256:d03115321233d0be715f0d3a5ad1d6c065fe425ddc2d671ca8e45e9fd5d7a52a -urllib3==1.26.7; python_version >= "3.6" and python_full_version < "3.0.0" or python_full_version >= "3.6.0" and python_version < "4" and python_version >= "3.6" \ - --hash=sha256:c4fdf4019605b6e5423637e01bc9fe4daef873709a7973e195ceba0a62bbc844 \ - --hash=sha256:4987c65554f7a2dbf30c18fd48778ef124af6fab771a377103da0585e2336ece +threadpoolctl==3.1.0; python_version >= "3.7" \ + --hash=sha256:8b99adda265feb6773280df41eece7b2e6561b772d21ffd52e372f999024907b \ + --hash=sha256:a335baacfaa4400ae1f0d8e3a58d6674d2f8828e3716bb2802c44955ad391380 +tomli==2.0.1; python_version >= "3.7" \ + --hash=sha256:939de3e7a6161af0c887ef91b7d41a53e7c5a1ca976325f429cb46ea9bc30ecc \ + 
--hash=sha256:de526c12914f0c550d15924c62d72abc48d6fe7364aa87328337a31007fe8a4f +tqdm==4.62.3; (python_version >= "2.7" and python_full_version < "3.0.0") or (python_full_version >= "3.4.0") \ + --hash=sha256:8dd278a422499cd6b727e6ae4061c40b48fce8b76d1ccbf5d34fca9b7f925b0c \ + --hash=sha256:d359de7217506c9851b7869f3708d8ee53ed70a1b8edbba4dbcb47442592920d +urllib3==1.26.8; python_version >= "3.6" and python_full_version < "3.0.0" or python_full_version >= "3.6.0" and python_version < "4" and python_version >= "3.6" \ + --hash=sha256:000ca7f471a233c2251c6c7023ee85305721bfdf18621ebff4fd17a8653427ed \ + --hash=sha256:0e7c33d9a63e7ddfcb86780aac87befc2fbddf46c58dbb487e0855f7ceec283c diff --git a/scripts/data_visualization_p300.py b/scripts/data_visualization_p300.py index 72042d597..3c06d5a1b 100644 --- a/scripts/data_visualization_p300.py +++ b/scripts/data_visualization_p300.py @@ -8,6 +8,8 @@ Total downloaded size will be (as of now) 120GB. + +.. versionadded:: 0.4.5 """ # Authors: Jan Sosulski # From 95940becf8b97cd90b247951718181f3db946a7f Mon Sep 17 00:00:00 2001 From: Sylvain Chevallier Date: Tue, 22 Feb 2022 12:07:37 +0100 Subject: [PATCH 13/19] correct pre-commit error and add code coverage (#271) --- .github/workflows/test.yml | 12 +++++++++--- 1 file changed, 9 insertions(+), 3 deletions(-) diff --git a/.github/workflows/test.yml b/.github/workflows/test.yml index 078ad07f8..fff42ab27 100644 --- a/.github/workflows/test.yml +++ b/.github/workflows/test.yml @@ -26,10 +26,8 @@ jobs: with: python-version: ${{ matrix.python-version }} - - uses: pre-commit/action@v2.0.0 - - name: Install Poetry - uses: snok/install-poetry@v1.1.6 + uses: snok/install-poetry@v1 with: virtualenvs-create: true virtualenvs-in-project: true @@ -62,3 +60,11 @@ jobs: run: | source $VENV poetry run python -m moabb.run --pipelines=./moabb/tests/test_pipelines/ --verbose + + - name: Upload Coverage to Codecov + uses: codecov/codecov-action@v2 + if: success() + with: + verbose: true + directory: /home/runner/work/moabb/moabb + files: ./.coverage From 63a3a62ce9480bcf553dde8c35add0897f9bb4f3 Mon Sep 17 00:00:00 2001 From: Jan Sosulski Date: Sat, 26 Mar 2022 17:16:21 +0100 Subject: [PATCH 14/19] add new erp datasets to docs (#282) --- docs/source/datasets.rst | 3 +++ 1 file changed, 3 insertions(+) diff --git a/docs/source/datasets.rst b/docs/source/datasets.rst index 4f21ff8b3..366889d64 100644 --- a/docs/source/datasets.rst +++ b/docs/source/datasets.rst @@ -46,7 +46,10 @@ ERP Datasets BNCI2015003 DemonsP300 EPFLP300 + Huebner2017 + Huebner2018 Lee2019_ERP + Sosulski2019 -------------- From f310573b83f8be715c79a697ed1eaec7d66a3b5e Mon Sep 17 00:00:00 2001 From: Sylvain Chevallier Date: Wed, 6 Apr 2022 16:43:23 +0200 Subject: [PATCH 15/19] Add Brain Invaders datasets (#283) * add brain invaders datasets * add version added for datasets * correct scaling factor for dry electrodes * apply scaling factor for uncorrected datasets --- docs/source/datasets.rst | 5 + moabb/datasets/__init__.py | 2 +- moabb/datasets/braininvaders.py | 719 +++++++++++++++++++++++++++----- 3 files changed, 623 insertions(+), 103 deletions(-) diff --git a/docs/source/datasets.rst b/docs/source/datasets.rst index 366889d64..2545d58c1 100644 --- a/docs/source/datasets.rst +++ b/docs/source/datasets.rst @@ -40,7 +40,12 @@ ERP Datasets :toctree: generated/ :template: class.rst + bi2012a bi2013a + bi2014a + bi2014b + bi2015a + bi2015b BNCI2014008 BNCI2014009 BNCI2015003 diff --git a/moabb/datasets/__init__.py b/moabb/datasets/__init__.py index 
010254c58..e808d2617 100644 --- a/moabb/datasets/__init__.py +++ b/moabb/datasets/__init__.py @@ -17,7 +17,7 @@ BNCI2015003, BNCI2015004, ) -from .braininvaders import bi2013a +from .braininvaders import bi2012, bi2013a, bi2014a, bi2014b, bi2015a, bi2015b from .epfl import EPFLP300 from .gigadb import Cho2017 from .huebner_llp import Huebner2017, Huebner2018 diff --git a/moabb/datasets/braininvaders.py b/moabb/datasets/braininvaders.py index 84b9a5d0a..8ea72f9b0 100644 --- a/moabb/datasets/braininvaders.py +++ b/moabb/datasets/braininvaders.py @@ -1,39 +1,409 @@ import glob import os -import zipfile +import os.path as osp +import shutil +import zipfile as z +from distutils.dir_util import copy_tree import mne +import numpy as np import yaml from mne.channels import make_standard_montage +from scipy.io import loadmat from moabb.datasets import download as dl from moabb.datasets.base import BaseDataset -BI2013a_URL = "https://zenodo.org/record/1494240/files/" +BI2012a_URL = "https://zenodo.org/record/2649069/files/" +BI2013a_URL = "https://zenodo.org/record/2669187/files/" +BI2014a_URL = "https://zenodo.org/record/3266223/files/" +BI2014b_URL = "https://zenodo.org/record/3267302/files/" +BI2015a_URL = "https://zenodo.org/record/3266930/files/" +BI2015b_URL = "https://zenodo.org/record/3268762/files/" + + +def _bi_get_subject_data(ds, subject): # noqa: C901 + file_path_list = ds.data_path(subject) + + sessions = {} + + for file_path in file_path_list: + if ds.code in [ + "Brain Invaders 2012", + "Brain Invaders 2014a", + "Brain Invaders 2014b", + "Brain Invaders 2015b", + ]: + session_name = "session_1" + elif ds.code == "Brain Invaders 2013a": + session_number = file_path.split(os.sep)[-2].replace("Session", "") + session_name = "session_" + session_number + elif ds.code == "Brain Invaders 2015a": + session_name = f'session_{file_path.split("_")[-1][1:2]}' + if session_name not in sessions.keys(): + sessions[session_name] = {} + + if ds.code == "Brain Invaders 2012": + condition = file_path.split("/")[-1].split(".")[0].split(os.sep)[-1] + run_name = "run_" + condition + # fmt: off + chnames = [ + 'F7', 'F3', 'Fz', 'F4', 'F8', 'T7', 'C3', 'Cz', 'C4', + 'T8', 'P7', 'P3', 'Pz', 'P4', 'P8', 'O1', 'O2', 'STI 014' + ] + # fmt: on + chtypes = ["eeg"] * 17 + ["stim"] + X = loadmat(file_path)[condition].T + S = X[1:18, :] * 1e-6 + stim = (X[18, :] + X[19, :])[None, :] + X = np.concatenate([S, stim]) + sfreq = 128 + elif ds.code == "Brain Invaders 2013a": + run_number = file_path.split(os.sep)[-1] + run_number = run_number.split("_")[-1] + run_number = run_number.split(".mat")[0] + run_name = "run_" + run_number + # fmt: off + chnames = [ + "Fp1", "Fp2", "F5", "AFz", "F6", "T7", "Cz", "T8", "P7", + "P3", "Pz", "P4", "P8", "O1", "Oz", "O2", "STI 014", + ] + # fmt: on + chtypes = ["eeg"] * 16 + ["stim"] + X = loadmat(file_path)["data"].T + sfreq = 512 + elif ds.code == "Brain Invaders 2014a": + run_name = "run_1" + # fmt: off + chnames = [ + 'Fp1', 'Fp2', 'F3', 'AFz', 'F4', 'T7', 'Cz', 'T8', 'P7', + 'P3', 'Pz', 'P4', 'P8', 'O1', 'Oz', 'O2', 'STI 014' + ] + # fmt: on + chtypes = ["eeg"] * 16 + ["stim"] + file_path = file_path_list[0] + D = loadmat(file_path)["samples"].T + S = D[1:17, :] * 1e-6 + stim = D[-1, :] + X = np.concatenate([S, stim[None, :]]) + sfreq = 512 + elif ds.code == "Brain Invaders 2014b": + # fmt: off + chnames = [ + 'Fp1', 'Fp2', 'AFz', 'F7', 'F3', 'F4', 'F8', 'FC5', 'FC1', 'FC2', + 'FC6', 'T7', 'C3', 'Cz', 'C4', 'T8', 'CP5', 'CP1', 'CP2', 'CP6', + 'P7', 'P3', 'Pz', 'P4', 'P8', 'PO7', 
'O1', 'Oz', 'O2', 'PO8', 'PO9', + 'PO10', 'STI 014'] + # fmt: on + chtypes = ["eeg"] * 32 + ["stim"] + run_name = "run_1" + + D = loadmat(file_path)["samples"].T + if subject % 2 == 1: + S = D[1:33, :] * 1e-6 + else: + S = D[33:65, :] * 1e-6 + stim = D[-1, :] + X = np.concatenate([S, stim[None, :]]) + sfreq = 512 + elif ds.code == "Brain Invaders 2015a": + run_name = "run_1" + # fmt: off + chnames = [ + 'Fp1', 'Fp2', 'AFz', 'F7', 'F3', 'F4', 'F8', 'FC5', 'FC1', 'FC2', 'FC6', + 'T7', 'C3', 'Cz', 'C4', 'T8', 'CP5', 'CP1', 'CP2', 'CP6', 'P7', 'P3', + 'Pz', 'P4', 'P8', 'PO7', 'O1', 'Oz', 'O2', 'PO8', 'PO9', 'PO10', 'STI 014' + ] + # fmt: on + chtypes = ["eeg"] * 32 + ["stim"] + D = loadmat(file_path)["DATA"].T + S = D[1:33, :] * 1e-6 + stim = D[-2, :] + D[-1, :] + X = np.concatenate([S, stim[None, :]]) + sfreq = 512 + elif ds.code == "Brain Invaders 2015b": + run_name = "run_" + file_path.split("_")[-1].split(".")[0][1] + # fmt: off + chnames = [ + 'Fp1', 'Fp2', 'AFz', 'F7', 'F3', 'F4', 'F8', 'FC5', 'FC1', 'FC2', + 'FC6', 'T7', 'C3', 'Cz', 'C4', 'T8', 'CP5', 'CP1', 'CP2', 'CP6', + 'P7', 'P3', 'Pz', 'P4', 'P8', 'PO7', 'O1', 'Oz', 'O2', 'PO8', 'PO9', + 'PO10', 'STI 014'] + # fmt: on + chtypes = ["eeg"] * 32 + ["stim"] + + D = loadmat(file_path)["mat_data"].T + if subject % 2 == 1: + S = D[1:33, :] * 1e-6 + else: + S = D[33:65, :] * 1e-6 + stim = D[-1, :] + idx_target = (stim >= 60) & (stim <= 85) + idx_nontarget = (stim >= 20) & (stim <= 45) + stim[idx_target] = 2 + stim[idx_nontarget] = 1 + X = np.concatenate([S, stim[None, :]]) + sfreq = 512 + + info = mne.create_info( + ch_names=chnames, + sfreq=sfreq, + ch_types=chtypes, + verbose=False, + ) + raw = mne.io.RawArray(data=X, info=info, verbose=False) + raw.set_montage(make_standard_montage("standard_1020")) + + if ds.code == "Brain Invaders 2012": + # get rid of the Fz channel (it is the ground) + raw.info["bads"] = ["Fz"] + raw.pick_types(eeg=True, stim=True) + + sessions[session_name][run_name] = raw + return sessions + + +def _bi_data_path( # noqa: C901 + ds, subject, path=None, force_update=False, update_path=None, verbose=None +): + if subject not in ds.subject_list: + raise (ValueError("Invalid subject number")) + + subject_paths = [] + if ds.code == "Brain Invaders 2012": + # check if has the .zip + url = f"{BI2012a_URL}subject_{subject:02}.zip" + path_zip = dl.data_dl(url, "BRAININVADERS2012") + path_folder = path_zip.strip(f"subject_{subject:02}.zip") + + # check if has to unzip + if not (osp.isdir(path_folder + f"subject_{subject}")) and not ( + osp.isdir(path_folder + f"subject_0{subject}") + ): + zip_ref = z.ZipFile(path_zip, "r") + zip_ref.extractall(path_folder) + + # filter the data regarding the experimental conditions + if ds.training: + subject_paths.append( + osp.join(f"{path_folder}subject_{subject:02}", "training.mat") + ) + if ds.online: + subject_paths.append( + osp.join(f"{path_folder}subject_{subject:02}", "online.mat") + ) + + elif ds.code == "Brain Invaders 2013a": + if subject in [1, 2, 3, 4, 5, 6, 7]: + zipname_list = [ + f"subject{subject:02}_session{i:02}.zip" for i in range(1, 8 + 1) + ] + else: + zipname_list = [f"subject{subject:02}.zip"] + + for i, zipname in enumerate(zipname_list): + url = BI2013a_URL + zipname + path_zip = dl.data_dl(url, "BRAININVADERS2013") + path_folder = path_zip.strip(zipname) + + # check if has the directory for the subject + directory = f"{path_folder}subject_{subject:02}" + if not (osp.isdir(directory)): + os.makedirs(directory) + + if not (osp.isdir(osp.join(directory, f"Session{i 
+ 1}"))): + zip_ref = z.ZipFile(path_zip, "r") + zip_ref.extractall(path_folder) + os.makedirs(osp.join(directory, f"Session{i + 1}")) + copy_tree(path_zip.strip(".zip"), directory) + shutil.rmtree(path_zip.strip(".zip")) + + # filter the data regarding the experimental conditions + meta_file = directory + os.sep + "meta.yml" + with open(meta_file, "r") as stream: + meta = yaml.load(stream, Loader=yaml.FullLoader) + conditions = [] + if ds.adaptive: + conditions = conditions + ["adaptive"] + if ds.nonadaptive: + conditions = conditions + ["nonadaptive"] + types = [] + if ds.training: + types = types + ["training"] + if ds.online: + types = types + ["online"] + filenames = [] + for run in meta["runs"]: + run_condition = run["experimental_condition"] + run_type = run["type"] + if (run_condition in conditions) and (run_type in types): + filenames = filenames + [run["filename"]] + + # list the filepaths for this subject + for filename in filenames: + subject_paths = subject_paths + glob.glob( + osp.join(directory, "Session*", filename.replace(".gdf", ".mat")) + ) + + elif ds.code == "Brain Invaders 2014a": + url = f"{BI2014a_URL}subject_{subject:02}.zip" + path_zip = dl.data_dl(url, "BRAININVADERS2014A") + path_folder = path_zip.strip(f"subject_{subject:02}.zip") + + # check if has to unzip + path_folder_subject = f"{path_folder}subject_{subject:02}" + if not (osp.isdir(path_folder_subject)): + os.mkdir(path_folder_subject) + zip_ref = z.ZipFile(path_zip, "r") + zip_ref.extractall(path_folder_subject) + + # filter the data regarding the experimental conditions + subject_paths.append(osp.join(path_folder_subject, f"subject_{subject:02}.mat")) + + elif ds.code == "Brain Invaders 2014b": + group = (subject + 1) // 2 + url = f"{BI2014b_URL}group_{group:02}_mat.zip" + path_zip = dl.data_dl(url, "BRAININVADERS2014B") + path_folder = path_zip.strip(f"group_{group:02}_mat.zip") + + # check if has to unzip + path_folder_subject = f"{path_folder}group_{group:02}" + if not (osp.isdir(path_folder_subject)): + os.mkdir(path_folder_subject) + zip_ref = z.ZipFile(path_zip, "r") + zip_ref.extractall(path_folder_subject) + + subject_paths = [] + # filter the data regarding the experimental conditions + if subject % 2 == 1: + subject_paths.append( + osp.join(path_folder_subject, f"group_{group:02}_sujet_01.mat") + ) + else: + subject_paths.append( + osp.join(path_folder_subject, f"group_{group:02}_sujet_02.mat") + ) + # Collaborative session are not loaded + # subject_paths.append(osp.join(path_folder_subject, f'group_{(subject+1)//2:02}.mat') + + elif ds.code == "Brain Invaders 2015a": + # TODO: possible fusion with 2014a? + url = f"{BI2015a_URL}subject_{subject:02}_mat.zip" + path_zip = dl.data_dl(url, "BRAININVADERS2015A") + path_folder = path_zip.strip(f"subject_{subject:02}.zip") + + # check if has to unzip + path_folder_subject = f"{path_folder}subject_{subject:02}" + if not (osp.isdir(path_folder_subject)): + os.mkdir(path_folder_subject) + zip_ref = z.ZipFile(path_zip, "r") + zip_ref.extractall(path_folder_subject) + + # filter the data regarding the experimental conditions + subject_paths = [] + for session in [1, 2, 3]: + subject_paths.append( + osp.join( + path_folder_subject, f"subject_{subject:02}_session_{session:02}.mat" + ) + ) + elif ds.code == "Brain Invaders 2015b": + # TODO: possible fusion with 2014b? 
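# A minimal sketch of the meta.yml run filtering used above for the bi2013a
# branch, assuming only the structure that loop relies on (a top-level "runs"
# list whose entries carry "filename", "experimental_condition" and "type"
# keys); the file names and condition values below are illustrative, not
# taken from a real session.
import yaml

meta = yaml.load(
    """
runs:
  - {filename: run_01.gdf, experimental_condition: adaptive, type: training}
  - {filename: run_02.gdf, experimental_condition: adaptive, type: online}
""",
    Loader=yaml.FullLoader,
)
conditions, types = ["adaptive"], ["online"]
filenames = [
    run["filename"]
    for run in meta["runs"]
    if run["experimental_condition"] in conditions and run["type"] in types
]
# filenames == ["run_02.gdf"]; the loader then globs the matching .mat files.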
+ url = f"{BI2015b_URL}group_{(subject+1)//2:02}_mat.zip" + path_zip = dl.data_dl(url, "BRAININVADERS2015B") + path_folder = path_zip.strip(f"group_{(subject+1)//2:02}_mat.zip") + # check if has to unzip + path_folder_subject = f"{path_folder}group_{(subject+1)//2:02}" + if not (osp.isdir(path_folder_subject)): + os.mkdir(path_folder_subject) + zip_ref = z.ZipFile(path_zip, "r") + zip_ref.extractall(path_folder_subject) + + subject_paths = [] + subject_paths = [ + osp.join( + path_folder, + f"group_{(subject+1)//2:02}", + f"group_{(subject+1)//2:02}_s{i}", + ) + for i in range(1, 5) + ] + + return subject_paths + + +class bi2012(BaseDataset): + """P300 dataset bi2012 from a "Brain Invaders" experiment + + Dataset following the setup from [1]_ carried-out at University of + Grenoble Alpes. + + **Dataset Description** + + This dataset contains electroencephalographic (EEG) recordings of 25 subjects testing + the Brain Invaders, a visual P300 Brain-Computer Interface inspired by the famous vintage + video game Space Invaders (Taito, Tokyo, Japan). The visual P300 is an event-related + potential elicited by a visual stimulation, peaking 240-600 ms after stimulus onset. EEG + data were recorded by 16 electrodes in an experiment that took place in the GIPSA-lab, + Grenoble, France, in 2012). A full description of the experiment is available in [1]_. + + **Authors** + + Principal Investigator: B.Sc. Gijsbrecht Franciscus Petrus Van Veen + Technical Supervisors: Ph.D. Alexandre Barachant, Eng. Anton Andreev, Eng. Grégoire Cattan, + Eng. Pedro. L. C. Rodrigues + Scientific Supervisor: Ph.D. Marco Congedo + + **ID of the dataset** + BI.EEG.2012-GIPSA + + Notes + ----- + .. versionadded:: 0.4.6 + + References + ---------- + + .. [1] Van Veen, G., Barachant, A., Andreev, A., Cattan, G., Rodrigues, P. C., & + Congedo, M. (2019). Building Brain Invaders: EEG data of an experimental validation. + arXiv preprint arXiv:1905.05182. + """ + + def __init__(self, Training=True, Online=False): + super().__init__( + subjects=list(range(1, 26)), + sessions_per_subject=1, + events=dict(Target=2, NonTarget=1), + code="Brain Invaders 2012", + interval=[0, 1], + paradigm="p300", + doi="https://doi.org/10.5281/zenodo.2649006", + ) + + self.training = Training + self.online = Online + + def _get_single_subject_data(self, subject): + """return data for a single subject""" + return _bi_get_subject_data(self, subject) + + def data_path( + self, subject, path=None, force_update=False, update_path=None, verbose=None + ): + return _bi_data_path(self, subject, path, force_update, update_path, verbose) class bi2013a(BaseDataset): """P300 dataset bi2013a from a "Brain Invaders" experiment Dataset following the setup from [1]_ carried-out at University of - Grenoble Alpes [1]_. + Grenoble Alpes. **Dataset Description** This dataset concerns an experiment carried out at GIPSA-lab (University of Grenoble Alpes, CNRS, Grenoble-INP) in 2013. - Principal Investigators: Erwan Vaineau, Dr. Alexandre Barachant - Scientific Supervisor : Dr. Marco Congedo - Technical Supervisor : Anton Andreev - - The experiment uses the Brain Invaders P300-based Brain-Computer Interface - [7]_, which uses the Open-ViBE platform for on-line EEG data acquisition and - processing [1]_ [9]_. For classification purposes the Brain Invaders - implements on-line Riemannian MDM classifiers [2]_ [3]_ [4]_ [6]_. This experiment - features both a training-test (classical) mode of operation and a - calibration-less mode of operation [4]_ [5]_ [6]_ [8]_. 
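# A quick usage sketch for the bi2012 class defined above, assuming the import
# added to moabb/datasets/__init__.py in this patch and MOABB's existing P300
# paradigm API; subject 1 is only an example, and the call downloads the data
# on first use.
from moabb.datasets import bi2012
from moabb.paradigms import P300

dataset = bi2012(Training=True, Online=False)
paradigm = P300()
X, labels, metadata = paradigm.get_data(dataset=dataset, subjects=[1])
print(X.shape, metadata["session"].unique())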
- The recordings concerned 24 subjects in total. Subjects 1 to 7 participated to eight sessions, run in different days, subject 8 to 24 participated to one session. Each session consisted in two runs, one in a Non-Adaptive @@ -42,22 +412,22 @@ class bi2013a(BaseDataset): was a Training (calibration) phase and an Online phase, always passed in this order. In the non-Adaptive run the data from the Training phase was used for classifying the trials on the Online phase using the training-test - version of the MDM algorithm [3]_ [4]_. In the Adaptive run, the data from the + version of the MDM algorithm [2]_. In the Adaptive run, the data from the training phase was not used at all, instead the classifier was initialized with generic class geometric means and continuously adapted to the incoming - data using the Riemannian method explained in [4]_. Subjects were completely + data using the Riemannian method explained in [2]_. Subjects were completely blind to the mode of operation and the two runs appeared to them identical. In the Brain Invaders P300 paradigm, a repetition is composed of 12 flashes, of which 2 include the Target symbol (Target flashes) and 10 do - not (non-Target flash). Please see [7]_ for a description of the paradigm. + not (non-Target flash). Please see [3]_ for a description of the paradigm. For this experiment, in the Training phases the number of flashes is fixed (80 Target flashes and 400 non-Target flashes). In the Online phases the number of Target and non-Target still are in a ratio 1/5, however their number is variable because the Brain Invaders works with a fixed number of game levels, however the number of repetitions needed to destroy the target (hence to proceed to the next level) depends on the user’s performance - [4]_ [5]_. In any case, since the classes are unbalanced, an appropriate score + [2]_. In any case, since the classes are unbalanced, an appropriate score must be used for quantifying the performance of classification methods (e.g., balanced accuracy, AUC methods, etc). @@ -71,50 +441,37 @@ class bi2013a(BaseDataset): * Reference: left ear-lobe. * Ground: N/A. + **Authors** + + Principal Investigators: Erwan Vaineau, Dr. Alexandre Barachant + Scientific Supervisor : Dr. Marco Congedo + Technical Supervisor : Anton Andreev + References ---------- - .. [1] Arrouët C, Congedo M, Marvie J-E, Lamarche F, Lècuyer A, Arnaldi B - (2005) Open-ViBE: a 3D Platform for Real-Time Neuroscience. - Journal of Neurotherapy, 9(1), 3-25. - .. [2] Barachant A, Bonnet S, Congedo M, Jutten C (2013) Classification of - covariance matrices using a Riemannian-based kernel for BCI - applications. Neurocomputing 112, 172-178. - .. [3] Barachant A, Bonnet S, Congedo M, Jutten C (2012) Multi-Class Brain - Computer Interface, Classification by Riemannian Geometry. - IEEE Transactions on Biomedical Engineering 59(4), 920-928 - .. [4] Barachant A, Congedo M (2014) A Plug & Play P300 BCI using + .. [1] Vaineau, E., Barachant, A., Andreev, A., Rodrigues, P. C., + Cattan, G. & Congedo, M. (2019). Brain invaders adaptive + versus non-adaptive P300 brain-computer interface dataset. + arXiv preprint arXiv:1904.09111. + .. [2] Barachant A, Congedo M (2014) A Plug & Play P300 BCI using Information Geometry. arXiv:1409.0107. - .. [5] Congedo M, Barachant A, Andreev A (2013) A New Generation of - Brain-Computer Interface Based on Riemannian Geometry. - arXiv:1310.8115. - .. 
[6] Congedo M, Barachant A, Bhatia R (2017) Riemannian Geometry for - EEG-based Brain-Computer Interfaces; a Primer and a Review. - Brain-Computer Interfaces, 4(3), 155-174. .. [7] Congedo M, Goyat M, Tarrin N, Ionescu G, Rivet B,Varnet L, Rivet B, Phlypo R, Jrad N, Acquadro M, Jutten C (2011) “Brain Invaders”: a prototype of an open-source P300-based video game working with the OpenViBE platform. Proc. IBCI Conf., Graz, Austria, 280-283. - .. [8] Congedo M, Korczowski L, Delorme A, Lopes da Silva F. (2016) - Spatio-temporal common pattern: A companion method for ERP analysis - in the time domain. Journal of Neuroscience Methods, 267, 74-88. - .. [9] Renard Y, Lotte F, Gibert G, Congedo M, Maby E, Delannoy V, Bertrand - O, Lécuyer A (2010) OpenViBE: An Open-Source Software Platform to - Design, Test and Use Brain-Computer Interfaces in Real and Virtual - Environments. PRESENCE : Teleoperators and Virtual Environments - 19(1), 35-53. """ def __init__(self, NonAdaptive=True, Adaptive=False, Training=True, Online=False): super().__init__( - subjects=list(range(1, 24 + 1)), + subjects=list(range(1, 25)), sessions_per_subject=1, - events=dict(Target=1, NonTarget=2), + events=dict(Target=33285, NonTarget=33286), code="Brain Invaders 2013a", interval=[0, 1], paradigm="p300", - doi="", + doi="https://doi.org/10.5281/zenodo.2669187", ) self.adaptive = Adaptive @@ -124,75 +481,233 @@ def __init__(self, NonAdaptive=True, Adaptive=False, Training=True, Online=False def _get_single_subject_data(self, subject): """return data for a single subject""" + return _bi_get_subject_data(self, subject) - file_path_list = self.data_path(subject) - sessions = {} - for file_path in file_path_list: + def data_path( + self, subject, path=None, force_update=False, update_path=None, verbose=None + ): + return _bi_data_path(self, subject, path, force_update, update_path, verbose) - session_number = file_path.split(os.sep)[-2].replace("Session", "") - session_name = "session_" + session_number - if session_name not in sessions.keys(): - sessions[session_name] = {} - run_number = file_path.split(os.sep)[-1] - run_number = run_number.split("_")[-1] - run_number = run_number.split(".gdf")[0] - run_name = "run_" + run_number +class bi2014a(BaseDataset): + """P300 dataset bi2014a from a "Brain Invaders" experiment - raw_original = mne.io.read_raw_gdf(file_path, preload=True) - raw_original.rename_channels({"FP1": "Fp1", "FP2": "Fp2"}) - raw_original.set_montage(make_standard_montage("standard_1020")) + **Dataset Description** + + This dataset contains electroencephalographic (EEG) recordings of 71 subjects + playing to a visual P300 Brain-Computer Interface (BCI) videogame named Brain Invaders. + The interface uses the oddball paradigm on a grid of 36 symbols (1 Target, 35 Non-Target) + that are flashed pseudo-randomly to elicit the P300 response. EEG data were recorded + using 16 active dry electrodes with up to three game sessions. The experiment took place + at GIPSA-lab, Grenoble, France, in 2014. A full description of the experiment is available + at [1]_. The ID of this dataset is bi2014a. - sessions[session_name][run_name] = raw_original + **Authors** - return sessions + Investigators: Eng. Louis Korczowski, B. Sc. Ekaterina Ostaschenko + Technical Support: Eng. Anton Andreev, Eng. Grégoire Cattan, Eng. Pedro. L. C. Rodrigues, + M. Sc. Violette Gautheret + Scientific Supervisor: Ph.D. Marco Congedo + + Notes + ----- + .. versionadded:: 0.4.6 + + References + ---------- + + .. 
+    .. [1] Korczowski, L., Ostaschenko, E., Andreev, A., Cattan, G., Rodrigues, P. L. C.,
+           Gautheret, V., & Congedo, M. (2019). Brain Invaders calibration-less P300-based
+           BCI using dry EEG electrodes Dataset (bi2014a).
+           https://hal.archives-ouvertes.fr/hal-02171575
+    """
+
+    def __init__(self):
+        super().__init__(
+            subjects=list(range(1, 65)),
+            sessions_per_subject=1,
+            events=dict(Target=2, NonTarget=1),
+            code="Brain Invaders 2014a",
+            interval=[0, 1],
+            paradigm="p300",
+            doi="https://doi.org/10.5281/zenodo.3266222",
+        )
+
+    def _get_single_subject_data(self, subject):
+        """return data for a single subject"""
+        return _bi_get_subject_data(self, subject)

     def data_path(
         self, subject, path=None, force_update=False, update_path=None, verbose=None
     ):
+        return _bi_data_path(self, subject, path, force_update, update_path, verbose)

-        if subject not in self.subject_list:
-            raise (ValueError("Invalid subject number"))

-        # check if has the .zip
-        url = "{:s}subject{:d}.zip".format(BI2013a_URL, subject)
-        path_zip = dl.data_dl(url, "BRAININVADERS")
-        path_folder = path_zip.strip("subject{:d}.zip".format(subject))
+class bi2014b(BaseDataset):
+    """P300 dataset bi2014b from a "Brain Invaders" experiment

-        # check if has to unzip
-        if not (os.path.isdir(path_folder + "subject{:d}".format(subject))):
-            print("unzip", path_zip)
-            zip_ref = zipfile.ZipFile(path_zip, "r")
-            zip_ref.extractall(path_folder)
+    **Dataset Description**

-        # filter the data regarding the experimental conditions
-        meta_file = os.path.join("subject{:d}".format(subject), "meta.yml")
-        meta_path = path_folder + meta_file
-        with open(meta_path, "r") as stream:
-            meta = yaml.load(stream, Loader=yaml.FullLoader)
-        conditions = []
-        if self.adaptive:
-            conditions = conditions + ["adaptive"]
-        if self.nonadaptive:
-            conditions = conditions + ["nonadaptive"]
-        types = []
-        if self.training:
-            types = types + ["training"]
-        if self.online:
-            types = types + ["online"]
-        filenames = []
-        for run in meta["runs"]:
-            run_condition = run["experimental_condition"]
-            run_type = run["type"]
-            if (run_condition in conditions) and (run_type in types):
-                filenames = filenames + [run["filename"]]
+    This dataset contains electroencephalographic (EEG) recordings of 38 subjects playing in
+    pairs (19 pairs) with the multi-user version of a visual P300-based Brain-Computer Interface (BCI)
+    named Brain Invaders. The interface uses the oddball paradigm on a grid of 36 symbols (1 Target,
+    35 Non-Target) that are flashed pseudo-randomly to elicit a P300 response, an evoked potential
+    appearing about 300 ms after stimulation onset. EEG data were recorded using 32 active wet
+    electrodes per subject (total: 64 electrodes) during three randomized conditions
+    (Solo1, Solo2, Collaboration). The experiment took place at GIPSA-lab, Grenoble, France, in 2014.
+    A full description of the experiment is available at [1]_. The ID of this dataset is bi2014b.

-        # list the filepaths for this subject
-        subject_paths = []
-        for filename in filenames:
-            subject_paths = subject_paths + glob.glob(
-                os.path.join(
-                    path_folder, "subject{:d}".format(subject), "Session*", filename
-                )
-            )  # noqa
-        return subject_paths
+    **Authors**
+
+    Investigators: Eng. Louis Korczowski, B. Sc. Ekaterina Ostaschenko
+    Technical Support: Eng. Anton Andreev, Eng. Grégoire Cattan, Eng. Pedro L. C. Rodrigues,
+    M. Sc. Violette Gautheret
+    Scientific Supervisor: Ph.D. Marco Congedo
+
+    Notes
+    -----
+    .. versionadded:: 0.4.6
+
+    References
+    ----------
+
+    .. [1] Korczowski, L., Ostaschenko, E., Andreev, A., Cattan, G., Rodrigues, P. L. C.,
+           Gautheret, V., & Congedo, M. (2019). Brain Invaders Solo versus Collaboration:
+           Multi-User P300-Based Brain-Computer Interface Dataset (bi2014b).
+           https://hal.archives-ouvertes.fr/hal-02173958
+    """
+
+    def __init__(self):
+        super().__init__(
+            subjects=list(range(1, 38)),
+            sessions_per_subject=1,
+            events=dict(Target=2, NonTarget=1),
+            code="Brain Invaders 2014b",
+            interval=[0, 1],
+            paradigm="p300",
+            doi="https://doi.org/10.5281/zenodo.3267301",
+        )
+
+    def _get_single_subject_data(self, subject):
+        """return data for a single subject"""
+        return _bi_get_subject_data(self, subject)
+
+    def data_path(
+        self, subject, path=None, force_update=False, update_path=None, verbose=None
+    ):
+        return _bi_data_path(self, subject, path, force_update, update_path, verbose)
+
+
+class bi2015a(BaseDataset):
+    """P300 dataset bi2015a from a "Brain Invaders" experiment
+
+    **Dataset Description**
+
+    This dataset contains electroencephalographic (EEG) recordings
+    of 43 subjects playing a visual P300 Brain-Computer Interface (BCI)
+    videogame named Brain Invaders. The interface uses the oddball paradigm
+    on a grid of 36 symbols (1 Target, 35 Non-Target) that are flashed
+    pseudo-randomly to elicit the P300 response. EEG data were recorded using
+    32 active wet electrodes under three conditions: flash duration of 50 ms,
+    80 ms or 110 ms. The experiment took place at GIPSA-lab, Grenoble, France,
+    in 2015. A full description of the experiment is available at [1]_. The ID
+    of this dataset is bi2015a.
+
+    **Authors**
+
+    Investigators: Eng. Louis Korczowski, B. Sc. Martine Cederhout
+    Technical Support: Eng. Anton Andreev, Eng. Grégoire Cattan, Eng. Pedro L. C. Rodrigues,
+    M. Sc. Violette Gautheret
+    Scientific Supervisor: Ph.D. Marco Congedo
+
+    Notes
+    -----
+    .. versionadded:: 0.4.6
+
+    References
+    ----------
+
+    .. [1] Korczowski, L., Cederhout, M., Andreev, A., Cattan, G., Rodrigues, P. L. C.,
+           Gautheret, V., & Congedo, M. (2019). Brain Invaders calibration-less P300-based
+           BCI with modulation of flash duration Dataset (bi2015a)
+           https://hal.archives-ouvertes.fr/hal-02172347
+    """
+
+    def __init__(self):
+        super().__init__(
+            subjects=list(range(1, 44)),
+            sessions_per_subject=3,
+            events=dict(Target=2, NonTarget=1),
+            code="Brain Invaders 2015a",
+            interval=[0, 1],
+            paradigm="p300",
+            doi="https://doi.org/10.5281/zenodo.3266929",
+        )
+
+    def _get_single_subject_data(self, subject):
+        """return data for a single subject"""
+        return _bi_get_subject_data(self, subject)
+
+    def data_path(
+        self, subject, path=None, force_update=False, update_path=None, verbose=None
+    ):
+        return _bi_data_path(self, subject, path, force_update, update_path, verbose)
+
+
+class bi2015b(BaseDataset):
+    """P300 dataset bi2015b from a "Brain Invaders" experiment
+
+    **Dataset Description**
+
+    This dataset contains electroencephalographic (EEG) recordings
+    of 44 subjects playing in pairs with the multi-user version of a visual
+    P300 Brain-Computer Interface (BCI) named Brain Invaders. The interface
+    uses the oddball paradigm on a grid of 36 symbols (1 or 2 Target,
+    35 or 34 Non-Target) that are flashed pseudo-randomly to elicit the
+    P300 response. EEG data were recorded using 32 active wet electrodes
+    per subject (total: 64 electrodes) during four randomized conditions
+    (Cooperation 1-Target, Cooperation 2-Targets, Competition 1-Target,
+    Competition 2-Targets). The experiment took place at GIPSA-lab, Grenoble,
+    France, in 2015. A full description of the experiment is available
+    at [1]_. The ID of this dataset is bi2015b.
+
+    **Authors**
+
+    Investigators: Eng. Louis Korczowski, B. Sc. Martine Cederhout
+    Technical Support: Eng. Anton Andreev, Eng. Grégoire Cattan, Eng. Pedro L. C. Rodrigues,
+    M. Sc. Violette Gautheret
+    Scientific Supervisor: Ph.D. Marco Congedo
+
+    Notes
+    -----
+    .. versionadded:: 0.4.6
+
+    References
+    ----------
+
+    .. [1] Korczowski, L., Cederhout, M., Andreev, A., Cattan, G., Rodrigues, P. L. C.,
+           Gautheret, V., & Congedo, M. (2019). Brain Invaders Cooperative versus Competitive:
+           Multi-User P300-based Brain-Computer Interface Dataset (bi2015b)
+           https://hal.archives-ouvertes.fr/hal-02172347
+    """
+
+    def __init__(self):
+        super().__init__(
+            subjects=list(range(1, 45)),
+            sessions_per_subject=1,
+            events=dict(Target=2, NonTarget=1),
+            code="Brain Invaders 2015b",
+            interval=[0, 1],
+            paradigm="p300",
+            doi="https://doi.org/10.5281/zenodo.3267307",
+        )
+
+    def _get_single_subject_data(self, subject):
+        """return data for a single subject"""
+        return _bi_get_subject_data(self, subject)
+
+    def data_path(
+        self, subject, path=None, force_update=False, update_path=None, verbose=None
+    ):
+        return _bi_data_path(self, subject, path, force_update, update_path, verbose)

From b1e374ca71ef5c6088eebec8546359f14ca90709 Mon Sep 17 00:00:00 2001
From: Jan Sosulski
Date: Wed, 6 Apr 2022 16:52:03 +0200
Subject: [PATCH 16/19] Update README.md (#284)

Alternatively remove this entry. Although I rely on it being there :)

Co-authored-by: Sylvain Chevallier
---
 README.md | 2 +-
 1 file changed, 1 insertion(+), 1 deletion(-)

diff --git a/README.md b/README.md
index 119239a35..54b638653 100644
--- a/README.md
+++ b/README.md
@@ -33,7 +33,7 @@ one of the sections below, or just scroll down to find out more.
 - [Supported datasets](#supported-datasets)
 - [Who are we?](#who-are-we)
 - [Get in touch](#contact-us)
-- [Documentation](#documentation)
+- [Documentation][link_moabb_docs]
 - [Architecture and main concepts](#architecture-and-main-concepts)
 - [Citing MOABB and related publications](#citing-moabb-and-related-publications)

From 5b384b2428ebeab2521977cbc8fde384558aa848 Mon Sep 17 00:00:00 2001
From: Jan Sosulski
Date: Wed, 6 Apr 2022 17:12:01 +0200
Subject: [PATCH 17/19] Generalize default path for erp visualization (#279)

* Generalize default path for erp visualization

* Update scripts/data_visualization_p300.py

* keep os.path instead of Path

* correct import for isort

Co-authored-by: Sylvain Chevallier
---
 scripts/data_visualization_p300.py | 13 +++++++------
 1 file changed, 7 insertions(+), 6 deletions(-)

diff --git a/scripts/data_visualization_p300.py b/scripts/data_visualization_p300.py
index 3c06d5a1b..46ce90b75 100644
--- a/scripts/data_visualization_p300.py
+++ b/scripts/data_visualization_p300.py
@@ -14,8 +14,9 @@
 # Authors: Jan Sosulski
 #
 # License: BSD (3-clause)
+import os
+import os.path as osp
 import warnings
-from pathlib import Path

 import matplotlib
 import mne
@@ -142,7 +143,7 @@ def epo_summary(epos):
     return summary, info_str


-FIGURES_PATH = Path("/home/jan/bci_data/figures/moabb_erps")
+FIGURES_PATH = osp.join(osp.expanduser("~"), "moabb_figures", "erps")
 # Changing this to False re-generates all plots even if they exist. Use with caution.
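 # For reference, the pathlib idioms replaced in this patch map onto os.path
 # as follows (a sketch; a, b and p stand for arbitrary path strings):
 #     Path(a) / b                                 ->  osp.join(a, b)
 #     Path(p).mkdir(parents=True, exist_ok=True)  ->  os.makedirs(p, exist_ok=True)
 #     Path(p).exists()                            ->  osp.exists(p)
 # os.path functions take and return plain strings, which is why the cache
 # check below must use osp.exists(subject_path), not subject_path.exists().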
 cache_plots = True

@@ -180,11 +181,11 @@ def epo_summary(epos):

     print(f"Processing dataset: {dset_name}")

-    data_path = FIGURES_PATH / dset_name  # Path of the dataset folder
-    Path(data_path).mkdir(parents=True, exist_ok=True)
+    data_path = osp.join(FIGURES_PATH, dset_name)  # path of the dataset folder
+    os.makedirs(data_path, exist_ok=True)
     all_subjects_cached = True
     for subject in dset.subject_list:
-        subject_path = data_path / f"subject_{subject}"
-        if cache_plots and subject_path.exists():
+        subject_path = osp.join(data_path, f"subject_{subject}")
+        if cache_plots and osp.exists(subject_path):
             continue
         all_subjects_cached = False

From 451dcb8422a90a714dce0adbe8c2636722e3b50a Mon Sep 17 00:00:00 2001
From: Jan Sosulski
Date: Wed, 6 Apr 2022 17:24:18 +0200
Subject: [PATCH 18/19] Elicit warning if lambda functions are used (#278)

* added tests + elicit warning if lambda functions are used as part of the classifier

* Update moabb/tests/evaluations.py

* Update moabb/tests/evaluations.py

* lint code

* correct windows test skip

Co-authored-by: Sylvain Chevallier
---
 moabb/analysis/results.py  | 10 ++++++++++
 moabb/tests/evaluations.py | 37 +++++++++++++++++++++++++++++++++++++
 2 files changed, 47 insertions(+)

diff --git a/moabb/analysis/results.py b/moabb/analysis/results.py
index 03f378dc7..eb1178654 100644
--- a/moabb/analysis/results.py
+++ b/moabb/analysis/results.py
@@ -2,6 +2,7 @@
 import os
 import os.path as osp
 import re
+import warnings
 from datetime import datetime

 import h5py
@@ -17,6 +18,15 @@ def get_string_rep(obj):
         str_repr = repr(obj.get_params())
     else:
         str_repr = repr(obj)
+    if " at " in str_repr:
+        warnings.warn(
+            "You are probably using a classifier with a lambda function"
+            " as an attribute. Lambda functions can only be identified"
+            " by memory address, which MOABB does not consider. To avoid"
+            " issues, you can use named functions defined using the def"
+            " keyword instead.",
+            RuntimeWarning,
+        )
     str_no_addresses = re.sub("0x[a-z0-9]*", "0x__", str_repr)
     return str_no_addresses.replace("\n", "").encode("utf8")

diff --git a/moabb/tests/evaluations.py b/moabb/tests/evaluations.py
index d42a48a1d..e4bc6648f 100644
--- a/moabb/tests/evaluations.py
+++ b/moabb/tests/evaluations.py
@@ -1,14 +1,18 @@
 import os
 import os.path as osp
+import platform
 import unittest
+import warnings
 from collections import OrderedDict

 import numpy as np
+import sklearn.base
 from pyriemann.estimation import Covariances
 from pyriemann.spatialfilters import CSP
 from sklearn.discriminant_analysis import LinearDiscriminantAnalysis as LDA
 from sklearn.pipeline import make_pipeline

+from moabb.analysis.results import get_string_rep
 from moabb.datasets.fake import FakeDataset
 from moabb.evaluations import evaluations as ev
 from moabb.paradigms.motor_imagery import FakeImageryParadigm
@@ -21,6 +25,13 @@
     os.makedirs(osp.join(osp.expanduser("~"), "mne_data"))


+class DummyClassifier(sklearn.base.BaseEstimator):
+    __slots__ = "kernel"
+
+    def __init__(self, kernel):
+        self.kernel = kernel
+
+
 class Test_WithinSess(unittest.TestCase):
     """This is actually integration testing but I don't know how to do this better.
A paradigm implements pre-processing so it needs files to run MNE @@ -177,5 +188,31 @@ def test_compatible_dataset(self): self.assertTrue(self.eval.is_valid(dataset=ds)) +class Test_LambdaWarning(Test_WithinSess): + def setUp(self): + self.eval = ev.WithinSessionEvaluation( + paradigm=FakeImageryParadigm(), datasets=[dataset] + ) + + def test_lambda_warning(self): + def explicit_kernel(x): + return x ** 3 + + c1 = DummyClassifier(kernel=lambda x: x ** 2) + c2 = DummyClassifier(kernel=lambda x: 5 * x) + + c3 = DummyClassifier(kernel=explicit_kernel) + + self.assertFalse(repr(c1) == repr(c2)) + if platform.system() != "Windows": + with self.assertWarns(RuntimeWarning): + self.assertTrue(get_string_rep(c1) == get_string_rep(c2)) + + # I do not know an elegant way to check for no warnings + with warnings.catch_warnings(record=True) as w: + get_string_rep(c3) + self.assertTrue(len(w) == 0) + + if __name__ == "__main__": unittest.main() From 3fb45aa8142bb1202fee8de4ecc3a5267e5af8af Mon Sep 17 00:00:00 2001 From: Sylvain Chevallier Date: Thu, 7 Apr 2022 12:08:21 +0200 Subject: [PATCH 19/19] update to 0.4.6 (#286) --- docs/source/whats_new.rst | 21 +++++++++++++++++++-- moabb/__init__.py | 2 +- pyproject.toml | 2 +- 3 files changed, 21 insertions(+), 4 deletions(-) diff --git a/docs/source/whats_new.rst b/docs/source/whats_new.rst index ad255dca1..c29d7527d 100644 --- a/docs/source/whats_new.rst +++ b/docs/source/whats_new.rst @@ -30,13 +30,30 @@ API changes - None - -Version - 0.4.5 (Stable - PyPi) +Version - 0.4.6 (Stable - PyPi) --------------------------------- Enhancements ~~~~~~~~~~~~ +- Add P300 BrainInvaders datasets (:gh:`283` by `Sylvain Chevallier`_) +- Add explicit warning when lambda function are used to parametrize pipelines (:gh:`278` by `Jan Sosulski`_) + + +Bugs +~~~~ + +- Correct default path for ERP visualization (:gh:`279` by `Jan Sosulski`_) +- Correct documentation (:gh:`282` and :gh:`284` by `Jan Sosulski`_) + + + +Version - 0.4.5 +--------------- + +Enhancements +~~~~~~~~~~~~ + - Progress bars, pooch, tqdm (:gh:`258` by `Divyesh Narayanan`_ and `Sylvain Chevallier`_) - Adding test and example for set_download_dir (:gh:`249` by `Divyesh Narayanan`_) - Update to newer version of Schirrmeister2017 dataset (:gh:`265` by `Robin Schirrmeister`_) diff --git a/moabb/__init__.py b/moabb/__init__.py index d1ad9a8de..691a91f11 100644 --- a/moabb/__init__.py +++ b/moabb/__init__.py @@ -1,4 +1,4 @@ # flake8: noqa -__version__ = "0.4.5" +__version__ = "0.4.6" from moabb.utils import set_log_level diff --git a/pyproject.toml b/pyproject.toml index e8f433c71..6c93b4016 100644 --- a/pyproject.toml +++ b/pyproject.toml @@ -1,6 +1,6 @@ [tool.poetry] name = "moabb" -version = "0.4.5" +version = "0.4.6" description = "Mother of All BCI Benchmarks" authors = ["Alexandre Barachant", "Vinay Jayaram"] maintainers = ["Sylvain Chevallier "]
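A closing note on the lambda warning added in PATCH 18: MOABB caches results
keyed on the repr of a pipeline, and a lambda's repr carries no name, only its
memory address, which changes on every run. The standalone sketch below is
illustrative only and not part of any patch above (mask_addresses, square and
cube are made-up names); it shows why lambdas become indistinguishable once
addresses are masked, while named functions keep a meaningful identity:

import re


def mask_addresses(str_repr):
    # Same masking idea as get_string_rep: hide the volatile hex address.
    return re.sub("0x[a-z0-9]*", "0x__", str_repr)


# Two *different* lambdas both mask to '<function <lambda> at 0x__>', so a
# cache keyed on the masked repr could silently mix up their results.
print(mask_addresses(repr(lambda x: x**2)) == mask_addresses(repr(lambda x: x**3)))  # True


def square(x):
    return x**2


def cube(x):
    return x**3


# Named functions keep distinct reprs after masking
# ('<function square at 0x__>' vs '<function cube at 0x__>').
print(mask_addresses(repr(square)) == mask_addresses(repr(cube)))  # False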