diff --git a/.github/workflows/main.yml b/.github/workflows/circle_artifacts.yml similarity index 92% rename from .github/workflows/main.yml rename to .github/workflows/circle_artifacts.yml index 7153fe66a06..b4b246e595f 100644 --- a/.github/workflows/main.yml +++ b/.github/workflows/circle_artifacts.yml @@ -1,7 +1,7 @@ on: [status] jobs: circleci_artifacts_redirector_job: - runs-on: ubuntu-latest + runs-on: ubuntu-20.04 name: Run CircleCI artifacts redirector steps: - name: GitHub Action step diff --git a/.github/workflows/codespell_and_flake.yml b/.github/workflows/codespell_and_flake.yml index d3a38a82ac0..82159f70fab 100644 --- a/.github/workflows/codespell_and_flake.yml +++ b/.github/workflows/codespell_and_flake.yml @@ -10,7 +10,7 @@ on: jobs: style: if: "github.repository == 'mne-tools/mne-python' && !contains(github.event.head_commit.message, '[ci skip]') && !contains(github.event.head_commit.message, '[skip ci]') && !contains(github.event.head_commit.message, '[skip github]')" - runs-on: ubuntu-latest + runs-on: ubuntu-20.04 env: CODESPELL_DIRS: 'mne/ doc/ tutorials/ examples/' CODESPELL_SKIPS: 'doc/auto_*,*.fif,*.eve,*.gz,*.tgz,*.zip,*.mat,*.stc,*.label,*.w,*.bz2,*.annot,*.sulc,*.log,*.local-copy,*.orig_avg,*.inflated_avg,*.gii,*.pyc,*.doctree,*.pickle,*.inv,*.png,*.edf,*.touch,*.thickness,*.nofix,*.volume,*.defect_borders,*.mgh,lh.*,rh.*,COR-*,FreeSurferColorLUT.txt,*.examples,.xdebug_mris_calc,bad.segments,BadChannels,*.hist,empty_file,*.orig,*.js,*.map,*.ipynb,searchindex.dat,install_mne_c.rst,plot_*.rst,*.rst.txt,c_EULA.rst*,*.html,gdf_encodes.txt,*.svg' diff --git a/.github/workflows/compat_minimal.yml b/.github/workflows/compat_minimal.yml index 8e7eafc5c29..f81220b8482 100644 --- a/.github/workflows/compat_minimal.yml +++ b/.github/workflows/compat_minimal.yml @@ -12,7 +12,7 @@ jobs: job: if: "github.repository == 'mne-tools/mne-python' && !contains(github.event.head_commit.message, '[ci skip]') && !contains(github.event.head_commit.message, '[skip ci]') && !contains(github.event.head_commit.message, '[skip github]')" name: 'py3.7' - runs-on: ubuntu-latest + runs-on: ubuntu-20.04 defaults: run: shell: bash diff --git a/.github/workflows/compat_old.yml b/.github/workflows/compat_old.yml index 04cbf08d00d..91a1493c620 100644 --- a/.github/workflows/compat_old.yml +++ b/.github/workflows/compat_old.yml @@ -12,7 +12,7 @@ jobs: job: if: "github.repository == 'mne-tools/mne-python' && !contains(github.event.head_commit.message, '[ci skip]') && !contains(github.event.head_commit.message, '[skip ci]') && !contains(github.event.head_commit.message, '[skip github]')" name: 'py3.6' - runs-on: ubuntu-latest + runs-on: ubuntu-18.04 defaults: run: shell: bash diff --git a/.github/workflows/linux_conda.yml b/.github/workflows/linux_conda.yml index 3123aa67d8c..fcc5919cbce 100644 --- a/.github/workflows/linux_conda.yml +++ b/.github/workflows/linux_conda.yml @@ -12,7 +12,7 @@ jobs: job: if: "github.repository == 'mne-tools/mne-python' && !contains(github.event.head_commit.message, '[ci skip]') && !contains(github.event.head_commit.message, '[skip ci]') && !contains(github.event.head_commit.message, '[skip github]')" name: 'py3.8' - runs-on: ubuntu-latest + runs-on: ubuntu-20.04 defaults: run: shell: bash @@ -20,16 +20,14 @@ jobs: CONDA_ENV: 'environment.yml' DISPLAY: ':99.0' MNE_LOGGING_LEVEL: 'warning' - OPENBLAS_NUM_THREADS: '1' + MKL_NUM_THREADS: '1' PYTHONUNBUFFERED: '1' PYTHON_VERSION: '3.8' steps: - uses: actions/checkout@v2 with: fetch-depth: 0 - - run: | - sudo apt-get install -y 
libxkbcommon-x11-0 libxcb-icccm4 libxcb-image0 libxcb-keysyms1 libxcb-randr0 libxcb-render-util0 libxcb-xinerama0 libxcb-xfixes0 libopengl0 - /sbin/start-stop-daemon --start --quiet --pidfile /tmp/custom_xvfb_99.pid --make-pidfile --background --exec /usr/bin/Xvfb -- :99 -screen 0 1400x900x24 -ac +extension GLX +render -noreset; + - run: ./tools/setup_xvfb.sh name: 'Setup xvfb' - uses: conda-incubator/setup-miniconda@v2 with: diff --git a/.github/workflows/linux_pip.yml b/.github/workflows/linux_pip.yml index f35345c5a02..07cfb18c0b8 100644 --- a/.github/workflows/linux_pip.yml +++ b/.github/workflows/linux_pip.yml @@ -11,8 +11,8 @@ jobs: # PIP + non-default stim channel + log level info job: if: "github.repository == 'mne-tools/mne-python' && !contains(github.event.head_commit.message, '[ci skip]') && !contains(github.event.head_commit.message, '[skip ci]') && !contains(github.event.head_commit.message, '[skip github]')" - name: 'py3.8' - runs-on: ubuntu-latest + name: 'py3.9' + runs-on: ubuntu-20.04 defaults: run: shell: bash @@ -22,14 +22,12 @@ jobs: MNE_STIM_CHANNEL: 'STI101' OPENBLAS_NUM_THREADS: '1' PYTHONUNBUFFERED: '1' - PYTHON_VERSION: '3.8' + PYTHON_VERSION: '3.9' steps: - uses: actions/checkout@v2 with: fetch-depth: 0 - - run: | - sudo apt-get install -y libxkbcommon-x11-0 libxcb-icccm4 libxcb-image0 libxcb-keysyms1 libxcb-randr0 libxcb-render-util0 libxcb-xinerama0 libxcb-xfixes0 libopengl0 - /sbin/start-stop-daemon --start --quiet --pidfile /tmp/custom_xvfb_99.pid --make-pidfile --background --exec /usr/bin/Xvfb -- :99 -screen 0 1400x900x24 -ac +extension GLX +render -noreset; + - run: ./tools/setup_xvfb.sh name: 'Setup xvfb' - uses: actions/setup-python@v2 with: diff --git a/README.rst b/README.rst index 537342019a2..ee494dcbb34 100644 --- a/README.rst +++ b/README.rst @@ -1,11 +1,14 @@ .. -*- mode: rst -*- -|Travis|_ |Azure|_ |Circle|_ |Codecov|_ |PyPI|_ |conda-forge|_ |Zenodo|_ +|GH-Linux|_ |GH-macOS|_ |Azure|_ |Circle|_ |Codecov|_ |PyPI|_ |conda-forge|_ |Zenodo|_ |MNE|_ -.. |Travis| image:: https://api.travis-ci.org/mne-tools/mne-python.svg?branch=master -.. _Travis: https://travis-ci.org/mne-tools/mne-python/branches +.. |GH-Linux| image:: https://github.com/mne-tools/mne-python/workflows/linux%20/%20conda/badge.svg?branch=master +.. _GH-Linux: https://github.com/mne-tools/mne-python/actions?query=branch:master+event:push + +.. |GH-macOS| image:: https://github.com/mne-tools/mne-python/workflows/macos%20/%20conda/badge.svg?branch=master +.. _GH-macOS: https://github.com/mne-tools/mne-python/actions?query=branch:master+event:push .. |Azure| image:: https://dev.azure.com/mne-tools/mne-python/_apis/build/status/mne-tools.mne-python?branchName=master .. 
_Azure: https://dev.azure.com/mne-tools/mne-python/_build/latest?definitionId=1&branchName=master diff --git a/azure-pipelines.yml b/azure-pipelines.yml index 8c06ca8ac03..19cb1192334 100644 --- a/azure-pipelines.yml +++ b/azure-pipelines.yml @@ -174,7 +174,6 @@ stages: OPENBLAS_NUM_THREADS: 1 PYTHONUNBUFFERED: 1 PYTHONIOENCODING: 'utf-8' - MKL_NUM_THREADS: 1 AZURE_CI_WINDOWS: 'true' PYTHON_ARCH: 'x64' strategy: diff --git a/doc/_includes/data_formats.rst b/doc/_includes/data_formats.rst index ddc1d078387..15be2cf8860 100644 --- a/doc/_includes/data_formats.rst +++ b/doc/_includes/data_formats.rst @@ -72,6 +72,7 @@ EEG :ref:`Persyst ` .lay :func:`mn NIRS :ref:`NIRx ` directory :func:`mne.io.read_raw_nirx` +NIRS :ref:`BOXY ` directory :func:`mne.io.read_raw_boxy` ============ ============================================ ========= =================================== More details are provided in the tutorials in the :ref:`tut-data-formats` diff --git a/doc/changes/latest.inc b/doc/changes/latest.inc index 5b79925a788..156cf30c015 100644 --- a/doc/changes/latest.inc +++ b/doc/changes/latest.inc @@ -20,11 +20,15 @@ Current (0.22.dev0) .. |Victoria Peterson| replace:: **Victoria Peterson** +.. |Jonathan Kuziek| replace:: **Jonathan Kuziek** + Enhancements ~~~~~~~~~~~~ - Add :class:`mne.decoding.SSD` for spatial filtering with spatio-spectral-decomposition (:gh:`7070` **by new contributor** |Victoria Peterson|_ and `Denis Engemann`_) +- Add reader for optical imaging data recorded using ISS Imagent I/II hardware and BOXY recording software in :func:`mne.io.read_raw_boxy` (:gh:`7717` **by new contributor** |Jonathan Kuziek|_ and `Kyle Mathewson`_) + - Add options to use labels in :func:`mne.minimum_norm.get_point_spread` and :func:`mne.minimum_norm.get_cross_talk` (:gh:`8275` by `Olaf Hauk`_) - Update ``surfaces`` argument in :func:`mne.viz.plot_alignment` to allow dict for transparency values, and set default for sEEG data to have transparency (:gh:`8445` by `Keith Doelling`_) @@ -35,6 +39,8 @@ Enhancements - Add ``proj`` argument to :func:`mne.make_fixed_length_epochs` (:gh:`8351` by `Eric Larson`_) +- Add :func:`mne.preprocessing.realign_raw` to realign simultaneous raw recordings in the presence of clock drift (:gh:`8539` by `Eric Larson`_) + - Reduce memory usage of volume source spaces (:gh:`8379` by `Eric Larson`_) - Speed up heavy use of :meth:`mne.SourceMorph.apply` for volumetric source spaces by use of the method :meth:`mne.SourceMorph.compute_vol_morph_mat` (:gh:`8366` by `Eric Larson`_) diff --git a/doc/changes/names.inc b/doc/changes/names.inc index 8f602c42f4f..7f04d5677eb 100644 --- a/doc/changes/names.inc +++ b/doc/changes/names.inc @@ -318,6 +318,8 @@ .. _Rahul Nadkarni: https://github.com/rahuln +.. _Jonathan Kuziek: https://github.com/kuziekj + .. _Lau Møller Andersen: https://github.com/ualsbombe ..
_Martin Schulz: https://github.com/marsipu diff --git a/doc/conf.py b/doc/conf.py index 5a143e25563..df315811271 100644 --- a/doc/conf.py +++ b/doc/conf.py @@ -677,7 +677,7 @@ def reset_warnings(gallery_conf, fname): 'n_dipoles_fwd', 'n_picks_ref', 'n_coords', # Undocumented (on purpose) 'RawKIT', 'RawEximia', 'RawEGI', 'RawEEGLAB', 'RawEDF', 'RawCTF', 'RawBTi', - 'RawBrainVision', 'RawCurry', 'RawNIRX', 'RawGDF', 'RawSNIRF', + 'RawBrainVision', 'RawCurry', 'RawNIRX', 'RawGDF', 'RawSNIRF', 'RawBOXY', 'RawPersyst', 'RawNihon', # sklearn subclasses 'mapping', 'to', 'any', diff --git a/doc/python_reference.rst b/doc/python_reference.rst index 5782ef73c9d..d01876d0dc2 100644 --- a/doc/python_reference.rst +++ b/doc/python_reference.rst @@ -72,6 +72,7 @@ Reading raw data read_raw_fif read_raw_eximia read_raw_fieldtrip + read_raw_boxy read_raw_persyst read_raw_nihon @@ -380,6 +381,7 @@ Projections: oversampled_temporal_projection peak_finder read_ica + realign_raw regress_artifact corrmap read_ica_eeglab diff --git a/environment.yml b/environment.yml index 87f26985877..39ea020c4a9 100644 --- a/environment.yml +++ b/environment.yml @@ -1,6 +1,6 @@ name: mne channels: -- defaults +- conda-forge dependencies: - python>=3.8 - pip @@ -24,18 +24,15 @@ dependencies: - imageio - tqdm - spyder-kernels -- pip: - - mne - - imageio-ffmpeg>=0.4.1 - - vtk>=9.0.1 - - pyvista>=0.24 - - pyvistaqt>=0.2.0 - - mayavi - - PySurfer[save_movie] - - dipy --only-binary dipy - - nibabel - - nilearn - - neo - - python-picard - - PyQt5>=5.10,<5.14; platform_system == "Darwin" - - PyQt5>=5.10; platform_system != "Darwin" +- imageio-ffmpeg>=0.4.1 +- vtk>=9.0.1 +- pyvista>=0.24 +- pyvistaqt>=0.2.0 +- mayavi +- PySurfer +- dipy +- nibabel +- nilearn +- python-picard +- pyqt +- mne diff --git a/examples/visualization/plot_eeglab_head_sphere.py b/examples/visualization/plot_eeglab_head_sphere.py index e1b2896eb15..488c14edd57 100644 --- a/examples/visualization/plot_eeglab_head_sphere.py +++ b/examples/visualization/plot_eeglab_head_sphere.py @@ -99,7 +99,6 @@ fake_evoked.plot_sensors(sphere=(x, y, z, radius), axes=ax[1], show=False) # add titles -fig.texts[0].remove() ax[0].set_title('MNE channel projection', fontweight='bold') ax[1].set_title('EEGLAB channel projection', fontweight='bold') diff --git a/mne/beamformer/tests/test_lcmv.py b/mne/beamformer/tests/test_lcmv.py index 576b97158c8..835f494228d 100644 --- a/mne/beamformer/tests/test_lcmv.py +++ b/mne/beamformer/tests/test_lcmv.py @@ -648,7 +648,7 @@ def test_lcmv_reg_proj(proj, weight_norm): (0.05, 'unit-noise-gain', False, None, 83, 86), (0.05, 'unit-noise-gain', False, 0.8, 83, 86), # depth same for wn != None # no reg - (0.00, 'unit-noise-gain', True, None, 45, 99), # TODO: Still not stable + (0.00, 'unit-noise-gain', True, None, 35, 99), # TODO: Still not stable ]) def test_localization_bias_fixed(bias_params_fixed, reg, weight_norm, use_cov, depth, lower, upper): @@ -687,7 +687,7 @@ def test_localization_bias_fixed(bias_params_fixed, reg, weight_norm, use_cov, (0.00, 'vector', 'unit-noise-gain-invariant', True, None, 50, 65), (0.00, 'vector', 'unit-noise-gain', True, None, 42, 65), (0.00, 'vector', 'nai', True, None, 42, 65), - (0.00, 'max-power', None, True, None, 15, 19), + (0.00, 'max-power', None, True, None, 13, 19), (0.00, 'max-power', 'unit-noise-gain-invariant', True, None, 43, 50), (0.00, 'max-power', 'unit-noise-gain', True, None, 43, 50), (0.00, 'max-power', 'nai', True, None, 43, 50), diff --git a/mne/channels/channels.py b/mne/channels/channels.py 
index a4ab98e03ee..cb187842e3d 100644 --- a/mne/channels/channels.py +++ b/mne/channels/channels.py @@ -27,7 +27,7 @@ from ..io.pick import (channel_type, pick_info, pick_types, _picks_by_type, _check_excludes_includes, _contains_ch_type, channel_indices_by_type, pick_channels, _picks_to_idx, - _get_channel_types) + _get_channel_types, get_channel_type_constants) from ..io.write import DATE_NONE from ..io._digitization import _get_data_as_dict_from_dig @@ -80,7 +80,8 @@ def _get_ch_type(inst, ch_type, allow_ref_meg=False): """ if ch_type is None: allowed_types = ['mag', 'grad', 'planar1', 'planar2', 'eeg', 'csd', - 'fnirs_cw_amplitude', 'fnirs_od', 'hbo', 'hbr', + 'fnirs_cw_amplitude', 'fnirs_fd_ac_amplitude', + 'fnirs_fd_phase', 'fnirs_od', 'hbo', 'hbr', 'ecog', 'seeg'] allowed_types += ['ref_meg'] if allow_ref_meg else [] for type_ in allowed_types: @@ -275,41 +276,11 @@ def get_montage(self): return montage -# XXX Eventually de-duplicate with _kind_dict of mne/io/meas_info.py -_human2fiff = {'ecg': FIFF.FIFFV_ECG_CH, - 'eeg': FIFF.FIFFV_EEG_CH, - 'emg': FIFF.FIFFV_EMG_CH, - 'eog': FIFF.FIFFV_EOG_CH, - 'exci': FIFF.FIFFV_EXCI_CH, - 'ias': FIFF.FIFFV_IAS_CH, - 'misc': FIFF.FIFFV_MISC_CH, - 'resp': FIFF.FIFFV_RESP_CH, - 'seeg': FIFF.FIFFV_SEEG_CH, - 'stim': FIFF.FIFFV_STIM_CH, - 'syst': FIFF.FIFFV_SYST_CH, - 'bio': FIFF.FIFFV_BIO_CH, - 'ecog': FIFF.FIFFV_ECOG_CH, - 'fnirs_cw_amplitude': FIFF.FIFFV_FNIRS_CH, - 'fnirs_od': FIFF.FIFFV_FNIRS_CH, - 'hbo': FIFF.FIFFV_FNIRS_CH, - 'hbr': FIFF.FIFFV_FNIRS_CH} -_human2unit = {'ecg': FIFF.FIFF_UNIT_V, - 'eeg': FIFF.FIFF_UNIT_V, - 'emg': FIFF.FIFF_UNIT_V, - 'eog': FIFF.FIFF_UNIT_V, - 'exci': FIFF.FIFF_UNIT_NONE, - 'ias': FIFF.FIFF_UNIT_NONE, - 'misc': FIFF.FIFF_UNIT_V, - 'resp': FIFF.FIFF_UNIT_NONE, - 'seeg': FIFF.FIFF_UNIT_V, - 'stim': FIFF.FIFF_UNIT_NONE, - 'syst': FIFF.FIFF_UNIT_NONE, - 'bio': FIFF.FIFF_UNIT_V, - 'ecog': FIFF.FIFF_UNIT_V, - 'fnirs_cw_amplitude': FIFF.FIFF_UNIT_V, - 'fnirs_od': FIFF.FIFF_UNIT_NONE, - 'hbo': FIFF.FIFF_UNIT_MOL, - 'hbr': FIFF.FIFF_UNIT_MOL} +channel_type_constants = get_channel_type_constants() +_human2fiff = {k: v.get('kind', FIFF.FIFFV_COIL_NONE) for k, v in + channel_type_constants.items()} +_human2unit = {k: v.get('unit', FIFF.FIFF_UNIT_NONE) for k, v in + channel_type_constants.items()} _unit2human = {FIFF.FIFF_UNIT_V: 'V', FIFF.FIFF_UNIT_T: 'T', FIFF.FIFF_UNIT_T_M: 'T/m', @@ -440,7 +411,8 @@ def set_channel_types(self, mapping, verbose=None): The following sensor types are accepted: ecg, eeg, emg, eog, exci, ias, misc, resp, seeg, stim, syst, ecog, - hbo, hbr, fnirs_cw_amplitude, fnirs_od + hbo, hbr, fnirs_cw_amplitude, fnirs_fd_ac_amplitude, + fnirs_fd_phase, fnirs_od .. 
versionadded:: 0.9.0 """ @@ -482,6 +454,10 @@ def set_channel_types(self, mapping, verbose=None): coil_type = FIFF.FIFFV_COIL_FNIRS_HBR elif ch_type == 'fnirs_cw_amplitude': coil_type = FIFF.FIFFV_COIL_FNIRS_CW_AMPLITUDE + elif ch_type == 'fnirs_fd_ac_amplitude': + coil_type = FIFF.FIFFV_COIL_FNIRS_FD_AC_AMPLITUDE + elif ch_type == 'fnirs_fd_phase': + coil_type = FIFF.FIFFV_COIL_FNIRS_FD_PHASE elif ch_type == 'fnirs_od': coil_type = FIFF.FIFFV_COIL_FNIRS_OD else: diff --git a/mne/channels/layout.py b/mne/channels/layout.py index a69d9e5a586..6a20cbf9ed9 100644 --- a/mne/channels/layout.py +++ b/mne/channels/layout.py @@ -17,7 +17,7 @@ import numpy as np from ..transforms import _pol_to_cart, _cart_to_sph -from ..io.pick import pick_types, _picks_to_idx +from ..io.pick import pick_types, _picks_to_idx, _FNIRS_CH_TYPES_SPLIT from ..io.constants import FIFF from ..io.meas_info import Info from ..utils import (_clean_names, warn, _check_ch_locs, fill_doc, @@ -917,7 +917,7 @@ def _merge_ch_data(data, ch_type, names, method='rms'): if ch_type == 'grad': data = _merge_grad_data(data, method) else: - assert ch_type in ('hbo', 'hbr', 'fnirs_cw_amplitude', 'fnirs_od') + assert ch_type in _FNIRS_CH_TYPES_SPLIT data, names = _merge_nirs_data(data, names) return data, names diff --git a/mne/channels/tests/test_channels.py b/mne/channels/tests/test_channels.py index a2a2edbfaf4..06dec3155ec 100644 --- a/mne/channels/tests/test_channels.py +++ b/mne/channels/tests/test_channels.py @@ -114,21 +114,22 @@ def test_set_channel_types(): # Error Tests # Test channel name exists in ch_names mapping = {'EEG 160': 'EEG060'} - pytest.raises(ValueError, raw.set_channel_types, mapping) + with pytest.raises(ValueError, match=r"name \(EEG 160\) doesn't exist"): + raw.set_channel_types(mapping) # Test change to illegal channel type mapping = {'EOG 061': 'xxx'} - pytest.raises(ValueError, raw.set_channel_types, mapping) - # Test changing type if in proj (avg eeg ref here) + with pytest.raises(ValueError, match='cannot change to this channel type'): + raw.set_channel_types(mapping) + # Test changing type if in proj mapping = {'EEG 058': 'ecog', 'EEG 059': 'ecg', 'EEG 060': 'eog', 'EOG 061': 'seeg', 'MEG 2441': 'eeg', 'MEG 2443': 'eeg', 'MEG 2442': 'hbo'} - pytest.raises(RuntimeError, raw.set_channel_types, mapping) - # Test type change raw2 = read_raw_fif(raw_fname) raw2.info['bads'] = ['EEG 059', 'EEG 060', 'EOG 061'] - pytest.raises(RuntimeError, raw2.set_channel_types, mapping) # has prj + with pytest.raises(RuntimeError, match='type .* in projector "PCA-v1"'): + raw2.set_channel_types(mapping) # has prj raw2.add_proj([], remove_existing=True) - with pytest.warns(RuntimeWarning, match='The unit for channel'): + with pytest.warns(RuntimeWarning, match='unit for channel.* has changed'): raw2 = raw2.set_channel_types(mapping) info = raw2.info assert info['chs'][372]['ch_name'] == 'EEG 058' diff --git a/mne/cov.py b/mne/cov.py index 2b96cd40b1f..72e33bec09f 100644 --- a/mne/cov.py +++ b/mne/cov.py @@ -1253,7 +1253,8 @@ class _RegCovariance(BaseEstimator): """Aux class.""" def __init__(self, info, grad=0.1, mag=0.1, eeg=0.1, seeg=0.1, ecog=0.1, - hbo=0.1, hbr=0.1, fnirs_cw_amplitude=0.1, fnirs_od=0.1, + hbo=0.1, hbr=0.1, fnirs_cw_amplitude=0.1, + fnirs_fd_ac_amplitude=0.1, fnirs_fd_phase=0.1, fnirs_od=0.1, csd=0.1, store_precision=False, assume_centered=False): self.info = info # For sklearn compat, these cannot (easily?) 
be combined into @@ -1266,6 +1267,8 @@ def __init__(self, info, grad=0.1, mag=0.1, eeg=0.1, seeg=0.1, ecog=0.1, self.hbo = hbo self.hbr = hbr self.fnirs_cw_amplitude = fnirs_cw_amplitude + self.fnirs_fd_ac_amplitude = fnirs_fd_ac_amplitude + self.fnirs_fd_phase = fnirs_fd_phase self.fnirs_od = fnirs_od self.csd = csd self.store_precision = store_precision @@ -1545,7 +1548,8 @@ def _smart_eigh(C, info, rank, scalings=None, projs=None, @verbose def regularize(cov, info, mag=0.1, grad=0.1, eeg=0.1, exclude='bads', proj=True, seeg=0.1, ecog=0.1, hbo=0.1, hbr=0.1, - fnirs_cw_amplitude=0.1, fnirs_od=0.1, csd=0.1, + fnirs_cw_amplitude=0.1, fnirs_fd_ac_amplitude=0.1, + fnirs_fd_phase=0.1, fnirs_od=0.1, csd=0.1, rank=None, scalings=None, verbose=None): """Regularize noise covariance matrix. @@ -1587,7 +1591,11 @@ def regularize(cov, info, mag=0.1, grad=0.1, eeg=0.1, exclude='bads', hbr : float (default 0.1) Regularization factor for HBR signals. fnirs_cw_amplitude : float (default 0.1) - Regularization factor for fNIRS raw signals. + Regularization factor for fNIRS CW raw signals. + fnirs_fd_ac_amplitude : float (default 0.1) + Regularization factor for fNIRS FD AC raw signals. + fnirs_fd_phase : float (default 0.1) + Regularization factor for fNIRS raw phase signals. fnirs_od : float (default 0.1) Regularization factor for fNIRS optical density signals. csd : float (default 0.1) @@ -1619,7 +1627,8 @@ def regularize(cov, info, mag=0.1, grad=0.1, eeg=0.1, exclude='bads', scalings = _handle_default('scalings_cov_rank', scalings) regs = dict(eeg=eeg, seeg=seeg, ecog=ecog, hbo=hbo, hbr=hbr, fnirs_cw_amplitude=fnirs_cw_amplitude, - fnirs_od=fnirs_od, csd=csd) + fnirs_fd_ac_amplitude=fnirs_fd_ac_amplitude, + fnirs_fd_phase=fnirs_fd_phase, fnirs_od=fnirs_od, csd=csd) if exclude is None: raise ValueError('exclude must be a list of strings or "bads"') diff --git a/mne/defaults.py b/mne/defaults.py index 82b9d4a58bd..5b85be312c2 100644 --- a/mne/defaults.py +++ b/mne/defaults.py @@ -11,15 +11,18 @@ ref_meg='steelblue', misc='k', stim='k', resp='k', chpi='k', exci='k', ias='k', syst='k', seeg='saddlebrown', dipole='k', gof='k', bio='k', ecog='k', hbo='#AA3377', hbr='b', - fnirs_cw_amplitude='k', fnirs_od='k', csd='k'), + fnirs_cw_amplitude='k', fnirs_fd_ac_amplitude='k', + fnirs_fd_phase='k', fnirs_od='k', csd='k'), units=dict(mag='fT', grad='fT/cm', eeg='µV', eog='µV', ecg='µV', emg='µV', misc='AU', seeg='mV', dipole='nAm', gof='GOF', bio='µV', ecog='µV', hbo='µM', hbr='µM', ref_meg='fT', - fnirs_cw_amplitude='V', fnirs_od='V', csd='mV/m²'), + fnirs_cw_amplitude='V', fnirs_fd_ac_amplitude='V', + fnirs_fd_phase='rad', fnirs_od='V', csd='mV/m²'), # scalings for the units scalings=dict(mag=1e15, grad=1e13, eeg=1e6, eog=1e6, emg=1e6, ecg=1e6, misc=1.0, seeg=1e3, dipole=1e9, gof=1.0, bio=1e6, ecog=1e6, hbo=1e6, hbr=1e6, ref_meg=1e15, fnirs_cw_amplitude=1.0, + fnirs_fd_ac_amplitude=1.0, fnirs_fd_phase=1., fnirs_od=1.0, csd=1e3), # rough guess for a good plot scalings_plot_raw=dict(mag=1e-12, grad=4e-11, eeg=20e-6, eog=150e-6, @@ -27,6 +30,7 @@ stim=1, resp=1, chpi=1e-4, exci=1, ias=1, syst=1, seeg=1e-4, bio=1e-6, ecog=1e-4, hbo=10e-6, hbr=10e-6, whitened=10., fnirs_cw_amplitude=2e-2, + fnirs_fd_ac_amplitude=2e-2, fnirs_fd_phase=2e-1, fnirs_od=2e-2, csd=200e-4), scalings_cov_rank=dict(mag=1e12, grad=1e11, eeg=1e5, # ~100x scalings seeg=1e1, ecog=1e4, hbo=1e4, hbr=1e4), @@ -39,6 +43,8 @@ dipole='Dipole', ecog='ECoG', hbo='Oxyhemoglobin', ref_meg='Reference Magnetometers', fnirs_cw_amplitude='fNIRS (CW amplitude)', + 
fnirs_fd_ac_amplitude='fNIRS (FD AC amplitude)', + fnirs_fd_phase='fNIRS (FD phase)', fnirs_od='fNIRS (OD)', hbr='Deoxyhemoglobin', gof='Goodness of fit', csd='Current source density'), mask_params=dict(marker='o', diff --git a/mne/epochs.py b/mne/epochs.py index 4721078227d..d00c7ded4d6 100644 --- a/mne/epochs.py +++ b/mne/epochs.py @@ -408,7 +408,7 @@ def __init__(self, info, data, events, event_id=None, tmin=-0.2, tmax=0.5, f'got {events_type}') if events.ndim != 2 or events.shape[1] != 3: raise ValueError( - 'events must be of shape (N, 3), got {events.shape}') + f'events must be of shape (N, 3), got {events.shape}') events_max = events.max() if events_max > INT32_MAX: raise ValueError( diff --git a/mne/evoked.py b/mne/evoked.py index d98743a1f44..c3527aba76e 100644 --- a/mne/evoked.py +++ b/mne/evoked.py @@ -31,7 +31,7 @@ from .io.open import fiff_open from .io.tag import read_tag from .io.tree import dir_tree_find -from .io.pick import pick_types, _picks_to_idx +from .io.pick import pick_types, _picks_to_idx, _FNIRS_CH_TYPES_SPLIT from .io.meas_info import read_meas_info, write_meas_info from .io.proj import ProjMixin from .io.write import (start_file, start_block, end_file, end_block, @@ -392,9 +392,9 @@ def animate_topomap(self, ch_type=None, times=None, frame_rate=None, ---------- ch_type : str | None Channel type to plot. Accepted data types: 'mag', 'grad', 'eeg', - 'hbo', 'hbr', 'fnirs_od, and 'fnirs_cw_amplitude'. - If None, first available channel type from ('mag', 'grad', 'eeg', - 'hbo', 'hbr', 'fnirs_od, 'fnirs_cw_amplitude') is used. + 'hbo', 'hbr', 'fnirs_cw_amplitude', + 'fnirs_fd_ac_amplitude', 'fnirs_fd_phase', and 'fnirs_od'. + If None, first available channel type from the above list is used. Defaults to None. times : array of float | None The time points to plot. If None, 10 evenly spaced samples are @@ -521,7 +521,7 @@ def get_peak(self, ch_type=None, tmin=None, tmax=None, Parameters ---------- - ch_type : 'mag', 'grad', 'eeg', 'seeg', 'ecog', 'hbo', hbr', 'misc', None + ch_type : str | None The channel type to use. Defaults to None. If more than one sensor Type is present in the data the channel type has to be explicitly set. @@ -558,8 +558,8 @@ def get_peak(self, ch_type=None, tmin=None, tmax=None, .. versionadded:: 0.16 """ # noqa: E501 - supported = ('mag', 'grad', 'eeg', 'seeg', 'ecog', 'misc', 'hbo', - 'hbr', 'None', 'fnirs_cw_amplitude', 'fnirs_od') + supported = ('mag', 'grad', 'eeg', 'seeg', 'ecog', 'misc', + 'None') + _FNIRS_CH_TYPES_SPLIT types_used = self.get_channel_types(unique=True, only_data_chs=True) _check_option('ch_type', str(ch_type), supported) @@ -592,7 +592,7 @@ def get_peak(self, ch_type=None, tmin=None, tmax=None, seeg = True elif ch_type == 'ecog': ecog = True - elif ch_type in ('hbo', 'hbr', 'fnirs_cw_amplitude', 'fnirs_od'): + elif ch_type in _FNIRS_CH_TYPES_SPLIT: fnirs = ch_type if ch_type is not None: diff --git a/mne/io/__init__.py b/mne/io/__init__.py index 2a99ada8e72..a8c58873980 100644 --- a/mne/io/__init__.py +++ b/mne/io/__init__.py @@ -27,6 +27,7 @@ from . import kit from . import nicolet from . import nirx +from . import boxy from . import persyst from . import eeglab from . 
import pick @@ -47,6 +48,7 @@ from .eeglab import read_raw_eeglab, read_epochs_eeglab from .eximia import read_raw_eximia from .nirx import read_raw_nirx +from .boxy import read_raw_boxy from .snirf import read_raw_snirf from .persyst import read_raw_persyst from .fieldtrip import (read_raw_fieldtrip, read_epochs_fieldtrip, diff --git a/mne/io/array/tests/test_array.py b/mne/io/array/tests/test_array.py index df1d790cd8f..4d6966ed67e 100644 --- a/mne/io/array/tests/test_array.py +++ b/mne/io/array/tests/test_array.py @@ -14,7 +14,8 @@ from mne.io import read_raw_fif from mne.io.array import RawArray from mne.io.tests.test_raw import _test_raw_reader -from mne.io.meas_info import create_info, _kind_dict +from mne.io.meas_info import create_info +from mne.io.pick import get_channel_type_constants from mne.utils import run_tests_if_main from mne.channels import make_dig_montage @@ -101,7 +102,8 @@ def test_array_raw(): types[-1] = 'eog' # default type info = create_info(ch_names, sfreq) - assert_equal(info['chs'][0]['kind'], _kind_dict['misc'][0]) + assert_equal(info['chs'][0]['kind'], + get_channel_type_constants()['misc']['kind']) # use real types info = create_info(ch_names, sfreq, types) raw2 = _test_raw_reader(RawArray, test_preloading=False, diff --git a/mne/io/boxy/__init__.py b/mne/io/boxy/__init__.py new file mode 100644 index 00000000000..5da9a5b6a37 --- /dev/null +++ b/mne/io/boxy/__init__.py @@ -0,0 +1,7 @@ +"""fNIRS module for conversion to FIF.""" + +# Authors: Kyle Mathewson, Jonathan Kuziek +# +# License: BSD (3-clause) + +from .boxy import read_raw_boxy diff --git a/mne/io/boxy/boxy.py b/mne/io/boxy/boxy.py new file mode 100644 index 00000000000..a48efd67bf9 --- /dev/null +++ b/mne/io/boxy/boxy.py @@ -0,0 +1,262 @@ +# Authors: Kyle Mathewson, Jonathan Kuziek +# +# License: BSD (3-clause) + +import re as re + +import numpy as np + +from ..base import BaseRaw +from ..meas_info import create_info +from ..utils import _mult_cal_one +from ...utils import logger, verbose, fill_doc +from ...annotations import Annotations + + +@fill_doc +def read_raw_boxy(fname, preload=False, verbose=None): + """Reader for an optical imaging recording. + + This function has been tested using the ISS Imagent I and II systems + and versions 0.40/0.84 of the BOXY recording software. + + Parameters + ---------- + fname : str + Path to the BOXY data file. + %(preload)s + %(verbose)s + + Returns + ------- + raw : instance of RawBOXY + A Raw object containing BOXY data. + + See Also + -------- + mne.io.Raw : Documentation of attribute and methods. + """ + return RawBOXY(fname, preload, verbose) + + +@fill_doc +class RawBOXY(BaseRaw): + """Raw object from a BOXY optical imaging file. + + Parameters + ---------- + fname : str + Path to the BOXY data file. + %(preload)s + %(verbose)s + + See Also + -------- + mne.io.Raw : Documentation of attribute and methods. + """ + + @verbose + def __init__(self, fname, preload=False, verbose=None): + logger.info('Loading %s' % fname) + + # Read header file and grab some info. + start_line = np.inf + col_names = mrk_col = filetype = mrk_data = end_line = None + raw_extras = dict() + raw_extras['offsets'] = list() # keep track of our offsets + sfreq = None + with open(fname, 'r') as fid: + line_num = 0 + i_line = fid.readline() + while i_line: + # most of our lines will be data lines, so check that first + if line_num >= start_line: + assert col_names is not None + assert filetype is not None + if '#DATA ENDS' in i_line: + # Data ends just before this. 
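+                        # (so data rows span lines start_line..end_line - 1 and
+                        #  'offsets' holds end_line - start_line + 1 entries)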
+ end_line = line_num + break + if mrk_col is not None: + if filetype == 'non-parsed': + # Non-parsed files have different lines lengths. + crnt_line = i_line.rsplit(' ')[0] + temp_data = re.findall( + r'[-+]?\d*\.?\d+', crnt_line) + if len(temp_data) == len(col_names): + mrk_data.append(float( + re.findall(r'[-+]?\d*\.?\d+', crnt_line) + [mrk_col])) + else: + crnt_line = i_line.rsplit(' ')[0] + mrk_data.append(float(re.findall( + r'[-+]?\d*\.?\d+', crnt_line)[mrk_col])) + raw_extras['offsets'].append(fid.tell()) + # now proceed with more standard header parsing + elif 'BOXY.EXE:' in i_line: + boxy_ver = re.findall(r'\d*\.\d+', + i_line.rsplit(' ')[-1])[0] + # Check that the BOXY version is supported + if boxy_ver not in ['0.40', '0.84']: + raise RuntimeError('MNE has not been tested with BOXY ' + 'version (%s)' % boxy_ver) + elif 'Detector Channels' in i_line: + raw_extras['detect_num'] = int(i_line.rsplit(' ')[0]) + elif 'External MUX Channels' in i_line: + raw_extras['source_num'] = int(i_line.rsplit(' ')[0]) + elif 'Update Rate (Hz)' in i_line or \ + 'Updata Rate (Hz)' in i_line: + # Version 0.40 of the BOXY recording software + # (and possibly other versions lower than 0.84) contains a + # typo in the raw data file where 'Update Rate' is spelled + # "Updata Rate. This will account for this typo. + sfreq = float(i_line.rsplit(' ')[0]) + elif '#DATA BEGINS' in i_line: + # Data should start a couple lines later. + start_line = line_num + 3 + elif line_num == start_line - 2: + # Grab names for each column of data. + raw_extras['col_names'] = col_names = re.findall( + r'\w+\-\w+|\w+\-\d+|\w+', i_line.rsplit(' ')[0]) + if 'exmux' in col_names: + # Change filetype based on data organisation. + filetype = 'non-parsed' + else: + filetype = 'parsed' + if 'digaux' in col_names: + mrk_col = col_names.index('digaux') + mrk_data = list() + # raw_extras['offsets'].append(fid.tell()) + elif line_num == start_line - 1: + raw_extras['offsets'].append(fid.tell()) + line_num += 1 + i_line = fid.readline() + assert sfreq is not None + raw_extras.update( + filetype=filetype, start_line=start_line, end_line=end_line) + + # Label each channel in our data, for each data type (DC, AC, Ph). + # Data is organised by channels x timepoint, where the first + # 'source_num' rows correspond to the first detector, the next + # 'source_num' rows correspond to the second detector, and so on. + ch_names = list() + ch_types = list() + cals = list() + for det_num in range(raw_extras['detect_num']): + for src_num in range(raw_extras['source_num']): + for i_type, ch_type in [ + ('DC', 'fnirs_cw_amplitude'), + ('AC', 'fnirs_fd_ac_amplitude'), + ('Ph', 'fnirs_fd_phase')]: + ch_names.append( + f'S{src_num + 1}_D{det_num + 1} {i_type}') + ch_types.append(ch_type) + cals.append(np.pi / 180. if i_type == 'Ph' else 1.) + + # Create info structure. + info = create_info(ch_names, sfreq, ch_types) + for ch, cal in zip(info['chs'], cals): + ch['cal'] = cal + + # Determine how long our data is. + delta = end_line - start_line + assert len(raw_extras['offsets']) == delta + 1 + if filetype == 'non-parsed': + delta //= (raw_extras['source_num']) + super(RawBOXY, self).__init__( + info, preload, filenames=[fname], first_samps=[0], + last_samps=[delta - 1], raw_extras=[raw_extras], verbose=verbose) + + # Now let's grab our markers, if they are present. + if mrk_data is not None: + mrk_data = np.array(mrk_data, float) + # We only want the first instance of each trigger. 
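+            # For example, a hypothetical digaux stream [0, 2, 2, 2, 0, 3, 0]
+            # would yield onsets [1, 5] / sfreq, durations [3, 1] / sfreq and
+            # descriptions [2., 3.] in the loop below.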
+ prev_mrk = 0 + mrk_idx = list() + duration = list() + tmp_dur = 0 + for i_num, i_mrk in enumerate(mrk_data): + if i_mrk != 0 and i_mrk != prev_mrk: + mrk_idx.append(i_num) + if i_mrk != 0 and i_mrk == prev_mrk: + tmp_dur += 1 + if i_mrk == 0 and i_mrk != prev_mrk: + duration.append((tmp_dur + 1) / sfreq) + tmp_dur = 0 + prev_mrk = i_mrk + onset = np.array(mrk_idx) / sfreq + description = mrk_data[mrk_idx] + annot = Annotations(onset, duration, description) + self.set_annotations(annot) + + def _read_segment_file(self, data, idx, fi, start, stop, cals, mult): + """Read a segment of data from a file. + + Boxy file organises data in two ways, parsed or un-parsed. + Regardless of type, output has (n_montages x n_sources x n_detectors + + n_marker_channels) rows, and (n_timepoints x n_blocks) columns. + """ + source_num = self._raw_extras[fi]['source_num'] + detect_num = self._raw_extras[fi]['detect_num'] + start_line = self._raw_extras[fi]['start_line'] + end_line = self._raw_extras[fi]['end_line'] + filetype = self._raw_extras[fi]['filetype'] + col_names = self._raw_extras[fi]['col_names'] + offsets = self._raw_extras[fi]['offsets'] + boxy_file = self._filenames[fi] + + # Non-parsed multiplexes sources, so we need source_num times as many + # lines in that case + if filetype == 'parsed': + start_read = start_line + start + stop_read = start_read + (stop - start) + else: + assert filetype == 'non-parsed' + start_read = start_line + start * source_num + stop_read = start_read + (stop - start) * source_num + assert start_read >= start_line + assert stop_read <= end_line + + # Possible detector names. + detectors = 'ABCDEFGHIJKLMNOPQRSTUVWXYZ'[:detect_num] + + # Loop through our data. + one = np.zeros((len(col_names), stop_read - start_read)) + with open(boxy_file, 'r') as fid: + # Just a more efficient version of this: + # ii = 0 + # for line_num, i_line in enumerate(fid): + # if line_num >= start_read: + # if line_num >= stop_read: + # break + # # Grab actual data. + # i_data = i_line.strip().split() + # one[:len(i_data), ii] = i_data + # ii += 1 + fid.seek(offsets[start_read - start_line], 0) + for oo in one.T: + i_data = fid.readline().strip().split() + oo[:len(i_data)] = i_data + + # in theory we could index in the loop above, but it's painfully slow, + # so let's just take a hopefully minor memory hit + if filetype == 'non-parsed': + ch_idxs = [col_names.index(f'{det}-{i_type}') + for det in detectors + for i_type in ['DC', 'AC', 'Ph']] + one = one[ch_idxs].reshape( # each "time point" multiplexes srcs + len(detectors), 3, -1, source_num + ).transpose( # reorganize into (det, source, DC/AC/Ph, t) order + 0, 3, 1, 2 + ).reshape( # reshape the way we store it (det x source x DAP, t) + len(detectors) * source_num * 3, -1) + else: + assert filetype == 'parsed' + ch_idxs = [col_names.index(f'{det}-{i_type}{si + 1}') + for det in detectors + for si in range(source_num) + for i_type in ['DC', 'AC', 'Ph']] + one = one[ch_idxs] + + # Place our data into the data object in place. 
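+        # (_mult_cal_one multiplies 'one' by 'mult' or 'cals' and stores the
+        #  result in 'data'; see mne/io/utils.py.)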
+ _mult_cal_one(data, one, idx, cals, mult) diff --git a/mne/io/boxy/tests/__init__.py b/mne/io/boxy/tests/__init__.py new file mode 100644 index 00000000000..e69de29bb2d diff --git a/mne/io/boxy/tests/test_boxy.py b/mne/io/boxy/tests/test_boxy.py new file mode 100644 index 00000000000..f4d6ef2656f --- /dev/null +++ b/mne/io/boxy/tests/test_boxy.py @@ -0,0 +1,190 @@ +# Authors: Kyle Mathewson, Jonathan Kuziek +# +# License: BSD (3-clause) + +import os.path as op + +import pytest +import numpy as np +from numpy.testing import (assert_allclose, assert_array_equal, + assert_array_less) +import scipy.io as spio + +from mne import pick_types +from mne.datasets import testing +from mne.io import read_raw_boxy +from mne.io.tests.test_raw import _test_raw_reader + +data_path = testing.data_path(download=False) +boxy_0_40 = op.join( + data_path, 'BOXY', 'boxy_0_40_recording', + 'boxy_0_40_notriggers_unparsed.txt') +p_pod_0_40 = op.join( + data_path, 'BOXY', 'boxy_0_40_recording', 'p_pod_10_6_3_loaded_data', + 'p_pod_10_6_3_notriggers_unparsed.mat') +boxy_0_84 = op.join( + data_path, 'BOXY', 'boxy_0_84_digaux_recording', + 'boxy_0_84_triggers_unparsed.txt') +boxy_0_84_parsed = op.join( + data_path, 'BOXY', 'boxy_0_84_digaux_recording', + 'boxy_0_84_triggers_parsed.txt') +p_pod_0_84 = op.join( + data_path, 'BOXY', 'boxy_0_84_digaux_recording', + 'p_pod_10_6_3_loaded_data', 'p_pod_10_6_3_triggers_unparsed.mat') + + +def _assert_ppod(raw, p_pod_file): + have_types = raw.get_channel_types(unique=True) + assert 'fnirs_fd_phase' in raw, have_types + assert 'fnirs_cw_amplitude' in raw, have_types + assert 'fnirs_fd_ac_amplitude' in raw, have_types + ppod_data = spio.loadmat(p_pod_file) + + # Compare MNE loaded data to p_pod loaded data. + map_ = dict(dc='fnirs_cw_amplitude', ac='fnirs_fd_ac_amplitude', + ph='fnirs_fd_phase') + for key, value in map_.items(): + ppod = ppod_data[key].T + m = np.median(np.abs(ppod)) + assert 1e-1 < m < 1e5, key # our atol is meaningful + atol = m * 1e-10 + py = raw.get_data(value) + if key == 'ph': # radians + assert_array_less(-np.pi, py) + assert_array_less(py, 3 * np.pi) + py = np.rad2deg(py) + assert_allclose(py, ppod, atol=atol, err_msg=key) + + +@testing.requires_testing_data +def test_boxy_load(): + """Test reading BOXY files.""" + raw = read_raw_boxy(boxy_0_40, verbose=True) + assert raw.info['sfreq'] == 62.5 + _assert_ppod(raw, p_pod_0_40) + + # Grab our different data types. + mne_ph = raw.copy().pick(picks='fnirs_fd_phase') + mne_dc = raw.copy().pick(picks='fnirs_cw_amplitude') + mne_ac = raw.copy().pick(picks='fnirs_fd_ac_amplitude') + + # Check channel names. + first_chans = ['S1_D1', 'S2_D1', 'S3_D1', 'S4_D1', 'S5_D1', + 'S6_D1', 'S7_D1', 'S8_D1', 'S9_D1', 'S10_D1'] + last_chans = ['S1_D8', 'S2_D8', 'S3_D8', 'S4_D8', 'S5_D8', + 'S6_D8', 'S7_D8', 'S8_D8', 'S9_D8', 'S10_D8'] + + assert mne_dc.info['ch_names'][:10] == [i_chan + ' ' + 'DC' + for i_chan in first_chans] + assert mne_ac.info['ch_names'][:10] == [i_chan + ' ' + 'AC' + for i_chan in first_chans] + assert mne_ph.info['ch_names'][:10] == [i_chan + ' ' + 'Ph' + for i_chan in first_chans] + + assert mne_dc.info['ch_names'][70::] == [i_chan + ' ' + 'DC' + for i_chan in last_chans] + assert mne_ac.info['ch_names'][70::] == [i_chan + ' ' + 'AC' + for i_chan in last_chans] + assert mne_ph.info['ch_names'][70::] == [i_chan + ' ' + 'Ph' + for i_chan in last_chans] + + # Since this data set has no 'digaux' for creating trigger annotations, + # let's make sure our Raw object has no annotations. 
+ assert len(raw.annotations) == 0 + + +@testing.requires_testing_data +@pytest.mark.parametrize('fname', (boxy_0_84, boxy_0_84_parsed)) +def test_boxy_filetypes(fname): + """Test reading parsed and unparsed BOXY data files.""" + # BOXY data files can be saved in two formats (parsed and unparsed) which + # mostly determines how the data is organised. + # For parsed files, each row is a single timepoint and all + # source/detector combinations are represented as columns. + # For unparsed files, each row is a source and each group of n rows + # represents a timepoint. For example, if there are ten sources in the raw + # data then the first ten rows represent the ten sources at timepoint 1 + # while the next set of ten rows are the ten sources at timepoint 2. + # Detectors are represented as columns. + + # Since p_pod is designed to only load unparsed files, we will first + # compare MNE and p_pod loaded data from an unparsed data file. If those + # files are comparable, then we will compare the MNE loaded data between + # parsed and unparsed files. + raw = read_raw_boxy(fname, verbose=True) + assert raw.info['sfreq'] == 79.4722 + _assert_ppod(raw, p_pod_0_84) + + # Grab our different data types. + unp_dc = raw.copy().pick('fnirs_cw_amplitude') + unp_ac = raw.copy().pick('fnirs_fd_ac_amplitude') + unp_ph = raw.copy().pick('fnirs_fd_phase') + + # Check channel names. + chans = ['S1_D1', 'S2_D1', 'S3_D1', 'S4_D1', + 'S5_D1', 'S6_D1', 'S7_D1', 'S8_D1'] + + assert unp_dc.info['ch_names'] == [i_chan + ' ' + 'DC' + for i_chan in chans] + assert unp_ac.info['ch_names'] == [i_chan + ' ' + 'AC' + for i_chan in chans] + assert unp_ph.info['ch_names'] == [i_chan + ' ' + 'Ph' + for i_chan in chans] + + +@testing.requires_testing_data +@pytest.mark.parametrize('fname', (boxy_0_84, boxy_0_84_parsed)) +def test_boxy_digaux(fname): + """Test reading BOXY files and generating annotations from digaux.""" + srate = 79.4722 + raw = read_raw_boxy(fname, verbose=True) + + # Grab our different data types. + picks_dc = pick_types(raw.info, fnirs='fnirs_cw_amplitude') + picks_ac = pick_types(raw.info, fnirs='fnirs_fd_ac_amplitude') + picks_ph = pick_types(raw.info, fnirs='fnirs_fd_phase') + assert_array_equal(picks_dc, np.arange(0, 8) * 3 + 0) + assert_array_equal(picks_ac, np.arange(0, 8) * 3 + 1) + assert_array_equal(picks_ph, np.arange(0, 8) * 3 + 2) + + # Check that our event order matches what we expect. + event_list = ['1.0', '2.0', '3.0', '4.0', '5.0'] + assert_array_equal(raw.annotations.description, event_list) + + # Check that our event timings are what we expect. + event_onset = [i_time * (1.0 / srate) for i_time in + [105, 185, 265, 344, 424]] + assert_allclose(raw.annotations.onset, event_onset, atol=1e-6) + + # Now let's compare parsed and unparsed events to p_pod loaded digaux. + # Load our p_pod data. + ppod_data = spio.loadmat(p_pod_0_84) + ppod_digaux = np.transpose(ppod_data['digaux'])[0] + + # Now let's get our triggers from the p_pod digaux. + # We only want the first instance of each trigger. 
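+    # (Same first-instance rule as the reader: e.g. digaux [0, 2, 2, 2, 0]
+    #  gives one onset at sample 1 with duration 3 / srate.)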
+ prev_mrk = 0 + mrk_idx = list() + duration = list() + tmp_dur = 0 + for i_num, i_mrk in enumerate(ppod_digaux): + if i_mrk != 0 and i_mrk != prev_mrk: + mrk_idx.append(i_num) + if i_mrk != 0 and i_mrk == prev_mrk: + tmp_dur += 1 + if i_mrk == 0 and i_mrk != prev_mrk: + duration.append((tmp_dur + 1) * (1.0 / srate)) + tmp_dur = 0 + prev_mrk = i_mrk + onset = np.asarray([i_mrk * (1.0 / srate) for i_mrk in mrk_idx]) + description = np.asarray([str(float(i_mrk))for i_mrk in + ppod_digaux[mrk_idx]]) + assert_array_equal(raw.annotations.description, description) + assert_allclose(raw.annotations.onset, onset, atol=1e-6) + + +@testing.requires_testing_data +@pytest.mark.parametrize('fname', (boxy_0_40, boxy_0_84, boxy_0_84_parsed)) +def test_raw_properties(fname): + """Test raw reader properties.""" + _test_raw_reader(read_raw_boxy, fname=fname, boundary_decimal=1) diff --git a/mne/io/edf/tests/test_edf.py b/mne/io/edf/tests/test_edf.py index d4507d47a53..a96203c7c2d 100644 --- a/mne/io/edf/tests/test_edf.py +++ b/mne/io/edf/tests/test_edf.py @@ -27,9 +27,8 @@ from mne.io.edf.edf import _read_annotations_edf from mne.io.edf.edf import _read_ch from mne.io.edf.edf import _parse_prefilter_string -from mne.io.pick import channel_indices_by_type +from mne.io.pick import channel_indices_by_type, get_channel_type_constants from mne.annotations import events_from_annotations, read_annotations -from mne.io.meas_info import _kind_dict as _KIND_DICT FILE = inspect.getfile(inspect.currentframe()) @@ -363,7 +362,9 @@ def test_load_generator(fname, recwarn): def test_edf_stim_ch_pick_up(test_input, EXPECTED): """Test stim_channel.""" # This is fragile for EEG/EEG-CSD, so just omit csd - TYPE_LUT = {v[0]: k for k, v in _KIND_DICT.items() if k != 'csd'} + KIND_DICT = get_channel_type_constants() + TYPE_LUT = {v['kind']: k for k, v in KIND_DICT.items() if k not in + ('csd', 'chpi')} # chpi not needed, and unhashable (a list) fname = op.join(data_dir, 'test_stim_channel.edf') raw = read_raw_edf(fname, stim_channel=test_input) diff --git a/mne/io/meas_info.py b/mne/io/meas_info.py index 3e676afdd55..135334e77a2 100644 --- a/mne/io/meas_info.py +++ b/mne/io/meas_info.py @@ -17,7 +17,8 @@ import numpy as np from scipy import linalg -from .pick import channel_type, pick_channels, pick_info +from .pick import (channel_type, pick_channels, pick_info, + get_channel_type_constants) from .constants import FIFF, _coord_frame_named from .open import fiff_open from .tree import dir_tree_find @@ -40,30 +41,6 @@ b = bytes # alias -_kind_dict = dict( - eeg=(FIFF.FIFFV_EEG_CH, FIFF.FIFFV_COIL_EEG, FIFF.FIFF_UNIT_V), - mag=(FIFF.FIFFV_MEG_CH, FIFF.FIFFV_COIL_VV_MAG_T3, FIFF.FIFF_UNIT_T), - grad=(FIFF.FIFFV_MEG_CH, FIFF.FIFFV_COIL_VV_PLANAR_T1, FIFF.FIFF_UNIT_T_M), - ref_meg=(FIFF.FIFFV_REF_MEG_CH, FIFF.FIFFV_COIL_VV_MAG_T3, - FIFF.FIFF_UNIT_T), - misc=(FIFF.FIFFV_MISC_CH, FIFF.FIFFV_COIL_NONE, FIFF.FIFF_UNIT_NONE), - stim=(FIFF.FIFFV_STIM_CH, FIFF.FIFFV_COIL_NONE, FIFF.FIFF_UNIT_V), - eog=(FIFF.FIFFV_EOG_CH, FIFF.FIFFV_COIL_NONE, FIFF.FIFF_UNIT_V), - ecg=(FIFF.FIFFV_ECG_CH, FIFF.FIFFV_COIL_NONE, FIFF.FIFF_UNIT_V), - emg=(FIFF.FIFFV_EMG_CH, FIFF.FIFFV_COIL_NONE, FIFF.FIFF_UNIT_V), - seeg=(FIFF.FIFFV_SEEG_CH, FIFF.FIFFV_COIL_EEG, FIFF.FIFF_UNIT_V), - bio=(FIFF.FIFFV_BIO_CH, FIFF.FIFFV_COIL_NONE, FIFF.FIFF_UNIT_V), - ecog=(FIFF.FIFFV_ECOG_CH, FIFF.FIFFV_COIL_EEG, FIFF.FIFF_UNIT_V), - fnirs_cw_amplitude=(FIFF.FIFFV_FNIRS_CH, - FIFF.FIFFV_COIL_FNIRS_CW_AMPLITUDE, FIFF.FIFF_UNIT_V), - fnirs_od=(FIFF.FIFFV_FNIRS_CH, 
FIFF.FIFFV_COIL_FNIRS_OD, - FIFF.FIFF_UNIT_NONE), - hbo=(FIFF.FIFFV_FNIRS_CH, FIFF.FIFFV_COIL_FNIRS_HBO, FIFF.FIFF_UNIT_MOL), - hbr=(FIFF.FIFFV_FNIRS_CH, FIFF.FIFFV_COIL_FNIRS_HBR, FIFF.FIFF_UNIT_MOL), - csd=(FIFF.FIFFV_EEG_CH, FIFF.FIFFV_COIL_EEG_CSD, FIFF.FIFF_UNIT_V_M2), -) - - _SCALAR_CH_KEYS = ('scanno', 'logno', 'kind', 'range', 'cal', 'coil_type', 'unit', 'unit_mul', 'coord_frame') _ALL_CH_KEYS_SET = set(_SCALAR_CH_KEYS + ('loc', 'ch_name')) @@ -2018,20 +1995,26 @@ def create_info(ch_names, sfreq, ch_types='misc', verbose=None): '(%s != %s) for ch_types=%s' % (len(ch_types), nchan, ch_types)) info = _empty_info(sfreq) - for ci, (name, kind) in enumerate(zip(ch_names, ch_types)): - _validate_type(name, 'str', "each entry in ch_names") - _validate_type(kind, 'str', "each entry in ch_types") - if kind not in _kind_dict: - raise KeyError('kind must be one of %s, not %s' - % (list(_kind_dict.keys()), kind)) - kind = _kind_dict[kind] + ch_types_dict = get_channel_type_constants(include_defaults=True) + for ci, (ch_name, ch_type) in enumerate(zip(ch_names, ch_types)): + _validate_type(ch_name, 'str', "each entry in ch_names") + _validate_type(ch_type, 'str', "each entry in ch_types") + if ch_type not in ch_types_dict: + raise KeyError(f'kind must be one of {list(ch_types_dict)}, ' + f'not {ch_type}') + this_ch_dict = ch_types_dict[ch_type] + kind = this_ch_dict['kind'] + # handle chpi, where kind is a *list* of FIFF constants: + kind = kind[0] if isinstance(kind, (list, tuple)) else kind # mirror what tag.py does here - coord_frame = _ch_coord_dict.get(kind[0], FIFF.FIFFV_COORD_UNKNOWN) + coord_frame = _ch_coord_dict.get(kind, FIFF.FIFFV_COORD_UNKNOWN) + coil_type = this_ch_dict.get('coil_type', FIFF.FIFFV_COIL_NONE) + unit = this_ch_dict.get('unit', FIFF.FIFF_UNIT_NONE) chan_info = dict(loc=np.full(12, np.nan), unit_mul=FIFF.FIFF_UNITM_NONE, range=1., cal=1., - kind=kind[0], coil_type=kind[1], - unit=kind[2], coord_frame=coord_frame, - ch_name=str(name), scanno=ci + 1, logno=ci + 1) + kind=kind, coil_type=coil_type, unit=unit, + coord_frame=coord_frame, ch_name=str(ch_name), + scanno=ci + 1, logno=ci + 1) info['chs'].append(chan_info) info._update_redundant() diff --git a/mne/io/pick.py b/mne/io/pick.py index b00edcee4fa..4a7ae650994 100644 --- a/mne/io/pick.py +++ b/mne/io/pick.py @@ -15,51 +15,102 @@ _check_option) -def get_channel_type_constants(): - """Return all known channel types. +def get_channel_type_constants(include_defaults=False): + """Return all known channel types, and associated FIFF constants. + + Parameters + ---------- + include_defaults : bool + Whether to include default values for "unit" and "coil_type" for all + entries (see Notes). Defaults are generally based on values normally + present for a VectorView MEG system. Defaults to ``False``. Returns ------- channel_types : dict - The keys contain the channel types, and the values contain the - corresponding values in the info['chs'][idx] dictionary. + The keys are channel type strings, and the values are dictionaries of + FIFF constants for "kind", and possibly "unit" and "coil_type". + + Notes + ----- + Values which might vary within a channel type across real data + recordings are excluded unless ``include_defaults=True``. For example, + "ref_meg" channels may have coil type + ``FIFFV_COIL_MAGNES_OFFDIAG_REF_GRAD``, ``FIFFV_COIL_VV_MAG_T3``, etc + (depending on the recording system), so no "coil_type" entry is given + for "ref_meg" unless ``include_defaults`` is requested. 
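+    For example, ``get_channel_type_constants()['hbo']`` contains
+    ``kind=FIFFV_FNIRS_CH``, ``unit=FIFF_UNIT_MOL`` and
+    ``coil_type=FIFFV_COIL_FNIRS_HBO``.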
""" - return dict(grad=dict(kind=FIFF.FIFFV_MEG_CH, - unit=FIFF.FIFF_UNIT_T_M), - mag=dict(kind=FIFF.FIFFV_MEG_CH, - unit=FIFF.FIFF_UNIT_T), + base = dict(grad=dict(kind=FIFF.FIFFV_MEG_CH, unit=FIFF.FIFF_UNIT_T_M), + mag=dict(kind=FIFF.FIFFV_MEG_CH, unit=FIFF.FIFF_UNIT_T), ref_meg=dict(kind=FIFF.FIFFV_REF_MEG_CH), - eeg=dict(kind=FIFF.FIFFV_EEG_CH), + eeg=dict(kind=FIFF.FIFFV_EEG_CH, + unit=FIFF.FIFF_UNIT_V, + coil_type=FIFF.FIFFV_COIL_EEG), + seeg=dict(kind=FIFF.FIFFV_SEEG_CH, + unit=FIFF.FIFF_UNIT_V, + coil_type=FIFF.FIFFV_COIL_EEG), + ecog=dict(kind=FIFF.FIFFV_ECOG_CH, + unit=FIFF.FIFF_UNIT_V, + coil_type=FIFF.FIFFV_COIL_EEG), + eog=dict(kind=FIFF.FIFFV_EOG_CH, unit=FIFF.FIFF_UNIT_V), + emg=dict(kind=FIFF.FIFFV_EMG_CH, unit=FIFF.FIFF_UNIT_V), + ecg=dict(kind=FIFF.FIFFV_ECG_CH, unit=FIFF.FIFF_UNIT_V), + bio=dict(kind=FIFF.FIFFV_BIO_CH, unit=FIFF.FIFF_UNIT_V), + misc=dict(kind=FIFF.FIFFV_MISC_CH, unit=FIFF.FIFF_UNIT_V), stim=dict(kind=FIFF.FIFFV_STIM_CH), - eog=dict(kind=FIFF.FIFFV_EOG_CH), - emg=dict(kind=FIFF.FIFFV_EMG_CH), - ecg=dict(kind=FIFF.FIFFV_ECG_CH), resp=dict(kind=FIFF.FIFFV_RESP_CH), - misc=dict(kind=FIFF.FIFFV_MISC_CH), exci=dict(kind=FIFF.FIFFV_EXCI_CH), - ias=dict(kind=FIFF.FIFFV_IAS_CH), syst=dict(kind=FIFF.FIFFV_SYST_CH), - seeg=dict(kind=FIFF.FIFFV_SEEG_CH), - bio=dict(kind=FIFF.FIFFV_BIO_CH), + ias=dict(kind=FIFF.FIFFV_IAS_CH), + gof=dict(kind=FIFF.FIFFV_GOODNESS_FIT), + dipole=dict(kind=FIFF.FIFFV_DIPOLE_WAVE), chpi=dict(kind=[FIFF.FIFFV_QUAT_0, FIFF.FIFFV_QUAT_1, FIFF.FIFFV_QUAT_2, FIFF.FIFFV_QUAT_3, FIFF.FIFFV_QUAT_4, FIFF.FIFFV_QUAT_5, FIFF.FIFFV_QUAT_6, FIFF.FIFFV_HPI_G, FIFF.FIFFV_HPI_ERR, FIFF.FIFFV_HPI_MOV]), - dipole=dict(kind=FIFF.FIFFV_DIPOLE_WAVE), - gof=dict(kind=FIFF.FIFFV_GOODNESS_FIT), - ecog=dict(kind=FIFF.FIFFV_ECOG_CH), fnirs_cw_amplitude=dict( kind=FIFF.FIFFV_FNIRS_CH, + unit=FIFF.FIFF_UNIT_V, coil_type=FIFF.FIFFV_COIL_FNIRS_CW_AMPLITUDE), + fnirs_fd_ac_amplitude=dict( + kind=FIFF.FIFFV_FNIRS_CH, + unit=FIFF.FIFF_UNIT_V, + coil_type=FIFF.FIFFV_COIL_FNIRS_FD_AC_AMPLITUDE), + fnirs_fd_phase=dict( + kind=FIFF.FIFFV_FNIRS_CH, + unit=FIFF.FIFF_UNIT_RAD, + coil_type=FIFF.FIFFV_COIL_FNIRS_FD_PHASE), fnirs_od=dict(kind=FIFF.FIFFV_FNIRS_CH, coil_type=FIFF.FIFFV_COIL_FNIRS_OD), hbo=dict(kind=FIFF.FIFFV_FNIRS_CH, + unit=FIFF.FIFF_UNIT_MOL, coil_type=FIFF.FIFFV_COIL_FNIRS_HBO), hbr=dict(kind=FIFF.FIFFV_FNIRS_CH, + unit=FIFF.FIFF_UNIT_MOL, coil_type=FIFF.FIFFV_COIL_FNIRS_HBR), csd=dict(kind=FIFF.FIFFV_EEG_CH, + unit=FIFF.FIFF_UNIT_V_M2, coil_type=FIFF.FIFFV_COIL_EEG_CSD)) + if include_defaults: + coil_none = dict(coil_type=FIFF.FIFFV_COIL_NONE) + unit_none = dict(unit=FIFF.FIFF_UNIT_NONE) + defaults = dict( + grad=dict(coil_type=FIFF.FIFFV_COIL_VV_PLANAR_T1), + mag=dict(coil_type=FIFF.FIFFV_COIL_VV_MAG_T3), + ref_meg=dict(coil_type=FIFF.FIFFV_COIL_VV_MAG_T3, + unit=FIFF.FIFF_UNIT_T), + misc=dict(**coil_none, **unit_none), # NB: overwrites UNIT_V + stim=dict(unit=FIFF.FIFF_UNIT_V, **coil_none), + eog=coil_none, + ecg=coil_none, + emg=coil_none, + bio=coil_none, + fnirs_od=unit_none, + ) + for key, value in defaults.items(): + base[key].update(value) + return base _first_rule = { @@ -100,6 +151,10 @@ def get_channel_type_constants(): FIFF.FIFFV_COIL_FNIRS_HBR: 'hbr', FIFF.FIFFV_COIL_FNIRS_CW_AMPLITUDE: 'fnirs_cw_amplitude', + FIFF.FIFFV_COIL_FNIRS_FD_AC_AMPLITUDE: + 'fnirs_fd_ac_amplitude', + FIFF.FIFFV_COIL_FNIRS_FD_PHASE: + 'fnirs_fd_phase', FIFF.FIFFV_COIL_FNIRS_OD: 'fnirs_od', }), 'eeg': ('coil_type', {FIFF.FIFFV_COIL_EEG: 'eeg', @@ -272,6 +327,12 @@ def 
_triage_fnirs_pick(ch, fnirs, warned): elif ch['coil_type'] == FIFF.FIFFV_COIL_FNIRS_CW_AMPLITUDE and \ fnirs == 'fnirs_cw_amplitude': return True + elif ch['coil_type'] == FIFF.FIFFV_COIL_FNIRS_FD_AC_AMPLITUDE and \ + fnirs == 'fnirs_fd_ac_amplitude': + return True + elif ch['coil_type'] == FIFF.FIFFV_COIL_FNIRS_FD_PHASE and \ + fnirs == 'fnirs_fd_phase': + return True elif ch['coil_type'] == FIFF.FIFFV_COIL_FNIRS_OD and fnirs == 'fnirs_od': return True return False @@ -403,7 +464,7 @@ def pick_types(info, meg=False, eeg=False, stim=False, eog=False, ecg=False, for key in ('grad', 'mag'): param_dict[key] = meg if isinstance(fnirs, bool): - for key in ('hbo', 'hbr', 'fnirs_cw_amplitude', 'fnirs_od'): + for key in _FNIRS_CH_TYPES_SPLIT: param_dict[key] = fnirs warned = [False] for k in range(nchan): @@ -411,8 +472,8 @@ def pick_types(info, meg=False, eeg=False, stim=False, eog=False, ecg=False, try: pick[k] = param_dict[ch_type] except KeyError: # not so simple - assert ch_type in ('grad', 'mag', 'hbo', 'hbr', 'ref_meg', - 'fnirs_cw_amplitude', 'fnirs_od') + assert ch_type in ( + 'grad', 'mag', 'ref_meg') + _FNIRS_CH_TYPES_SPLIT if ch_type in ('grad', 'mag'): pick[k] = _triage_meg_pick(info['chs'][k], meg) elif ch_type == 'ref_meg': @@ -703,7 +764,8 @@ def channel_indices_by_type(info, picks=None): idx_by_type = {key: list() for key in _PICK_TYPES_KEYS if key not in ('meg', 'fnirs')} idx_by_type.update(mag=list(), grad=list(), hbo=list(), hbr=list(), - fnirs_cw_amplitude=list(), fnirs_od=list()) + fnirs_cw_amplitude=list(), fnirs_fd_ac_amplitude=list(), + fnirs_fd_phase=list(), fnirs_od=list()) picks = _picks_to_idx(info, picks, none='all', exclude=(), allow_empty=True) for k in picks: @@ -791,8 +853,8 @@ def _contains_ch_type(info, ch_type): """ _validate_type(ch_type, 'str', "ch_type") - meg_extras = ['mag', 'grad', 'planar1', 'planar2'] - fnirs_extras = ['hbo', 'hbr', 'fnirs_cw_amplitude', 'fnirs_od'] + meg_extras = list(_MEG_CH_TYPES_SPLIT) + fnirs_extras = list(_FNIRS_CH_TYPES_SPLIT) valid_channel_types = sorted([key for key in _PICK_TYPES_KEYS if key != 'meg'] + meg_extras + fnirs_extras) _check_option('ch_type', ch_type, valid_channel_types) @@ -896,21 +958,19 @@ def _check_excludes_includes(chs, info=None, allow_bads=False): misc=False, resp=False, chpi=False, exci=False, ias=False, syst=False, seeg=True, dipole=False, gof=False, bio=False, ecog=True, fnirs=True) _PICK_TYPES_KEYS = tuple(list(_PICK_TYPES_DATA_DICT) + ['ref_meg']) -_DATA_CH_TYPES_SPLIT = ('mag', 'grad', 'eeg', 'csd', 'seeg', 'ecog', - 'hbo', 'hbr', 'fnirs_cw_amplitude', 'fnirs_od') -_DATA_CH_TYPES_ORDER_DEFAULT = ('mag', 'grad', 'eeg', 'csd', 'eog', 'ecg', - 'emg', 'ref_meg', 'misc', 'stim', 'resp', - 'chpi', 'exci', 'ias', 'syst', 'seeg', 'bio', - 'ecog', 'hbo', 'hbr', 'fnirs_cw_amplitude', - 'fnirs_od', 'whitened') - -# Valid data types, ordered for consistency, used in viz/evoked. 
-_VALID_CHANNEL_TYPES = ('eeg', 'grad', 'mag', 'seeg', 'eog', 'ecg', 'emg', - 'dipole', 'gof', 'bio', 'ecog', 'hbo', 'hbr', - 'fnirs_cw_amplitude', 'fnirs_od', 'misc', 'csd') - _MEG_CH_TYPES_SPLIT = ('mag', 'grad', 'planar1', 'planar2') -_FNIRS_CH_TYPES_SPLIT = ('hbo', 'hbr', 'fnirs_cw_amplitude', 'fnirs_od') +_FNIRS_CH_TYPES_SPLIT = ('hbo', 'hbr', 'fnirs_cw_amplitude', + 'fnirs_fd_ac_amplitude', 'fnirs_fd_phase', 'fnirs_od') +_DATA_CH_TYPES_ORDER_DEFAULT = ( + 'mag', 'grad', 'eeg', 'csd', 'eog', 'ecg', 'emg', 'ref_meg', 'misc', + 'stim', 'resp', 'chpi', 'exci', 'ias', 'syst', 'seeg', 'bio', + 'ecog') + _FNIRS_CH_TYPES_SPLIT + ('whitened',) +# Valid data types, ordered for consistency, used in viz/evoked. +_VALID_CHANNEL_TYPES = ( + 'eeg', 'grad', 'mag', 'seeg', 'eog', 'ecg', 'emg', 'dipole', 'gof', 'bio', + 'ecog') + _FNIRS_CH_TYPES_SPLIT + ('misc', 'csd') +_DATA_CH_TYPES_SPLIT = ( + 'mag', 'grad', 'eeg', 'csd', 'seeg', 'ecog') + _FNIRS_CH_TYPES_SPLIT def _pick_data_channels(info, exclude='bads', with_ref_meg=True): diff --git a/mne/io/tests/test_pick.py b/mne/io/tests/test_pick.py index 0741379780a..ed089b23334 100644 --- a/mne/io/tests/test_pick.py +++ b/mne/io/tests/test_pick.py @@ -69,8 +69,8 @@ def _channel_type_old(info, idx): # iterate through all defined channel types until we find a match with ch # go in order from most specific (most rules entries) to least specific - channel_types = sorted( - get_channel_type_constants().items(), key=lambda x: len(x[1]))[::-1] + channel_types = sorted(get_channel_type_constants().items(), + key=lambda x: len(x[1]), reverse=True) for t, rules in channel_types: for key, vals in rules.items(): # all keys must match the values if ch.get(key, None) not in np.array(vals): @@ -78,7 +78,7 @@ def _channel_type_old(info, idx): else: return t - raise ValueError('Unknown channel type for {}'.format(ch["ch_name"])) + raise ValueError(f'Unknown channel type for {ch["ch_name"]}') def _assert_channel_types(info): @@ -112,8 +112,10 @@ def test_pick_refs(): for info in infos: info['bads'] = [] _assert_channel_types(info) - pytest.raises(ValueError, pick_types, info, meg='foo') - pytest.raises(ValueError, pick_types, info, ref_meg='foo') + with pytest.raises(ValueError, match="'planar2'] or bool, not foo"): + pick_types(info, meg='foo') + with pytest.raises(ValueError, match="'planar2', 'auto'] or bool,"): + pick_types(info, ref_meg='foo') picks_meg_ref = pick_types(info, meg=True, ref_meg=True) picks_meg = pick_types(info, meg=True, ref_meg=False) picks_ref = pick_types(info, meg=False, ref_meg=True) diff --git a/mne/io/utils.py b/mne/io/utils.py index f33a43a2b74..a272cd23065 100644 --- a/mne/io/utils.py +++ b/mne/io/utils.py @@ -77,7 +77,8 @@ def _find_channels(ch_names, ch_type='EOG'): def _mult_cal_one(data_view, one, idx, cals, mult): """Take a chunk of raw data, multiply by mult or cals, and store.""" one = np.asarray(one, dtype=data_view.dtype) - assert data_view.shape[1] == one.shape[1] + assert data_view.shape[1] == one.shape[1], \ + (data_view.shape[1], one.shape[1]) if mult is not None: mult.ndim == one.ndim == 2 data_view[:] = mult @ one[idx] diff --git a/mne/preprocessing/__init__.py b/mne/preprocessing/__init__.py index 0d48c102775..dddf063846c 100644 --- a/mne/preprocessing/__init__.py +++ b/mne/preprocessing/__init__.py @@ -19,6 +19,7 @@ from .infomax_ import infomax from .stim import fix_stim_artifact from .maxwell import maxwell_filter, find_bad_channels_maxwell +from .realign import realign_raw from .xdawn import Xdawn from ._csd import 
compute_current_source_density from . import nirs diff --git a/mne/preprocessing/realign.py b/mne/preprocessing/realign.py new file mode 100644 index 00000000000..1f5987f11e9 --- /dev/null +++ b/mne/preprocessing/realign.py @@ -0,0 +1,107 @@ +# -*- coding: utf-8 -*- +# Authors: Eric Larson  + +# License: BSD (3-clause) + +import numpy as np + +from ..io import BaseRaw +from ..utils import _validate_type, warn, logger, verbose + + +@verbose +def realign_raw(raw, other, t_raw, t_other, verbose=None): + """Realign two simultaneous recordings. + + Due to clock drift, recordings made simultaneously by two separate devices + at nominally the same sample rate can drift out of sync over time. This + function uses event times captured by both acquisition devices to resample + ``other`` to match ``raw``. + + Parameters + ---------- + raw : instance of Raw + The first raw instance. + other : instance of Raw + The second raw instance. It will be resampled to match ``raw``. + t_raw : array-like, shape (n_events,) + The times of shared events in ``raw`` relative to ``raw.times[0]`` (0). + Typically these could be derived from events on some TTL channel like + ``(find_events(raw)[:, 0] - raw.first_samp) / raw.info['sfreq']``. + t_other : array-like, shape (n_events,) + The times of shared events in ``other`` relative to ``other.times[0]``. + %(verbose)s + + Notes + ----- + This function operates in place. It will: + + 1. Estimate the zero-order (start offset) and first-order (clock drift) + correction. + 2. Crop the start of ``raw`` or ``other``, depending on which started + recording first. + 3. Resample ``other`` to match ``raw`` based on the clock drift. + 4. Crop the end of ``raw`` or ``other``, depending on which stopped + recording first (and the clock drift rate). + + This function is primarily designed to work on recordings made at the same + sample rate, but it can also operate on recordings made at different + sample rates to resample and deal with clock drift simultaneously. + + .. versionadded:: 0.22 + """ + from scipy import stats + _validate_type(raw, BaseRaw, 'raw') + _validate_type(other, BaseRaw, 'other') + t_raw = np.array(t_raw, float) + t_other = np.array(t_other, float) + if t_raw.ndim != 1 or t_raw.shape != t_other.shape: + raise ValueError('t_raw and t_other must be 1D with the same shape, ' + f'got shapes {t_raw.shape} and {t_other.shape}') + if len(t_raw) < 20: + warn('Fewer than 20 times passed, results may be unreliable') + + # 1. Compute correction factors + coef = np.polyfit(t_other, t_raw, deg=1) + r, p = stats.pearsonr(t_other, t_raw) + msg = f'Linear correlation computed as R={r:0.3f} and p={p:0.2e}' + if p > 0.05 or r <= 0: + raise ValueError(msg + ', cannot resample safely') + if p > 1e-6: + warn(msg + ', results may be unreliable') + else: + logger.info(msg) + dr_ms_s = 1000 * abs(1 - coef[0]) + logger.info( + f'Drift rate: {1000 * dr_ms_s:0.1f} μs/sec ' + f'(total drift over {raw.times[-1]:0.1f} sec recording: ' + f'{raw.times[-1] * dr_ms_s:0.1f} ms)') + + # 2. Crop start of recordings to match using the zero-order term + msg = f'Cropping {coef[1]:0.3f} sec from the start of ' + if coef[1] > 0: # need to crop start of raw to match other + logger.info(msg + 'raw') + raw.crop(coef[1], None) + t_raw -= coef[1] + else: # need to crop start of other to match raw + logger.info(msg + 'other') + other.crop(-coef[1], None) + t_other += coef[1] + + # 3.
Resample data using the first-order term + logger.info('Resampling other') + coef = coef[0] + sfreq_new = raw.info['sfreq'] * coef + other.load_data().resample(sfreq_new, verbose=True) + other.info['sfreq'] = raw.info['sfreq'] + other._update_times() + + # 4. Crop the end of one of the recordings if necessary + delta = raw.times[-1] - other.times[-1] + msg = f'Cropping {abs(delta):0.3f} sec from the end of ' + if delta > 0: + logger.info(msg + 'raw') + raw.crop(0, other.times[-1]) + elif delta < 0: + logger.info(msg + 'other') + other.crop(0, raw.times[-1]) diff --git a/mne/preprocessing/tests/test_ica.py b/mne/preprocessing/tests/test_ica.py index c510dfb1472..fc21a218d59 100644 --- a/mne/preprocessing/tests/test_ica.py +++ b/mne/preprocessing/tests/test_ica.py @@ -25,8 +25,7 @@ from mne.preprocessing.ica import (get_score_funcs, corrmap, _sort_components, _ica_explained_variance, read_ica_eeglab) from mne.io import read_raw_fif, Info, RawArray, read_raw_ctf, read_raw_eeglab -from mne.io.meas_info import _kind_dict -from mne.io.pick import _DATA_CH_TYPES_SPLIT +from mne.io.pick import _DATA_CH_TYPES_SPLIT, get_channel_type_constants from mne.io.eeglab.eeglab import _check_load_mat from mne.rank import _compute_rank_int from mne.utils import catch_logging, requires_sklearn, run_tests_if_main @@ -1005,7 +1004,7 @@ def test_fit_params(method, tmpdir): def test_bad_channels(method, allow_ref_meg): """Test exception when unsupported channels are used.""" _skip_check_picard(method) - chs = [i for i in _kind_dict] + chs = list(get_channel_type_constants()) info = create_info(len(chs), 500, chs) rng = np.random.RandomState(0) data = rng.rand(len(chs), 50) diff --git a/mne/preprocessing/tests/test_maxwell.py b/mne/preprocessing/tests/test_maxwell.py index ccdbe972934..da75146aabf 100644 --- a/mne/preprocessing/tests/test_maxwell.py +++ b/mne/preprocessing/tests/test_maxwell.py @@ -1083,9 +1083,10 @@ def test_shielding_factor(tmpdir): for line in fid: fid_out.write(' '.join(line.strip().split(' ')[:14]) + '\n') with get_n_projected() as counts: - raw_sss = maxwell_filter(raw_erm, calibration=temp_fname, - cross_talk=ctc_fname, st_duration=1., - coord_frame='meg', regularize='in') + with pytest.warns(None): # SVD convergence sometimes + raw_sss = maxwell_filter(raw_erm, calibration=temp_fname, + cross_talk=ctc_fname, st_duration=1., + coord_frame='meg', regularize='in') # Our 3D cal has worse defaults for this ERM than the 1D file _assert_shielding(raw_sss, erm_power, 44, 45) assert counts[0] == 3 diff --git a/mne/preprocessing/tests/test_realign.py b/mne/preprocessing/tests/test_realign.py new file mode 100644 index 00000000000..7434f597348 --- /dev/null +++ b/mne/preprocessing/tests/test_realign.py @@ -0,0 +1,116 @@ +# Author: Mark Wronkiewicz +# +# License: BSD (3-clause) + +import numpy as np +from numpy.testing import assert_allclose +from scipy.interpolate import interp1d +import pytest + +from mne import create_info, find_events, Epochs +from mne.io import RawArray +from mne.preprocessing import realign_raw + + +@pytest.mark.parametrize('ratio_other', (1., 0.999, 1.001)) # drifts +@pytest.mark.parametrize('start_raw, start_other', [(0, 0), (0, 3), (3, 0)]) +@pytest.mark.parametrize('stop_raw, stop_other', [(0, 0), (0, 3), (3, 0)]) +def test_realign(ratio_other, start_raw, start_other, stop_raw, stop_other): + """Test realigning raw.""" + # construct a true signal + sfreq = 100. 
+ duration = 50 + stop_raw = duration - stop_raw + stop_other = duration - stop_other + signal = np.zeros(int(round((duration + 1) * sfreq))) + orig_events = np.round( + np.arange(max(start_raw, start_other) + 2, + min(stop_raw, stop_other) - 2) * sfreq).astype(int) + signal[orig_events] = 1. + n_events = len(orig_events) + times = np.arange(len(signal)) / sfreq + stim = np.convolve(signal, np.ones(int(round(0.02 * sfreq))))[:len(times)] + signal = np.convolve( + signal, np.hanning(int(round(0.2 * sfreq))))[:len(times)] + + # construct our sampled versions of these signals (linear interp is fine) + sfreq_raw = sfreq + sfreq_other = ratio_other * sfreq + raw_times = np.arange(start_raw, stop_raw, 1. / sfreq_raw) + other_times = np.arange(start_other, stop_other, 1. / sfreq_other) + assert raw_times[0] >= times[0] + assert raw_times[-1] <= times[-1] + assert other_times[0] >= times[0] + assert other_times[-1] <= times[-1] + data_raw = np.array( + [interp1d(times, d, kind)(raw_times) + for d, kind in ((signal, 'linear'), (stim, 'nearest'))]) + data_other = np.array( + [interp1d(times, d, kind)(other_times) + for d, kind in ((signal, 'linear'), (stim, 'nearest'))]) + info_raw = create_info( + ['raw_data', 'raw_stim'], sfreq, ['eeg', 'stim']) + info_other = create_info( + ['other_data', 'other_stim'], sfreq, ['eeg', 'stim']) + raw = RawArray(data_raw, info_raw, first_samp=111) + other = RawArray(data_other, info_other, first_samp=222) + + # naive processing + evoked_raw, events_raw, _, events_other = _assert_similarity( + raw, other, n_events) + if start_raw == start_other: # can just naively crop + a, b = data_raw[0], data_other[0] + n = min(len(a), len(b)) + corr = np.corrcoef(a[:n], b[:n])[0, 1] + min_, max_ = (0.99999, 1.) if sfreq_raw == sfreq_other else (0.8, 0.9) + assert min_ <= corr <= max_ + + # realign + t_raw = (events_raw[:, 0] - raw.first_samp) / other.info['sfreq'] + t_other = (events_other[:, 0] - other.first_samp) / other.info['sfreq'] + assert duration - 10 <= len(events_raw) < duration + raw_orig, other_orig = raw.copy(), other.copy() + realign_raw(raw, other, t_raw, t_other) + + # old events should still work for raw and produce the same result + evoked_raw_2, _, _, _ = _assert_similarity( + raw, other, n_events, events_raw=events_raw) + assert_allclose(evoked_raw.data, evoked_raw_2.data) + assert_allclose(raw.times, other.times) + # raw data now aligned + corr = np.corrcoef(raw.get_data([0])[0], other.get_data([0])[0])[0, 1] + assert 0.99 < corr <= 1. 
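# --- Editorial sketch (not part of the test above): a minimal, self-contained
# illustration of how realign_raw (added in mne/preprocessing/realign.py in this
# diff) is typically called. All names and numbers below are illustrative
# assumptions, and the two "recordings" are identical copies, so the realignment
# is effectively a no-op here; in practice they would come from two devices
# whose clocks drift apart.
import numpy as np
from mne import create_info, find_events
from mne.io import RawArray
from mne.preprocessing import realign_raw

sfreq, n_sec = 100., 30
stim = np.zeros(int(sfreq * n_sec))
for samp in np.arange(1, n_sec - 1) * int(sfreq):  # one 3-sample pulse per sec
    stim[samp:samp + 3] = 1.
info = create_info(['STI 014'], sfreq, ['stim'])
raw_a = RawArray(stim[np.newaxis], info.copy())
raw_b = RawArray(stim[np.newaxis], info.copy())  # stand-in for a second device

# Shared event times in seconds, relative to the start of each recording
t_a = (find_events(raw_a)[:, 0] - raw_a.first_samp) / raw_a.info['sfreq']
t_b = (find_events(raw_b)[:, 0] - raw_b.first_samp) / raw_b.info['sfreq']
realign_raw(raw_a, raw_b, t_a, t_b)  # crops and resamples raw_b in place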
+ + # Degenerate conditions -- only test in one run + test_degenerate = (start_raw == start_other and + stop_raw == stop_other and + ratio_other == 1) + if not test_degenerate: + return + # these alignments will not be correct but it shouldn't matter + with pytest.warns(RuntimeWarning, match='^Fewer.*may be unreliable.*'): + realign_raw(raw, other, raw_times[:5], other_times[:5]) + with pytest.raises(ValueError, match='same shape'): + realign_raw(raw_orig, other_orig, raw_times[:5], other_times) + rand_times = np.random.RandomState(0).randn(len(other_times)) + with pytest.raises(ValueError, match='cannot resample safely'): + realign_raw(raw_orig, other_orig, rand_times, other_times) + with pytest.warns(RuntimeWarning, match='.*computed as R=.*unreliable'): + realign_raw( + raw_orig, other_orig, raw_times + rand_times * 1000, other_times) + + +def _assert_similarity(raw, other, n_events, events_raw=None): + if events_raw is None: + events_raw = find_events(raw) + events_other = find_events(other) + assert len(events_raw) == n_events + assert len(events_other) == n_events + kwargs = dict(baseline=None, tmin=0, tmax=0.2) + evoked_raw = Epochs(raw, events_raw, **kwargs).average() + evoked_other = Epochs(other, events_other, **kwargs).average() + assert evoked_raw.nave == evoked_other.nave == len(events_raw) + assert len(evoked_raw.data) == len(evoked_other.data) == 1 # just EEG + corr = np.corrcoef(evoked_raw.data[0], evoked_other.data[0])[0, 1] + assert 0.9 <= corr <= 1. + return evoked_raw, events_raw, evoked_other, events_other diff --git a/mne/preprocessing/tests/test_xdawn.py b/mne/preprocessing/tests/test_xdawn.py index e0c905547d2..9f7d9a05d4f 100644 --- a/mne/preprocessing/tests/test_xdawn.py +++ b/mne/preprocessing/tests/test_xdawn.py @@ -15,7 +15,7 @@ create_info, EpochsArray) from mne.decoding import Vectorizer from mne.io import read_raw_fif -from mne.utils import requires_sklearn, check_version +from mne.utils import requires_sklearn from mne.preprocessing.xdawn import Xdawn, _XdawnTransformer base_dir = op.join(op.dirname(__file__), '..', '..', 'io', 'tests', 'data') @@ -193,12 +193,10 @@ def test_xdawn_regularization(): xd.fit(epochs) xd = Xdawn(correct_overlap=False, reg='diagonal_fixed') xd.fit(epochs) - bad_eig = check_version('numpy', '1.16.5') # some problem with newer NumPy - if bad_eig: - pytest.skip('Unknown MKL+Windows error fails for eig check') - xd = Xdawn(correct_overlap=False, reg=None) - with pytest.raises(ValueError, match='Could not compute eigenvalues'): - xd.fit(epochs) + # XXX in principle this should maybe raise an error due to deficiency? 
+ # xd = Xdawn(correct_overlap=False, reg=None) + # with pytest.raises(ValueError, match='Could not compute eigenvalues'): + # xd.fit(epochs) @requires_sklearn diff --git a/mne/tests/test_defaults.py b/mne/tests/test_defaults.py index aa3eee4af7c..55bc1883926 100644 --- a/mne/tests/test_defaults.py +++ b/mne/tests/test_defaults.py @@ -39,7 +39,7 @@ def test_si_units(): 'n': 1e-9, 'f': 1e-15, } - known_SI = {'V', 'T', 'Am', 'm', 'M', + known_SI = {'V', 'T', 'Am', 'm', 'M', 'rad', 'AU', 'GOF'} # not really SI but we tolerate them powers = '²' @@ -50,6 +50,8 @@ def _split_si(x): prefix, si = '', 'GOF' elif x == 'AU': prefix, si = '', 'AU' + elif x == 'rad': + prefix, si = '', 'rad' elif len(x) == 2: if x[1] in powers: prefix, si = '', x diff --git a/mne/utils/config.py b/mne/utils/config.py index a0168d92098..6a537ab668e 100644 --- a/mne/utils/config.py +++ b/mne/utils/config.py @@ -562,7 +562,7 @@ def sys_info(fid=None, show_paths=False): elif mod_name in ('mayavi', 'vtk'): has_3d = True if mod_name == 'vtk': - version = mod.VTK_VERSION + version = getattr(mod, 'VTK_VERSION', 'VTK_VERSION missing') elif mod_name == 'PyQt5': version = _check_pyqt5_version() else: diff --git a/mne/viz/backends/_pyvista.py b/mne/viz/backends/_pyvista.py index 4b8fa1ae041..e55c620f680 100644 --- a/mne/viz/backends/_pyvista.py +++ b/mne/viz/backends/_pyvista.py @@ -37,7 +37,7 @@ from pyvista import BackgroundPlotter from pyvista.utilities import try_callback from pyvista.plotting.plotting import _ALL_PLOTTERS -VTK9 = LooseVersion(vtk.VTK_VERSION) >= LooseVersion('9.0') +VTK9 = LooseVersion(getattr(vtk, 'VTK_VERSION', '9.0')) >= LooseVersion('9.0') _FIGURES = dict() diff --git a/mne/viz/utils.py b/mne/viz/utils.py index 18fc54763a0..f6ec8788aa0 100644 --- a/mne/viz/utils.py +++ b/mne/viz/utils.py @@ -32,7 +32,7 @@ _pick_data_channels, _DATA_CH_TYPES_SPLIT, pick_types, _DATA_CH_TYPES_ORDER_DEFAULT, _VALID_CHANNEL_TYPES, pick_info, _picks_by_type, pick_channels_cov, - _picks_to_idx, _contains_ch_type) + _picks_to_idx, _contains_ch_type, _FNIRS_CH_TYPES_SPLIT) from ..io.meas_info import create_info from ..rank import compute_rank from ..io.proj import setup_proj @@ -964,6 +964,9 @@ def plot_sensors(info, kind='topomap', ch_type=None, title=None, def _onpick_sensor(event, fig, ax, pos, ch_names, show_names): """Pick a channel in plot_sensors.""" + if event.mouseevent.inaxes != ax: + return + if event.mouseevent.key == 'control' and fig.lasso is not None: for ind in event.ind: fig.lasso.select_one(ind) @@ -1064,7 +1067,7 @@ def _plot_sensors(pos, info, picks, colors, bads, ch_names, title, show_names, ch_names=ch_names, show_names=show_names) fig.canvas.mpl_connect('pick_event', picker) - fig.suptitle(title) + ax.set(title=title) closed = partial(_close_event, fig=fig) fig.canvas.mpl_connect('close_event', closed) plt_show(show, block=block) @@ -2070,7 +2073,7 @@ def _set_psd_plot_params(info, proj, picks, ax, area_mode): kwargs = dict(meg=False, ref_meg=False, exclude=[]) if name in ('mag', 'grad'): kwargs['meg'] = name - elif name in ('fnirs_cw_amplitude', 'fnirs_od', 'hbo', 'hbr'): + elif name in _FNIRS_CH_TYPES_SPLIT: kwargs['fnirs'] = name else: kwargs[name] = True diff --git a/tools/github_actions_dependencies.sh b/tools/github_actions_dependencies.sh index 407c4a29730..7fa42126bfc 100755 --- a/tools/github_actions_dependencies.sh +++ b/tools/github_actions_dependencies.sh @@ -4,15 +4,22 @@ if [ ! -z "$CONDA_ENV" ]; then pip uninstall -yq mne elif [ ! 
-z "$CONDA_DEPENDENCIES" ]; then conda install -y $CONDA_DEPENDENCIES -else # pip - python -m pip install --upgrade pip setuptools wheel +else # pip 3.9 (missing statsmodels and dipy) + python -m pip install --progress-bar off --upgrade pip setuptools wheel pip uninstall -yq numpy - pip install -i "https://pypi.anaconda.org/scipy-wheels-nightly/simple" --pre "numpy!=1.20.0.dev0+20201111233731.0ffaaf8,!=1.20.0.dev0+20201111232921.0ffaaf8" - pip install -f "https://7933911d6844c6c53a7d-47bd50c35cd79bd838daf386af554a83.ssl.cf2.rackcdn.com" scipy pandas scikit-learn matplotlib h5py Pillow - pip install https://github.com/pyvista/pyvista/zipball/master - pip install https://github.com/pyvista/pyvistaqt/zipball/master + pip install --progress-bar off --upgrade --pre --only-binary ":all:" python-dateutil pytz joblib threadpoolctl + pip install --progress-bar off --upgrade --pre --only-binary ":all:" -i "https://pypi.anaconda.org/scipy-wheels-nightly/simple" numpy scipy pandas scikit-learn + pip install --progress-bar off --upgrade --pre --only-binary ":all:" -f "https://7933911d6844c6c53a7d-47bd50c35cd79bd838daf386af554a83.ssl.cf2.rackcdn.com" matplotlib + # built using vtk master branch on an Ubuntu 18.04.5 VM and uploaded to OSF: + wget -q https://osf.io/kej3v/download -O vtk-9.0.20201117-cp39-cp39-linux_x86_64.whl + pip install vtk-9.0.20201117-cp39-cp39-linux_x86_64.whl + pip install --progress-bar off https://github.com/pyvista/pyvista/zipball/5ee02e2f295f667e33f11e71946e774cca40256c + pip install --progress-bar off https://github.com/pyvista/pyvistaqt/zipball/master + pip install --progress-bar off --upgrade --pre PyQt5 + python -c "import vtk" + python -c "import pyvistaqt" fi -pip install --upgrade -r requirements_testing.txt +pip install --progress-bar off --upgrade -r requirements_testing.txt if [ "${DEPS}" != "minimal" ]; then pip install nitime fi diff --git a/tools/setup_xvfb.sh b/tools/setup_xvfb.sh new file mode 100755 index 00000000000..cfeb6a0bd92 --- /dev/null +++ b/tools/setup_xvfb.sh @@ -0,0 +1,4 @@ +#!/bin/bash -ef + +sudo apt-get install -yqq libxkbcommon-x11-0 libxcb-icccm4 libxcb-image0 libxcb-keysyms1 libxcb-randr0 libxcb-render-util0 libxcb-xinerama0 libxcb-xfixes0 libopengl0 +/sbin/start-stop-daemon --start --quiet --pidfile /tmp/custom_xvfb_99.pid --make-pidfile --background --exec /usr/bin/Xvfb -- :99 -screen 0 1400x900x24 -ac +extension GLX +render -noreset diff --git a/tutorials/io/plot_30_reading_fnirs_data.py b/tutorials/io/plot_30_reading_fnirs_data.py index c80d7ba34dc..6968d32d685 100644 --- a/tutorials/io/plot_30_reading_fnirs_data.py +++ b/tutorials/io/plot_30_reading_fnirs_data.py @@ -37,6 +37,35 @@ stored in the .snirf format. +.. _import-boxy: + +BOXY (.txt) +=========== + +BOXY recordings can be read in using :func:`mne.io.read_raw_boxy`. +The BOXY software and ISS Imagent I and II devices are frequency domain +systems that store data in a single ``.txt`` file containing what they call +(with MNE's name for that type of data in parens): + +- DC + All light collected by the detector (``fnirs_cw_amplitude``) +- AC + High-frequency modulated light intensity (``fnirs_fd_ac_amplitude``) +- Phase + Phase of the modulated light (``fnirs_fd_phase``) + +DC data is stored as the type ``fnirs_cw_amplitude`` because it +collects both the modulated and any unmodulated light, and hence is analogous +to what is collected by continuous wave systems such as NIRx. This helps with +conformance to SNIRF standard types. 
+ +These raw data files can be saved by the acquisition devices as parsed or +unparsed ``.txt`` files, which affects how the data in the file is organised. +MNE will read either file type and extract the raw DC, AC, and Phase data. +If triggers are sent using the ``digaux`` port of the recording hardware, MNE +will also read the ``digaux`` data and create annotations for any triggers. + + Storing of optode locations ===========================