From 3edb9088ec87f81d4325a1c29c95cb5b89a4f896 Mon Sep 17 00:00:00 2001 From: Kyle Mathewson Date: Mon, 20 Apr 2020 08:42:53 -0700 Subject: [PATCH 001/167] created gratton_emcp_epochs function in preprocessing/eog.py --- mne/preprocessing/eog.py | 82 ++++++++++++++++++++++++++++++++++++++++ 1 file changed, 82 insertions(+) diff --git a/mne/preprocessing/eog.py b/mne/preprocessing/eog.py index 9481eef862d..598a9566c02 100644 --- a/mne/preprocessing/eog.py +++ b/mne/preprocessing/eog.py @@ -248,3 +248,85 @@ def create_eog_epochs(raw, ch_name=None, event_id=998, picks=None, tmin=-0.5, picks=picks, baseline=baseline, preload=preload, reject_by_annotation=reject_by_annotation) return eog_epochs + + +@verbose +def GrattonEmcpEpochs(epochs): + + """Gratton, Coles, Donchin (1983) EMCP - Eye movement correction procedure. + + Parameters + ---------- + epochs : instance of Epoch + The epoched data with vertical and horizontal eye channels. + + Returns + ------- + emcp_epochs : instance of Epochs + Data epoched around EOG events. 
+ + Notes + ----- + Correct EEG data for EOG artifacts with regression + -compute the ERP in each condition + -subtract ERP from each trial + -subtract baseline (mean over all epoch) + -predict eye channel remainder from eeg remainder + -use coefficients to subtract eog from eeg + + """ + + event_names = ['A_error','B_error'] + i = 0 + for key, value in sorted(epochs.event_id.items(), key=lambda x: (x[1], x[0])): + event_names[i] = key + i += 1 + + #select the correct channels and data + eeg_chans = pick_types(epochs.info, eeg=True, eog=False) + eog_chans = pick_types(epochs.info, eeg=False, eog=True) + original_data = epochs._data + + #subtract the average over trials from each trial + rem = {} + for event in event_names: + data = epochs[event]._data + avg = np.mean(epochs[event]._data,axis=0) + rem[event] = data-avg + + #concatenate trials together of different types + ## then put them all back together in X (regression on all at once) + allrem = np.concatenate([rem[event] for event in event_names]) + + #separate eog and eeg + X = allrem[:,eeg_chans,:] + Y = allrem[:,eog_chans,:] + + #subtract mean over time from every trial/channel + X = (X.T - np.mean(X,2).T).T + Y = (Y.T - np.mean(Y,2).T).T + + #move electrodes first + X = np.moveaxis(X,0,1) + Y = np.moveaxis(Y,0,1) + + #make 2d and compute regression + X = np.reshape(X,(X.shape[0],np.prod(X.shape[1:]))) + Y = np.reshape(Y,(Y.shape[0],np.prod(Y.shape[1:]))) + b = np.linalg.solve(np.dot(Y,Y.T), np.dot(Y,X.T)) + + #get original data and electrodes first for matrix math + raw_eeg = np.moveaxis(original_data[:,eeg_chans,:],0,1) + raw_eog = np.moveaxis(original_data[:,eog_chans,:],0,1) + + #subtract weighted eye channels from eeg channels + eeg_corrected = (raw_eeg.T - np.dot(raw_eog.T,b)).T + + #move back to match epochs + eeg_corrected = np.moveaxis(eeg_corrected,0,1) + + #copy original epochs and replace with corrected data + epochs_new = epochs.copy() + epochs_new._data[:,eeg_chans,:] = eeg_corrected + + return 
emcp_epochs From 7dc4062985140c469496551937d8bc9a3a15c684 Mon Sep 17 00:00:00 2001 From: Kyle Mathewson Date: Mon, 20 Apr 2020 08:45:23 -0700 Subject: [PATCH 002/167] revert commit to master --- mne/preprocessing/eog.py | 84 +--------------------------------------- 1 file changed, 1 insertion(+), 83 deletions(-) diff --git a/mne/preprocessing/eog.py b/mne/preprocessing/eog.py index 598a9566c02..27d0ac6252c 100644 --- a/mne/preprocessing/eog.py +++ b/mne/preprocessing/eog.py @@ -247,86 +247,4 @@ def create_eog_epochs(raw, ch_name=None, event_id=998, picks=None, tmin=-0.5, tmax=tmax, proj=False, reject=reject, flat=flat, picks=picks, baseline=baseline, preload=preload, reject_by_annotation=reject_by_annotation) - return eog_epochs - - -@verbose -def GrattonEmcpEpochs(epochs): - - """Gratton, Coles, Donchin (1983) EMCP - Eye movement correction procedure. - - Parameters - ---------- - epochs : instance of Epoch - The epoched data with vertical and horizontal eye channels. - - Returns - ------- - emcp_epochs : instance of Epochs - Data epoched around EOG events. 
- - Notes - ----- - Correct EEG data for EOG artifacts with regression - -compute the ERP in each condition - -subtract ERP from each trial - -subtract baseline (mean over all epoch) - -predict eye channel remainder from eeg remainder - -use coefficients to subtract eog from eeg - - """ - - event_names = ['A_error','B_error'] - i = 0 - for key, value in sorted(epochs.event_id.items(), key=lambda x: (x[1], x[0])): - event_names[i] = key - i += 1 - - #select the correct channels and data - eeg_chans = pick_types(epochs.info, eeg=True, eog=False) - eog_chans = pick_types(epochs.info, eeg=False, eog=True) - original_data = epochs._data - - #subtract the average over trials from each trial - rem = {} - for event in event_names: - data = epochs[event]._data - avg = np.mean(epochs[event]._data,axis=0) - rem[event] = data-avg - - #concatenate trials together of different types - ## then put them all back together in X (regression on all at once) - allrem = np.concatenate([rem[event] for event in event_names]) - - #separate eog and eeg - X = allrem[:,eeg_chans,:] - Y = allrem[:,eog_chans,:] - - #subtract mean over time from every trial/channel - X = (X.T - np.mean(X,2).T).T - Y = (Y.T - np.mean(Y,2).T).T - - #move electrodes first - X = np.moveaxis(X,0,1) - Y = np.moveaxis(Y,0,1) - - #make 2d and compute regression - X = np.reshape(X,(X.shape[0],np.prod(X.shape[1:]))) - Y = np.reshape(Y,(Y.shape[0],np.prod(Y.shape[1:]))) - b = np.linalg.solve(np.dot(Y,Y.T), np.dot(Y,X.T)) - - #get original data and electrodes first for matrix math - raw_eeg = np.moveaxis(original_data[:,eeg_chans,:],0,1) - raw_eog = np.moveaxis(original_data[:,eog_chans,:],0,1) - - #subtract weighted eye channels from eeg channels - eeg_corrected = (raw_eeg.T - np.dot(raw_eog.T,b)).T - - #move back to match epochs - eeg_corrected = np.moveaxis(eeg_corrected,0,1) - - #copy original epochs and replace with corrected data - epochs_new = epochs.copy() - epochs_new._data[:,eeg_chans,:] = eeg_corrected - - return 
emcp_epochs + return eog_epochs \ No newline at end of file From 40d7492b6b8258b78ebe4527862b10be3c220c29 Mon Sep 17 00:00:00 2001 From: Kyle Mathewson Date: Mon, 20 Apr 2020 08:46:49 -0700 Subject: [PATCH 003/167] fix --- mne/preprocessing/eog.py | 86 +++++++++++++++++++++++++++++++++++++++- 1 file changed, 85 insertions(+), 1 deletion(-) diff --git a/mne/preprocessing/eog.py b/mne/preprocessing/eog.py index 27d0ac6252c..9aae4aa0402 100644 --- a/mne/preprocessing/eog.py +++ b/mne/preprocessing/eog.py @@ -247,4 +247,88 @@ def create_eog_epochs(raw, ch_name=None, event_id=998, picks=None, tmin=-0.5, tmax=tmax, proj=False, reject=reject, flat=flat, picks=picks, baseline=baseline, preload=preload, reject_by_annotation=reject_by_annotation) - return eog_epochs \ No newline at end of file + return eog_epochs + + + +@verbose +def gratton_emcp_epochs(epochs): + + """Gratton, Coles, Donchin (1983) EMCP - Eye movement correction procedure. + + Parameters + ---------- + epochs : instance of Epoch + The epoched data with vertical and horizontal eye channels. + + Returns + ------- + emcp_epochs : instance of Epochs + Data epoched around EOG events. 
+ + Notes + ----- + Correct EEG data for EOG artifacts with regression + -compute the ERP in each condition + -subtract ERP from each trial + -subtract baseline (mean over all epoch) + -predict eye channel remainder from eeg remainder + -use coefficients to subtract eog from eeg + + """ + + event_names = ['A_error','B_error'] + i = 0 + for key, value in sorted(epochs.event_id.items(), key=lambda x: (x[1], x[0])): + event_names[i] = key + i += 1 + + #select the correct channels and data + eeg_chans = pick_types(epochs.info, eeg=True, eog=False) + eog_chans = pick_types(epochs.info, eeg=False, eog=True) + original_data = epochs._data + + #subtract the average over trials from each trial + rem = {} + for event in event_names: + data = epochs[event]._data + avg = np.mean(epochs[event]._data,axis=0) + rem[event] = data-avg + + #concatenate trials together of different types + ## then put them all back together in X (regression on all at once) + allrem = np.concatenate([rem[event] for event in event_names]) + + #separate eog and eeg + X = allrem[:,eeg_chans,:] + Y = allrem[:,eog_chans,:] + + #subtract mean over time from every trial/channel + X = (X.T - np.mean(X,2).T).T + Y = (Y.T - np.mean(Y,2).T).T + + #move electrodes first + X = np.moveaxis(X,0,1) + Y = np.moveaxis(Y,0,1) + + #make 2d and compute regression + X = np.reshape(X,(X.shape[0],np.prod(X.shape[1:]))) + Y = np.reshape(Y,(Y.shape[0],np.prod(Y.shape[1:]))) + b = np.linalg.solve(np.dot(Y,Y.T), np.dot(Y,X.T)) + + #get original data and electrodes first for matrix math + raw_eeg = np.moveaxis(original_data[:,eeg_chans,:],0,1) + raw_eog = np.moveaxis(original_data[:,eog_chans,:],0,1) + + #subtract weighted eye channels from eeg channels + eeg_corrected = (raw_eeg.T - np.dot(raw_eog.T,b)).T + + #move back to match epochs + eeg_corrected = np.moveaxis(eeg_corrected,0,1) + + #copy original epochs and replace with corrected data + epochs_new = epochs.copy() + epochs_new._data[:,eeg_chans,:] = eeg_corrected + + return 
emcp_epochs + \ No newline at end of file From e5d69d762992e50e6feaf665680aa074b6ad4e5f Mon Sep 17 00:00:00 2001 From: Kyle Mathewson Date: Mon, 20 Apr 2020 08:47:37 -0700 Subject: [PATCH 004/167] revert --- mne/preprocessing/eog.py | 85 +--------------------------------------- 1 file changed, 1 insertion(+), 84 deletions(-) diff --git a/mne/preprocessing/eog.py b/mne/preprocessing/eog.py index 9aae4aa0402..9c2bad50d4e 100644 --- a/mne/preprocessing/eog.py +++ b/mne/preprocessing/eog.py @@ -248,87 +248,4 @@ def create_eog_epochs(raw, ch_name=None, event_id=998, picks=None, tmin=-0.5, picks=picks, baseline=baseline, preload=preload, reject_by_annotation=reject_by_annotation) return eog_epochs - - - -@verbose -def gratton_emcp_epochs(epochs): - - """Gratton, Coles, Donchin (1983) EMCP - Eye movement correction procedure. - - Parameters - ---------- - epochs : instance of Epoch - The epoched data with vertical and horizontal eye channels. - - Returns - ------- - emcp_epochs : instance of Epochs - Data epoched around EOG events. 
- - Notes - ----- - Correct EEG data for EOG artifacts with regression - -compute the ERP in each condition - -subtract ERP from each trial - -subtract baseline (mean over all epoch) - -predict eye channel remainder from eeg remainder - -use coefficients to subtract eog from eeg - - """ - - event_names = ['A_error','B_error'] - i = 0 - for key, value in sorted(epochs.event_id.items(), key=lambda x: (x[1], x[0])): - event_names[i] = key - i += 1 - - #select the correct channels and data - eeg_chans = pick_types(epochs.info, eeg=True, eog=False) - eog_chans = pick_types(epochs.info, eeg=False, eog=True) - original_data = epochs._data - - #subtract the average over trials from each trial - rem = {} - for event in event_names: - data = epochs[event]._data - avg = np.mean(epochs[event]._data,axis=0) - rem[event] = data-avg - - #concatenate trials together of different types - ## then put them all back together in X (regression on all at once) - allrem = np.concatenate([rem[event] for event in event_names]) - - #separate eog and eeg - X = allrem[:,eeg_chans,:] - Y = allrem[:,eog_chans,:] - - #subtract mean over time from every trial/channel - X = (X.T - np.mean(X,2).T).T - Y = (Y.T - np.mean(Y,2).T).T - - #move electrodes first - X = np.moveaxis(X,0,1) - Y = np.moveaxis(Y,0,1) - - #make 2d and compute regression - X = np.reshape(X,(X.shape[0],np.prod(X.shape[1:]))) - Y = np.reshape(Y,(Y.shape[0],np.prod(Y.shape[1:]))) - b = np.linalg.solve(np.dot(Y,Y.T), np.dot(Y,X.T)) - - #get original data and electrodes first for matrix math - raw_eeg = np.moveaxis(original_data[:,eeg_chans,:],0,1) - raw_eog = np.moveaxis(original_data[:,eog_chans,:],0,1) - - #subtract weighted eye channels from eeg channels - eeg_corrected = (raw_eeg.T - np.dot(raw_eog.T,b)).T - - #move back to match epochs - eeg_corrected = np.moveaxis(eeg_corrected,0,1) - - #copy original epochs and replace with corrected data - epochs_new = epochs.copy() - epochs_new._data[:,eeg_chans,:] = eeg_corrected - - return 
emcp_epochs - \ No newline at end of file + \ No newline at end of file From 3c2a163308b54c3c8c8385a668bb3578ab2557ac Mon Sep 17 00:00:00 2001 From: Kyle Mathewson Date: Mon, 20 Apr 2020 08:48:42 -0700 Subject: [PATCH 005/167] fix --- mne/preprocessing/eog.py | 3 +-- 1 file changed, 1 insertion(+), 2 deletions(-) diff --git a/mne/preprocessing/eog.py b/mne/preprocessing/eog.py index 9c2bad50d4e..4df6c8a43fe 100644 --- a/mne/preprocessing/eog.py +++ b/mne/preprocessing/eog.py @@ -247,5 +247,4 @@ def create_eog_epochs(raw, ch_name=None, event_id=998, picks=None, tmin=-0.5, tmax=tmax, proj=False, reject=reject, flat=flat, picks=picks, baseline=baseline, preload=preload, reject_by_annotation=reject_by_annotation) - return eog_epochs - \ No newline at end of file + return eog_epochs \ No newline at end of file From 5dcaac69e842e5b4dc28bbe7959742ad89608304 Mon Sep 17 00:00:00 2001 From: Kyle Mathewson Date: Mon, 20 Apr 2020 08:49:09 -0700 Subject: [PATCH 006/167] space ; --- mne/preprocessing/eog.py | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/mne/preprocessing/eog.py b/mne/preprocessing/eog.py index 4df6c8a43fe..27d0ac6252c 100644 --- a/mne/preprocessing/eog.py +++ b/mne/preprocessing/eog.py @@ -247,4 +247,4 @@ def create_eog_epochs(raw, ch_name=None, event_id=998, picks=None, tmin=-0.5, tmax=tmax, proj=False, reject=reject, flat=flat, picks=picks, baseline=baseline, preload=preload, reject_by_annotation=reject_by_annotation) - return eog_epochs \ No newline at end of file + return eog_epochs \ No newline at end of file From 096250156d1bbda5d72bf4e16d84f75b526b5d74 Mon Sep 17 00:00:00 2001 From: Kyle Mathewson Date: Sun, 3 May 2020 16:12:43 -0700 Subject: [PATCH 007/167] initialize new branch with boxy.py file and folder in io with tests --- mne/io/__init__.py | 2 + mne/io/boxy/__init__.py | 7 + mne/io/boxy/boxy.py | 393 +++++++++++++++++++++++++++++++++ mne/io/boxy/tests/__init__.py | 0 mne/io/boxy/tests/test_boxy.py | 226 +++++++++++++++++++ 5 
files changed, 628 insertions(+) create mode 100644 mne/io/boxy/__init__.py create mode 100644 mne/io/boxy/boxy.py create mode 100644 mne/io/boxy/tests/__init__.py create mode 100644 mne/io/boxy/tests/test_boxy.py diff --git a/mne/io/__init__.py b/mne/io/__init__.py index ac16517a635..0cf67ed4397 100644 --- a/mne/io/__init__.py +++ b/mne/io/__init__.py @@ -27,6 +27,7 @@ from . import kit from . import nicolet from . import nirx +from . import boxy from . import eeglab from . import pick @@ -45,6 +46,7 @@ from .eeglab import read_raw_eeglab, read_epochs_eeglab from .eximia import read_raw_eximia from .nirx import read_raw_nirx +from .boxy import read_raw_boxy from .fieldtrip import (read_raw_fieldtrip, read_epochs_fieldtrip, read_evoked_fieldtrip) diff --git a/mne/io/boxy/__init__.py b/mne/io/boxy/__init__.py new file mode 100644 index 00000000000..c06d590829e --- /dev/null +++ b/mne/io/boxy/__init__.py @@ -0,0 +1,7 @@ +"""fNIRS module for conversion to FIF.""" + +# Author: Robert Luke +# +# License: BSD (3-clause) + +from .boxy import read_raw_boxy diff --git a/mne/io/boxy/boxy.py b/mne/io/boxy/boxy.py new file mode 100644 index 00000000000..ffce7135a2e --- /dev/null +++ b/mne/io/boxy/boxy.py @@ -0,0 +1,393 @@ +# Authors: Kyle Mathewson, Jonathan Kuziek +# +# License: BSD (3-clause) + +from configparser import ConfigParser, RawConfigParser +import glob as glob +import re as re + +import numpy as np + +from ..base import BaseRaw +from ..constants import FIFF +from ..meas_info import create_info, _format_dig_points +from ...annotations import Annotations +from ...transforms import apply_trans, _get_trans +from ...utils import logger, verbose, fill_doc + + +@fill_doc +def read_raw_boxy(fname, preload=False, verbose=None): + """Reader for a BOXY optical imaging recording. + Parameters + ---------- + fname : str + Path to the BOXY data folder. + %(preload)s + %(verbose)s + Returns + ------- + raw : instance of RawBOXY + A Raw object containing BOXY data. 
+ See Also + -------- + mne.io.Raw : Documentation of attribute and methods. + """ + return RawBOXY(fname, preload, verbose) + +@fill_doc +class RawBOXY(BaseRaw): + """Raw object from a BOXY optical imaging file. + Parameters + ---------- + fname : str + Path to the BOXY data folder. + %(preload)s + %(verbose)s + See Also + -------- + mne.io.Raw : Documentation of attribute and methods. + """ + + @verbose + def __init__(self, fname, preload=False, verbose=None): + from ...externals.pymatreader import read_mat + from ...coreg import get_mni_fiducials # avoid circular import prob + logger.info('Loading %s' % fname) + + # Read header file + # Parse required header fields + ###this keeps track of the line we're on### + ###mostly to know the start and stop of data (probably an easier way)### + line_num = 0 + ###load and read data to get some meta information### + ###there is alot of information at the beginning of a file### + ###but this only grabs some of it### + with open(boxy_file,'r') as data: + for i_line in data: + line_num += 1 + if '#DATA ENDS' in i_line: + end_line = line_num - 1 + break + if 'Detector Channels' in i_line: + detect_num = int(i_line.rsplit(' ')[0]) + elif 'External MUX Channels' in i_line: + source_num = int(i_line.rsplit(' ')[0]) + elif 'Auxiliary Channels' in i_line: + aux_num = int(i_line.rsplit(' ')[0]) + elif 'Waveform (CCF) Frequency (Hz)' in i_line: + ccf_ha = float(i_line.rsplit(' ')[0]) + elif 'Update Rate (Hz)' in i_line: + srate = float(i_line.rsplit(' ')[0]) + elif 'Updata Rate (Hz)' in i_line: + srate = float(i_line.rsplit(' ')[0]) + elif '#DATA BEGINS' in i_line: + start_line = line_num + + # Extract source-detectors + ###set up some variables### + chan_num = [] + source_label = [] + detect_label = [] + chan_wavelength = [] + chan_modulation = [] + + ###load and read each line of the .mtg file### + with open(mtg_file,'r') as data: + for i_ignore in range(2): + next(data) + for i_line in data: + chan1, chan2, source, detector, 
wavelength, modulation = i_line.split() + chan_num.append(chan1) + source_label.append(source) + detect_label.append(detector) + chan_wavelength.append(wavelength) + chan_modulation.append(modulation) + + # Read information about probe/montage/optodes + # A word on terminology used here: + # Sources produce light + # Detectors measure light + # Sources and detectors are both called optodes + # Each source - detector pair produces a channel + # Channels are defined as the midpoint between source and detector + + ###check if we are given a .tol or .elp file### + all_labels = [] + all_coords = [] + fiducial_coords = [] + if coord_file[-3:].lower() == 'elp'.lower(): + get_label = 0 + get_coords = 0 + ###load and read .elp file### + with open(coord_file,'r') as data: + for i_line in data: + ###first let's get our fiducial coordinates### + if '%F' in i_line: + fiducial_coords.append(i_line.split()[1:]) + ###check where sensor info starts### + if '//Sensor name' in i_line: + get_label = 1 + elif get_label == 1: + ###grab the part after '%N' for the label### + label = i_line.split()[1] + all_labels.append(label) + get_label = 0 + get_coords = 1 + elif get_coords == 1: + X, Y, Z = i_line.split() + all_coords.append([float(X),float(Y),float(Z)]) + get_coords = 0 + for i_index in range(3): + fiducial_coords[i_index] = np.asarray([float(x) for x in fiducial_coords[i_index]]) + elif coord_file[-3:] == 'tol': + ###load and read .tol file### + with open(coord_file,'r') as data: + for i_line in data: + label, X, Y, Z = i_line.split() + all_labels.append(label) + ###convert coordinates from mm to m## + all_coords.append([(float(X)*0.001),(float(Y)*0.001),(float(Z)*0.001)]) + + ###get coordinates for sources### + source_coords = [] + for i_chan in source_label: + if i_chan in all_labels: + chan_index = all_labels.index(i_chan) + source_coords.append(all_coords[chan_index]) + + ###get coordinates for detectors### + detect_coords = [] + for i_chan in detect_label: + if i_chan in 
all_labels: + chan_index = all_labels.index(i_chan) + detect_coords.append(all_coords[chan_index]) + + + # Generate meaningful channel names + ###need to rename labels to make other functions happy### + ###get our unique labels for sources and detectors### + unique_source_labels = [] + unique_detect_labels = [] + [unique_source_labels.append(label) for label in source_label if label not in unique_source_labels] + [unique_detect_labels.append(label) for label in detect_label if label not in unique_detect_labels] + + ###now let's label each channel in our data### + ###data is channels X timepoint where the first source_num rows correspond to### + ###the first detector, and each row within that group is a different source### + ###should note that current .mtg files contain channels for multiple data files### + ###going to move to have a single .mtg file per participant, condition, and montage### + ###combine coordinates and label our channels### + ###will label them based on ac, dc, and ph data### + boxy_coords = [] + boxy_labels = [] + data_types = ['AC','DC','Ph'] + total_chans = detect_num*source_num + for i_type in data_types: + for i_coord in range(len(source_coords[0:total_chans])): + boxy_coords.append(np.mean( + np.vstack((source_coords[i_coord], detect_coords[i_coord])), + axis=0).tolist() + source_coords[i_coord] + + detect_coords[i_coord] + [chan_wavelength[i_coord]] + [0] + [0]) + boxy_labels.append('S' + + str(unique_source_labels.index(source_label[i_coord])+1) + + '_D' + + str(unique_detect_labels.index(detect_label[i_coord])+1) + + ' ' + chan_wavelength[i_coord] + ' ' + i_type) + + ###montage only wants channel coords, so need to grab those, convert to### + ###array, then make a dict with labels### + for i_chan in range(len(boxy_coords)): + boxy_coords[i_chan] = np.asarray(boxy_coords[i_chan],dtype=np.float64) + + for i_chan in range(len(all_coords)): + all_coords[i_chan] = np.asarray(all_coords[i_chan],dtype=np.float64) + + all_chan_dict = 
dict(zip(all_labels,all_coords)) + + + ###make our montage### + montage_orig = mne.channels.make_dig_montage(ch_pos=all_chan_dict,coord_frame='head', + nasion = fiducial_coords[0], + lpa = fiducial_coords[1], + rpa = fiducial_coords[2]) + + ###for some reason make_dig_montage put our channels in a different order than what we input### + ###let's fix that. should be fine to just change coords and ch_names### + for i_chan in range(len(all_coords)): + montage_orig.dig[i_chan+3]['r'] = all_coords[i_chan] + montage_orig.ch_names[i_chan] = all_labels[i_chan] + + ###add an extra channel for our triggers for later### + boxy_labels.append('Markers') + + info = mne.create_info(boxy_labels,srate,ch_types='fnirs_raw') + info.update(dig=montage_orig.dig) + + # Set up digitization + # These are all in MNI coordinates, so let's transform them to + # the Neuromag head coordinate frame + ###get our fiducials and transform matrix from fsaverage### + subjects_dir = op.dirname(fetch_fsaverage()) + fid_path = op.join(subjects_dir, 'fsaverage', 'bem', 'fsaverage-fiducials.fif') + fiducials = read_fiducials(fid_path) + trans = coregister_fiducials(info, fiducials[0], tol=0.02) + + ###remake montage using the transformed coordinates### + all_coords_trans = apply_trans(trans,all_coords) + all_chan_dict_trans = dict(zip(all_labels,all_coords_trans)) + fiducial_coords_trans = apply_trans(trans,fiducial_coords) + + ###make our montage### + montage_trans = mne.channels.make_dig_montage(ch_pos=all_chan_dict_trans,coord_frame='head', + nasion = fiducial_coords_trans[0], + lpa = fiducial_coords_trans[1], + rpa = fiducial_coords_trans[2]) + + ###let's fix montage order ### + for i_chan in range(len(all_coords_trans)): + montage_trans.dig[i_chan+3]['r'] = all_coords_trans[i_chan] + montage_trans.ch_names[i_chan] = all_labels[i_chan] + + # Create mne structure + ###create info structure### + info = mne.create_info(boxy_labels,srate,ch_types='fnirs_raw') + ###add data type and channel wavelength to 
info### + info.update(dig=montage_trans.dig, trans=trans) + + # Store channel, source, and detector locations + # The channel location is stored in the first 3 entries of loc. + # The source location is stored in the second 3 entries of loc. + # The detector location is stored in the third 3 entries of loc. + # NIRx NIRSite uses MNI coordinates. + # Also encode the light frequency in the structure. + + ###place our coordinates and wavelengths for each channel### + for i_chan in range(len(boxy_labels)-1): + temp_chn = apply_trans(trans,boxy_coords[i_chan][0:3]) + temp_src = apply_trans(trans,boxy_coords[i_chan][3:6]) + temp_det = apply_trans(trans,boxy_coords[i_chan][6:9]) + temp_other = np.asarray(boxy_coords[i_chan][9:],dtype=np.float64) + info['chs'][i_chan]['loc'] = test = np.concatenate((temp_chn, temp_src, + temp_det, temp_other),axis=0) + info['chs'][-1]['loc'] = np.zeros((12,)) + + super(RawBOXY, self).__init__( + info, preload, filenames=[fname], last_samps=[last_sample], + raw_extras=[raw_extras], verbose=verbose) + + def _read_segment_file(self, data, idx, fi, start, stop, cals, mult): + """Read a segment of data from a file. 
+ """ + with open(boxy_file,'r') as data: + for i_line in data: + line_num += 1 + if '#DATA BEGINS' in i_line: + start_line = line_num + break + + raw_data = pd.read_csv(boxy_file, skiprows=start_line, sep='\t') + ###detectors, sources, and data types### + detectors = ['A', 'B', 'C', 'D', 'E', 'F', 'G', 'H', 'I', 'J', 'K', 'L', + 'M', 'N', 'O', 'P', 'Q', 'R', 'S', 'T', 'U', 'V', 'W', 'X', + 'Y', 'Z'] + data_types = ['AC','DC','Ph'] + sources = np.arange(1,source_num+1,1) + + ###since we can save boxy files in two different styles### + ###this will check to see which style the data is saved### + ###seems to also work with older boxy files### + if 'exmux' in raw_data.columns: + filetype = 'non-parsed' + + ###drop the last line as this is just '#DATA ENDS'### + raw_data = raw_data.drop([len(raw_data)-1]) + + ###store some extra info### + record = raw_data['record'].to_numpy() + exmux = raw_data['exmux'].to_numpy() + + ###make some empty variables to store our data### + raw_ac = np.zeros((detect_num*source_num,int(len(raw_data)/source_num))) + raw_dc = np.zeros((detect_num*source_num,int(len(raw_data)/source_num))) + raw_ph = np.zeros((detect_num*source_num,int(len(raw_data)/source_num))) + else: + filetype = 'parsed' + + ###drop the last line as this is just '#DATA ENDS'### + ###also drop the first line since this is empty### + raw_data = raw_data.drop([0,len(raw_data)-1]) + + ###make some empty variables to store our data### + raw_ac = np.zeros(((detect_num*source_num),len(raw_data))) + raw_dc = np.zeros(((detect_num*source_num),len(raw_data))) + raw_ph = np.zeros(((detect_num*source_num),len(raw_data))) + + ###store some extra data, might not need these though### + time = raw_data['time'].to_numpy() if 'time' in raw_data.columns else [] + time = raw_data['time'].to_numpy() if 'time' in raw_data.columns else [] + group = raw_data['group'].to_numpy() if 'group' in raw_data.columns else [] + step = raw_data['step'].to_numpy() if 'step' in raw_data.columns else [] + 
mark = raw_data['mark'].to_numpy() if 'mark' in raw_data.columns else [] + flag = raw_data['flag'].to_numpy() if 'flag' in raw_data.columns else [] + aux1 = raw_data['aux-1'].to_numpy() if 'aux-1' in raw_data.columns else [] + digaux = raw_data['digaux'].to_numpy() if 'digaux' in raw_data.columns else [] + bias = np.zeros((detect_num,len(raw_data))) + + ###loop through detectors### + for i_detect in detectors[0:detect_num]: + + ###older boxy files don't seem to keep track of detector bias### + ###probably due to specific boxy settings actually### + if 'bias-A' in raw_data.columns: + bias[detectors.index(i_detect),:] = raw_data['bias-' + i_detect].to_numpy() + + ###loop through data types### + for i_data in data_types: + ###loop through sources### + for i_source in sources: + ###where to store our data### + index_loc = detectors.index(i_detect)*source_num + (i_source-1) + ###need to treat our filetypes differently### + if filetype == 'non-parsed': + + ###filetype saves timepoints in groups### + ###this should account for that### + time_points = np.arange(i_source-1,int(record[-1])*source_num,source_num) + + ###determine which channel to look for### + channel = i_detect + '-' + i_data + + ###save our data based on data type### + if data_types.index(i_data) == 0: + raw_ac[index_loc,:] = raw_data[channel][time_points].to_numpy() + elif data_ty pes.index(i_data) == 1: + raw_dc[index_loc,:] = raw_data[channel][time_points].to_numpy() + elif data_types.index(i_data) == 2: + raw_ph[index_loc,:] = raw_data[channel][time_points].to_numpy() + elif filetype == 'parsed': + ###determine which channel to look for### + channel = i_detect + '-' + i_data + str(i_source) + + ###save our data based on data type### + if data_types.index(i_data) == 0: + raw_ac[index_loc,:] = raw_data[channel].to_numpy() + elif data_types.index(i_data) == 1: + raw_dc[index_loc,:] = raw_data[channel].to_numpy() + elif data_types.index(i_data) == 2: + raw_ph[index_loc,:] = raw_data[channel].to_numpy() + + 
###now combine our data types into a single array with the data### + data = np.append(raw_ac, np.append(raw_dc, raw_ph, axis=0),axis=0) + + # Read triggers from event file + ###add our markers to the data array based on filetype### + if filetype == 'non-parsed': + if type(digaux) is list and digaux != []: + markers = digaux[np.arange(0,len(digaux),source_num)] + else: + markers = np.zeros(np.size(data,axis=1)) + elif filetype == 'parsed': + markers = digaux + data = np.vstack((data, markers)) + return data \ No newline at end of file diff --git a/mne/io/boxy/tests/__init__.py b/mne/io/boxy/tests/__init__.py new file mode 100644 index 00000000000..e69de29bb2d diff --git a/mne/io/boxy/tests/test_boxy.py b/mne/io/boxy/tests/test_boxy.py new file mode 100644 index 00000000000..ed4a75014fb --- /dev/null +++ b/mne/io/boxy/tests/test_boxy.py @@ -0,0 +1,226 @@ +# -*- coding: utf-8 -*- +# Authors: Robert Luke +# Eric Larson +# simplified BSD-3 license + +import os.path as op +import shutil + +import pytest +from numpy.testing import assert_allclose, assert_array_equal + +from mne.datasets.testing import data_path, requires_testing_data +from mne.io import read_raw_nirx +from mne.io.tests.test_raw import _test_raw_reader +from mne.transforms import apply_trans, _get_trans +from mne.utils import run_tests_if_main +from mne.preprocessing.nirs import source_detector_distances,\ + short_channels + +fname_nirx_15_0 = op.join(data_path(download=False), + 'NIRx', 'nirx_15_0_recording') +fname_nirx_15_2 = op.join(data_path(download=False), + 'NIRx', 'nirx_15_2_recording') +fname_nirx_15_2_short = op.join(data_path(download=False), + 'NIRx', 'nirx_15_2_recording_w_short') + + +@requires_testing_data +def test_nirx_15_2_short(): + """Test reading NIRX files.""" + raw = read_raw_nirx(fname_nirx_15_2_short, preload=True) + + # Test data import + assert raw._data.shape == (26, 145) + assert raw.info['sfreq'] == 12.5 + + # Test channel naming + assert raw.info['ch_names'][:4] == ["S1_D1 
760", "S1_D1 850", + "S1_D9 760", "S1_D9 850"] + assert raw.info['ch_names'][24:26] == ["S5_D13 760", "S5_D13 850"] + + # Test frequency encoding + assert raw.info['chs'][0]['loc'][9] == 760 + assert raw.info['chs'][1]['loc'][9] == 850 + + # Test info import + assert raw.info['subject_info'] == dict(sex=1, first_name="MNE", + middle_name="Test", + last_name="Recording") + + # Test distance between optodes matches values from + # nirsite https://github.com/mne-tools/mne-testing-data/pull/51 + # step 4 figure 2 + allowed_distance_error = 0.0002 + distances = source_detector_distances(raw.info) + assert_allclose(distances[::2], [ + 0.0304, 0.0078, 0.0310, 0.0086, 0.0416, + 0.0072, 0.0389, 0.0075, 0.0558, 0.0562, + 0.0561, 0.0565, 0.0077], atol=allowed_distance_error) + + # Test which channels are short + # These are the ones marked as red at + # https://github.com/mne-tools/mne-testing-data/pull/51 step 4 figure 2 + is_short = short_channels(raw.info) + assert_array_equal(is_short[:9:2], [False, True, False, True, False]) + is_short = short_channels(raw.info, threshold=0.003) + assert_array_equal(is_short[:3:2], [False, False]) + is_short = short_channels(raw.info, threshold=50) + assert_array_equal(is_short[:3:2], [True, True]) + + # Test trigger events + assert_array_equal(raw.annotations.description, ['3.0', '2.0', '1.0']) + + # Test location of detectors + # The locations of detectors can be seen in the first + # figure on this page... + # https://github.com/mne-tools/mne-testing-data/pull/51 + # And have been manually copied below + # These values were reported in mm, but according to this page... 
+ # https://mne.tools/stable/auto_tutorials/intro/plot_40_sensor_locations.html + # 3d locations should be specified in meters, so that's what's tested below + # Detector locations are stored in the third three loc values + allowed_dist_error = 0.0002 + locs = [ch['loc'][6:9] for ch in raw.info['chs']] + head_mri_t, _ = _get_trans('fsaverage', 'head', 'mri') + mni_locs = apply_trans(head_mri_t, locs) + + assert raw.info['ch_names'][0][3:5] == 'D1' + assert_allclose( + mni_locs[0], [-0.0841, -0.0464, -0.0129], atol=allowed_dist_error) + + assert raw.info['ch_names'][4][3:5] == 'D3' + assert_allclose( + mni_locs[4], [0.0846, -0.0142, -0.0156], atol=allowed_dist_error) + + assert raw.info['ch_names'][8][3:5] == 'D2' + assert_allclose( + mni_locs[8], [0.0207, -0.1062, 0.0484], atol=allowed_dist_error) + + assert raw.info['ch_names'][12][3:5] == 'D4' + assert_allclose( + mni_locs[12], [-0.0196, 0.0821, 0.0275], atol=allowed_dist_error) + + assert raw.info['ch_names'][16][3:5] == 'D5' + assert_allclose( + mni_locs[16], [-0.0360, 0.0276, 0.0778], atol=allowed_dist_error) + + assert raw.info['ch_names'][19][3:5] == 'D6' + assert_allclose( + mni_locs[19], [0.0352, 0.0283, 0.0780], atol=allowed_dist_error) + + assert raw.info['ch_names'][21][3:5] == 'D7' + assert_allclose( + mni_locs[21], [0.0388, -0.0477, 0.0932], atol=allowed_dist_error) + + +@requires_testing_data +def test_encoding(tmpdir): + """Test NIRx encoding.""" + fname = str(tmpdir.join('latin')) + shutil.copytree(fname_nirx_15_2, fname) + hdr_fname = op.join(fname, 'NIRS-2019-10-02_003.hdr') + hdr = list() + with open(hdr_fname, 'rb') as fid: + hdr.extend(line for line in fid) + hdr[2] = b'Date="jeu. 13 f\xe9vr. 
2020"\r\n' + with open(hdr_fname, 'wb') as fid: + for line in hdr: + fid.write(line) + # smoke test + read_raw_nirx(fname) + + +@requires_testing_data +def test_nirx_15_2(): + """Test reading NIRX files.""" + raw = read_raw_nirx(fname_nirx_15_2, preload=True) + + # Test data import + assert raw._data.shape == (64, 67) + assert raw.info['sfreq'] == 3.90625 + + # Test channel naming + assert raw.info['ch_names'][:4] == ["S1_D1 760", "S1_D1 850", + "S1_D10 760", "S1_D10 850"] + + # Test info import + assert raw.info['subject_info'] == dict(sex=1, first_name="TestRecording") + + # Test trigger events + assert_array_equal(raw.annotations.description, ['4.0', '6.0', '2.0']) + + # Test location of detectors + allowed_dist_error = 0.0002 + locs = [ch['loc'][6:9] for ch in raw.info['chs']] + head_mri_t, _ = _get_trans('fsaverage', 'head', 'mri') + mni_locs = apply_trans(head_mri_t, locs) + + assert raw.info['ch_names'][0][3:5] == 'D1' + assert_allclose( + mni_locs[0], [-0.0292, 0.0852, -0.0142], atol=allowed_dist_error) + + assert raw.info['ch_names'][15][3:5] == 'D4' + assert_allclose( + mni_locs[15], [-0.0739, -0.0756, -0.0075], atol=allowed_dist_error) + + +@requires_testing_data +def test_nirx_15_0(): + """Test reading NIRX files.""" + raw = read_raw_nirx(fname_nirx_15_0, preload=True) + + # Test data import + assert raw._data.shape == (20, 92) + assert raw.info['sfreq'] == 6.25 + + # Test channel naming + assert raw.info['ch_names'][:12] == ["S1_D1 760", "S1_D1 850", + "S2_D2 760", "S2_D2 850", + "S3_D3 760", "S3_D3 850", + "S4_D4 760", "S4_D4 850", + "S5_D5 760", "S5_D5 850", + "S6_D6 760", "S6_D6 850"] + + # Test info import + assert raw.info['subject_info'] == {'first_name': 'NIRX', + 'last_name': 'Test', 'sex': '0'} + + # Test trigger events + assert_array_equal(raw.annotations.description, ['1.0', '2.0', '2.0']) + + # Test location of detectors + allowed_dist_error = 0.0002 + locs = [ch['loc'][6:9] for ch in raw.info['chs']] + head_mri_t, _ = 
_get_trans('fsaverage', 'head', 'mri') + mni_locs = apply_trans(head_mri_t, locs) + + assert raw.info['ch_names'][0][3:5] == 'D1' + assert_allclose( + mni_locs[0], [0.0287, -0.1143, -0.0332], atol=allowed_dist_error) + + assert raw.info['ch_names'][15][3:5] == 'D8' + assert_allclose( + mni_locs[15], [-0.0693, -0.0480, 0.0657], atol=allowed_dist_error) + + # Test distance between optodes matches values from + allowed_distance_error = 0.0002 + distances = source_detector_distances(raw.info) + assert_allclose(distances[::2], [ + 0.0301, 0.0315, 0.0343, 0.0368, 0.0408, + 0.0399, 0.0393, 0.0367, 0.0336, 0.0447], atol=allowed_distance_error) + + +@requires_testing_data +@pytest.mark.parametrize('fname, boundary_decimal', ( + [fname_nirx_15_2_short, 1], + [fname_nirx_15_2, 0], + [fname_nirx_15_0, 0] +)) +def test_nirx_standard(fname, boundary_decimal): + """Test standard operations.""" + _test_raw_reader(read_raw_nirx, fname=fname, + boundary_decimal=boundary_decimal) # low fs + + +run_tests_if_main() From 40c80b2a1895efb174d6399c701ec647d39425bb Mon Sep 17 00:00:00 2001 From: Kyle Mathewson Date: Sun, 3 May 2020 16:28:39 -0700 Subject: [PATCH 008/167] fixing eog nonchange --- mne/preprocessing/eog.py | 10 ---------- 1 file changed, 10 deletions(-) diff --git a/mne/preprocessing/eog.py b/mne/preprocessing/eog.py index 27d0ac6252c..873a95fea2a 100644 --- a/mne/preprocessing/eog.py +++ b/mne/preprocessing/eog.py @@ -18,7 +18,6 @@ def find_eog_events(raw, event_id=998, l_freq=1, h_freq=10, filter_length='10s', ch_name=None, tstart=0, reject_by_annotation=False, thresh=None, verbose=None): """Locate EOG artifacts. - Parameters ---------- raw : instance of Raw @@ -40,12 +39,10 @@ def find_eog_events(raw, event_id=998, l_freq=1, h_freq=10, thresh : float Threshold to trigger EOG event. %(verbose)s - Returns ------- eog_events : array Events. 
- See Also -------- create_eog_epochs @@ -165,7 +162,6 @@ def create_eog_epochs(raw, ch_name=None, event_id=998, picks=None, tmin=-0.5, baseline=None, preload=True, reject_by_annotation=True, thresh=None, verbose=None): """Conveniently generate epochs around EOG artifact events. - Parameters ---------- raw : instance of Raw @@ -188,13 +184,11 @@ def create_eog_epochs(raw, ch_name=None, event_id=998, picks=None, tmin=-0.5, Rejection parameters based on peak-to-peak amplitude. Valid keys are 'grad' | 'mag' | 'eeg' | 'eog' | 'ecg'. If reject is None then no rejection is done. Example:: - reject = dict(grad=4000e-13, # T / m (gradiometers) mag=4e-12, # T (magnetometers) eeg=40e-6, # V (EEG channels) eog=250e-6 # V (EOG channels) ) - flat : dict | None Rejection parameters based on flatness of signal. Valid keys are 'grad' | 'mag' | 'eeg' | 'eog' | 'ecg', and values @@ -215,22 +209,18 @@ def create_eog_epochs(raw, ch_name=None, event_id=998, picks=None, tmin=-0.5, whose description begins with ``'bad'`` are not used for finding artifacts and epochs overlapping with them are rejected. If False, no rejection based on annotations is performed. - .. versionadded:: 0.14.0 thresh : float Threshold to trigger EOG event. %(verbose)s - Returns ------- eog_epochs : instance of Epochs Data epoched around EOG events. - See Also -------- find_eog_events compute_proj_eog - Notes ----- Filtering is only applied to the EOG channel while finding events. 
From 1e5d1b15eaa7e280ba5019205471903478981ace Mon Sep 17 00:00:00 2001 From: Kyle Mathewson Date: Sun, 3 May 2020 16:29:53 -0700 Subject: [PATCH 009/167] fix --- mne/preprocessing/eog.py | 10 ++++++++++ 1 file changed, 10 insertions(+) diff --git a/mne/preprocessing/eog.py b/mne/preprocessing/eog.py index 873a95fea2a..27d0ac6252c 100644 --- a/mne/preprocessing/eog.py +++ b/mne/preprocessing/eog.py @@ -18,6 +18,7 @@ def find_eog_events(raw, event_id=998, l_freq=1, h_freq=10, filter_length='10s', ch_name=None, tstart=0, reject_by_annotation=False, thresh=None, verbose=None): """Locate EOG artifacts. + Parameters ---------- raw : instance of Raw @@ -39,10 +40,12 @@ def find_eog_events(raw, event_id=998, l_freq=1, h_freq=10, thresh : float Threshold to trigger EOG event. %(verbose)s + Returns ------- eog_events : array Events. + See Also -------- create_eog_epochs @@ -162,6 +165,7 @@ def create_eog_epochs(raw, ch_name=None, event_id=998, picks=None, tmin=-0.5, baseline=None, preload=True, reject_by_annotation=True, thresh=None, verbose=None): """Conveniently generate epochs around EOG artifact events. + Parameters ---------- raw : instance of Raw @@ -184,11 +188,13 @@ def create_eog_epochs(raw, ch_name=None, event_id=998, picks=None, tmin=-0.5, Rejection parameters based on peak-to-peak amplitude. Valid keys are 'grad' | 'mag' | 'eeg' | 'eog' | 'ecg'. If reject is None then no rejection is done. Example:: + reject = dict(grad=4000e-13, # T / m (gradiometers) mag=4e-12, # T (magnetometers) eeg=40e-6, # V (EEG channels) eog=250e-6 # V (EOG channels) ) + flat : dict | None Rejection parameters based on flatness of signal. Valid keys are 'grad' | 'mag' | 'eeg' | 'eog' | 'ecg', and values @@ -209,18 +215,22 @@ def create_eog_epochs(raw, ch_name=None, event_id=998, picks=None, tmin=-0.5, whose description begins with ``'bad'`` are not used for finding artifacts and epochs overlapping with them are rejected. If False, no rejection based on annotations is performed. + .. 
versionadded:: 0.14.0 thresh : float Threshold to trigger EOG event. %(verbose)s + Returns ------- eog_epochs : instance of Epochs Data epoched around EOG events. + See Also -------- find_eog_events compute_proj_eog + Notes ----- Filtering is only applied to the EOG channel while finding events. From 9452e28ac91cf78c9c2e469dd9fce055e800c72c Mon Sep 17 00:00:00 2001 From: Kyle Mathewson Date: Sun, 3 May 2020 16:30:25 -0700 Subject: [PATCH 010/167] fix --- mne/preprocessing/eog.py | 3 ++- 1 file changed, 2 insertions(+), 1 deletion(-) diff --git a/mne/preprocessing/eog.py b/mne/preprocessing/eog.py index 27d0ac6252c..9c2bad50d4e 100644 --- a/mne/preprocessing/eog.py +++ b/mne/preprocessing/eog.py @@ -247,4 +247,5 @@ def create_eog_epochs(raw, ch_name=None, event_id=998, picks=None, tmin=-0.5, tmax=tmax, proj=False, reject=reject, flat=flat, picks=picks, baseline=baseline, preload=preload, reject_by_annotation=reject_by_annotation) - return eog_epochs \ No newline at end of file + return eog_epochs + \ No newline at end of file From 9bbdb57b8e85d8cae1e90bd0d02408d2a1cf98e6 Mon Sep 17 00:00:00 2001 From: Kyle Mathewson Date: Sun, 3 May 2020 16:31:14 -0700 Subject: [PATCH 011/167] fix --- mne/preprocessing/eog.py | 3 +-- 1 file changed, 1 insertion(+), 2 deletions(-) diff --git a/mne/preprocessing/eog.py b/mne/preprocessing/eog.py index 9c2bad50d4e..27d0ac6252c 100644 --- a/mne/preprocessing/eog.py +++ b/mne/preprocessing/eog.py @@ -247,5 +247,4 @@ def create_eog_epochs(raw, ch_name=None, event_id=998, picks=None, tmin=-0.5, tmax=tmax, proj=False, reject=reject, flat=flat, picks=picks, baseline=baseline, preload=preload, reject_by_annotation=reject_by_annotation) - return eog_epochs - \ No newline at end of file + return eog_epochs \ No newline at end of file From 19d4894e79c8b36f31ba1aea6500e241dccf00c6 Mon Sep 17 00:00:00 2001 From: Kyle Mathewson Date: Sun, 3 May 2020 16:31:58 -0700 Subject: [PATCH 012/167] fix --- mne/preprocessing/eog.py | 2 +- 1 file changed, 1 
insertion(+), 1 deletion(-) diff --git a/mne/preprocessing/eog.py b/mne/preprocessing/eog.py index 27d0ac6252c..4df6c8a43fe 100644 --- a/mne/preprocessing/eog.py +++ b/mne/preprocessing/eog.py @@ -247,4 +247,4 @@ def create_eog_epochs(raw, ch_name=None, event_id=998, picks=None, tmin=-0.5, tmax=tmax, proj=False, reject=reject, flat=flat, picks=picks, baseline=baseline, preload=preload, reject_by_annotation=reject_by_annotation) - return eog_epochs \ No newline at end of file + return eog_epochs \ No newline at end of file From 0dd396be69a659cca52ef6da98468de746503d5b Mon Sep 17 00:00:00 2001 From: Kyle Mathewson Date: Sun, 3 May 2020 16:32:34 -0700 Subject: [PATCH 013/167] fix --- mne/preprocessing/eog.py | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/mne/preprocessing/eog.py b/mne/preprocessing/eog.py index 4df6c8a43fe..27d0ac6252c 100644 --- a/mne/preprocessing/eog.py +++ b/mne/preprocessing/eog.py @@ -247,4 +247,4 @@ def create_eog_epochs(raw, ch_name=None, event_id=998, picks=None, tmin=-0.5, tmax=tmax, proj=False, reject=reject, flat=flat, picks=picks, baseline=baseline, preload=preload, reject_by_annotation=reject_by_annotation) - return eog_epochs \ No newline at end of file + return eog_epochs \ No newline at end of file From fd08dec8a7d4b93bf26f9bee5e4ab32948062b9c Mon Sep 17 00:00:00 2001 From: Kyle Mathewson Date: Sun, 3 May 2020 16:35:39 -0700 Subject: [PATCH 014/167] rebase fix? --- mne/preprocessing/eog.py | 146 +++++++++++++++------------------------ 1 file changed, 54 insertions(+), 92 deletions(-) diff --git a/mne/preprocessing/eog.py b/mne/preprocessing/eog.py index 27d0ac6252c..4e0b8c1eba8 100644 --- a/mne/preprocessing/eog.py +++ b/mne/preprocessing/eog.py @@ -1,4 +1,4 @@ -# Authors: Alexandre Gramfort +# Authors: Alexandre Gramfort # Denis Engemann # Eric Larson # @@ -6,18 +6,19 @@ import numpy as np -from ._peak_finder import peak_finder +from .peak_finder import peak_finder from .. 
import pick_types, pick_channels -from ..utils import logger, verbose, _pl -from ..filter import filter_data +from ..utils import logger, verbose +from ..filter import band_pass_filter from ..epochs import Epochs +from ..externals.six import string_types @verbose def find_eog_events(raw, event_id=998, l_freq=1, h_freq=10, filter_length='10s', ch_name=None, tstart=0, - reject_by_annotation=False, thresh=None, verbose=None): - """Locate EOG artifacts. + verbose=None): + """Locate EOG artifacts Parameters ---------- @@ -26,80 +27,62 @@ def find_eog_events(raw, event_id=998, l_freq=1, h_freq=10, event_id : int The index to assign to found events. l_freq : float - Low cut-off frequency to apply to the EOG channel in Hz. + Low cut-off frequency in Hz. h_freq : float - High cut-off frequency to apply to the EOG channel in Hz. + High cut-off frequency in Hz. filter_length : str | int | None Number of taps to use for filtering. - ch_name : str | None - If not None, use specified channel(s) for EOG. + ch_name: str | None + If not None, use specified channel(s) for EOG tstart : float Start detection after tstart seconds. - reject_by_annotation : bool - Whether to omit data that is annotated as bad. - thresh : float - Threshold to trigger EOG event. - %(verbose)s + verbose : bool, str, int, or None + If not None, override default verbose level (see mne.verbose). Returns ------- eog_events : array Events. - - See Also - -------- - create_eog_epochs - compute_proj_eog """ + # Getting EOG Channel eog_inds = _get_eog_channel_index(ch_name, raw) logger.info('EOG channel index for this subject is: %s' % eog_inds) - # Reject bad segments. 
- reject_by_annotation = 'omit' if reject_by_annotation else None - eog, times = raw.get_data(picks=eog_inds, - reject_by_annotation=reject_by_annotation, - return_times=True) - times = times * raw.info['sfreq'] + raw.first_samp + eog, _ = raw[eog_inds, :] eog_events = _find_eog_events(eog, event_id=event_id, l_freq=l_freq, h_freq=h_freq, sampling_rate=raw.info['sfreq'], first_samp=raw.first_samp, filter_length=filter_length, - tstart=tstart, thresh=thresh, - verbose=verbose) - # Map times to corresponding samples. - eog_events[:, 0] = np.round(times[eog_events[:, 0] - - raw.first_samp]).astype(int) + tstart=tstart) + return eog_events -@verbose def _find_eog_events(eog, event_id, l_freq, h_freq, sampling_rate, first_samp, - filter_length='10s', tstart=0., thresh=None, - verbose=None): - """Find EOG events.""" + filter_length='10s', tstart=0.): + """Helper function""" + logger.info('Filtering the data to remove DC offset to help ' 'distinguish blinks from saccades') # filtering to remove dc offset so that we know which is blink and saccades - # hardcode verbose=False to suppress filter param messages (since this - # filter is not under user control) fmax = np.minimum(45, sampling_rate / 2.0 - 0.75) # protect Nyquist - filteog = np.array([filter_data( - x, sampling_rate, 2, fmax, None, filter_length, 0.5, 0.5, - phase='zero-double', fir_window='hann', fir_design='firwin2', - verbose=False) for x in eog]) + filteog = np.array([band_pass_filter( + x, sampling_rate, 2, fmax, filter_length=filter_length, + l_trans_bandwidth=0.5, h_trans_bandwidth=0.5, phase='zero-double', + fir_window='hann') for x in eog]) temp = np.sqrt(np.sum(filteog ** 2, axis=1)) indexmax = np.argmax(temp) # easier to detect peaks with filtering. 
- filteog = filter_data( - eog[indexmax], sampling_rate, l_freq, h_freq, None, - filter_length, 0.5, 0.5, phase='zero-double', fir_window='hann', - fir_design='firwin2') + filteog = band_pass_filter( + eog[indexmax], sampling_rate, l_freq, h_freq, + filter_length=filter_length, l_trans_bandwidth=0.5, + h_trans_bandwidth=0.5, phase='zero-double', fir_window='hann') # detecting eog blinks and generating event file @@ -108,11 +91,9 @@ def _find_eog_events(eog, event_id, l_freq, h_freq, sampling_rate, first_samp, temp = filteog - np.mean(filteog) n_samples_start = int(sampling_rate * tstart) if np.abs(np.max(temp)) > np.abs(np.min(temp)): - eog_events, _ = peak_finder(filteog[n_samples_start:], - thresh, extrema=1) + eog_events, _ = peak_finder(filteog[n_samples_start:], extrema=1) else: - eog_events, _ = peak_finder(filteog[n_samples_start:], - thresh, extrema=-1) + eog_events, _ = peak_finder(filteog[n_samples_start:], extrema=-1) eog_events += n_samples_start n_events = len(eog_events) @@ -125,8 +106,7 @@ def _find_eog_events(eog, event_id, l_freq, h_freq, sampling_rate, first_samp, def _get_eog_channel_index(ch_name, inst): - """Get EOG channel index.""" - if isinstance(ch_name, str): + if isinstance(ch_name, string_types): # Check if multiple EOG Channels if ',' in ch_name: ch_name = ch_name.split(',') @@ -139,7 +119,8 @@ def _get_eog_channel_index(ch_name, inst): raise ValueError('%s not in channel list' % ch_name) else: logger.info('Using channel %s as EOG channel%s' % ( - " and ".join(ch_name), _pl(eog_inds))) + " and ".join(ch_name), + '' if len(eog_inds) < 2 else 's')) elif ch_name is None: eog_inds = pick_types(inst.info, meg=False, eeg=False, stim=False, @@ -160,30 +141,32 @@ def _get_eog_channel_index(ch_name, inst): @verbose -def create_eog_epochs(raw, ch_name=None, event_id=998, picks=None, tmin=-0.5, - tmax=0.5, l_freq=1, h_freq=10, reject=None, flat=None, - baseline=None, preload=True, reject_by_annotation=True, - thresh=None, verbose=None): - 
"""Conveniently generate epochs around EOG artifact events. +def create_eog_epochs(raw, ch_name=None, event_id=998, picks=None, + tmin=-0.5, tmax=0.5, l_freq=1, h_freq=10, + reject=None, flat=None, baseline=None, + preload=True, verbose=None): + """Conveniently generate epochs around EOG artifact events Parameters ---------- raw : instance of Raw - The raw data. + The raw data ch_name : str The name of the channel to use for EOG peak detection. The argument is mandatory if the dataset contains no EOG channels. event_id : int - The index to assign to found events. - %(picks_all)s + The index to assign to found events + picks : array-like of int | None (default) + Indices of channels to include (if None, all channels + are used). tmin : float Start time before event. tmax : float End time after event. l_freq : float - Low pass frequency to apply to the EOG channel while finding events. + Low pass frequency. h_freq : float - High pass frequency to apply to the EOG channel while finding events. + High pass frequency. reject : dict | None Rejection parameters based on peak-to-peak amplitude. Valid keys are 'grad' | 'mag' | 'eeg' | 'eog' | 'ecg'. @@ -206,45 +189,24 @@ def create_eog_epochs(raw, ch_name=None, event_id=998, picks=None, tmin=-0.5, the interval is between "a (s)" and "b (s)". If a is None the beginning of the data is used and if b is None then b is set to the end of the interval. - If baseline is equal to (None, None) all the time + If baseline is equal ot (None, None) all the time interval is used. If None, no correction is applied. preload : bool Preload epochs or not. - reject_by_annotation : bool - Whether to reject based on annotations. If True (default), segments - whose description begins with ``'bad'`` are not used for finding - artifacts and epochs overlapping with them are rejected. If False, no - rejection based on annotations is performed. - - .. versionadded:: 0.14.0 - thresh : float - Threshold to trigger EOG event. 
- %(verbose)s + verbose : bool, str, int, or None + If not None, override default verbose level (see mne.verbose). Returns ------- eog_epochs : instance of Epochs Data epoched around EOG events. - - See Also - -------- - find_eog_events - compute_proj_eog - - Notes - ----- - Filtering is only applied to the EOG channel while finding events. - The resulting ``eog_epochs`` will have no filtering applied (i.e., have - the same filter properties as the input ``raw`` instance). """ events = find_eog_events(raw, ch_name=ch_name, event_id=event_id, - l_freq=l_freq, h_freq=h_freq, - reject_by_annotation=reject_by_annotation, - thresh=thresh) + l_freq=l_freq, h_freq=h_freq) # create epochs around EOG events - eog_epochs = Epochs(raw, events=events, event_id=event_id, tmin=tmin, - tmax=tmax, proj=False, reject=reject, flat=flat, - picks=picks, baseline=baseline, preload=preload, - reject_by_annotation=reject_by_annotation) - return eog_epochs \ No newline at end of file + eog_epochs = Epochs(raw, events=events, event_id=event_id, + tmin=tmin, tmax=tmax, proj=False, reject=reject, + flat=flat, picks=picks, baseline=baseline, + preload=preload, add_eeg_ref=False) + return eog_epochs From 6ca09ee6f8b65a78e96eaaf3d2487f080d351b3a Mon Sep 17 00:00:00 2001 From: Kyle Mathewson Date: Sun, 3 May 2020 16:37:48 -0700 Subject: [PATCH 015/167] fix? --- mne/preprocessing/eog.py | 146 ++++++++++++++++++++++++--------------- 1 file changed, 92 insertions(+), 54 deletions(-) diff --git a/mne/preprocessing/eog.py b/mne/preprocessing/eog.py index 4e0b8c1eba8..27d0ac6252c 100644 --- a/mne/preprocessing/eog.py +++ b/mne/preprocessing/eog.py @@ -1,4 +1,4 @@ -# Authors: Alexandre Gramfort +# Authors: Alexandre Gramfort # Denis Engemann # Eric Larson # @@ -6,19 +6,18 @@ import numpy as np -from .peak_finder import peak_finder +from ._peak_finder import peak_finder from .. 
import pick_types, pick_channels -from ..utils import logger, verbose -from ..filter import band_pass_filter +from ..utils import logger, verbose, _pl +from ..filter import filter_data from ..epochs import Epochs -from ..externals.six import string_types @verbose def find_eog_events(raw, event_id=998, l_freq=1, h_freq=10, filter_length='10s', ch_name=None, tstart=0, - verbose=None): - """Locate EOG artifacts + reject_by_annotation=False, thresh=None, verbose=None): + """Locate EOG artifacts. Parameters ---------- @@ -27,62 +26,80 @@ def find_eog_events(raw, event_id=998, l_freq=1, h_freq=10, event_id : int The index to assign to found events. l_freq : float - Low cut-off frequency in Hz. + Low cut-off frequency to apply to the EOG channel in Hz. h_freq : float - High cut-off frequency in Hz. + High cut-off frequency to apply to the EOG channel in Hz. filter_length : str | int | None Number of taps to use for filtering. - ch_name: str | None - If not None, use specified channel(s) for EOG + ch_name : str | None + If not None, use specified channel(s) for EOG. tstart : float Start detection after tstart seconds. - verbose : bool, str, int, or None - If not None, override default verbose level (see mne.verbose). + reject_by_annotation : bool + Whether to omit data that is annotated as bad. + thresh : float + Threshold to trigger EOG event. + %(verbose)s Returns ------- eog_events : array Events. - """ + See Also + -------- + create_eog_epochs + compute_proj_eog + """ # Getting EOG Channel eog_inds = _get_eog_channel_index(ch_name, raw) logger.info('EOG channel index for this subject is: %s' % eog_inds) - eog, _ = raw[eog_inds, :] + # Reject bad segments. 
+ reject_by_annotation = 'omit' if reject_by_annotation else None + eog, times = raw.get_data(picks=eog_inds, + reject_by_annotation=reject_by_annotation, + return_times=True) + times = times * raw.info['sfreq'] + raw.first_samp eog_events = _find_eog_events(eog, event_id=event_id, l_freq=l_freq, h_freq=h_freq, sampling_rate=raw.info['sfreq'], first_samp=raw.first_samp, filter_length=filter_length, - tstart=tstart) - + tstart=tstart, thresh=thresh, + verbose=verbose) + # Map times to corresponding samples. + eog_events[:, 0] = np.round(times[eog_events[:, 0] - + raw.first_samp]).astype(int) return eog_events +@verbose def _find_eog_events(eog, event_id, l_freq, h_freq, sampling_rate, first_samp, - filter_length='10s', tstart=0.): - """Helper function""" - + filter_length='10s', tstart=0., thresh=None, + verbose=None): + """Find EOG events.""" logger.info('Filtering the data to remove DC offset to help ' 'distinguish blinks from saccades') # filtering to remove dc offset so that we know which is blink and saccades + # hardcode verbose=False to suppress filter param messages (since this + # filter is not under user control) fmax = np.minimum(45, sampling_rate / 2.0 - 0.75) # protect Nyquist - filteog = np.array([band_pass_filter( - x, sampling_rate, 2, fmax, filter_length=filter_length, - l_trans_bandwidth=0.5, h_trans_bandwidth=0.5, phase='zero-double', - fir_window='hann') for x in eog]) + filteog = np.array([filter_data( + x, sampling_rate, 2, fmax, None, filter_length, 0.5, 0.5, + phase='zero-double', fir_window='hann', fir_design='firwin2', + verbose=False) for x in eog]) temp = np.sqrt(np.sum(filteog ** 2, axis=1)) indexmax = np.argmax(temp) # easier to detect peaks with filtering. 
- filteog = band_pass_filter( - eog[indexmax], sampling_rate, l_freq, h_freq, - filter_length=filter_length, l_trans_bandwidth=0.5, - h_trans_bandwidth=0.5, phase='zero-double', fir_window='hann') + filteog = filter_data( + eog[indexmax], sampling_rate, l_freq, h_freq, None, + filter_length, 0.5, 0.5, phase='zero-double', fir_window='hann', + fir_design='firwin2') # detecting eog blinks and generating event file @@ -91,9 +108,11 @@ def _find_eog_events(eog, event_id, l_freq, h_freq, sampling_rate, first_samp, temp = filteog - np.mean(filteog) n_samples_start = int(sampling_rate * tstart) if np.abs(np.max(temp)) > np.abs(np.min(temp)): - eog_events, _ = peak_finder(filteog[n_samples_start:], extrema=1) + eog_events, _ = peak_finder(filteog[n_samples_start:], + thresh, extrema=1) else: - eog_events, _ = peak_finder(filteog[n_samples_start:], extrema=-1) + eog_events, _ = peak_finder(filteog[n_samples_start:], + thresh, extrema=-1) eog_events += n_samples_start n_events = len(eog_events) @@ -106,7 +125,8 @@ def _find_eog_events(eog, event_id, l_freq, h_freq, sampling_rate, first_samp, def _get_eog_channel_index(ch_name, inst): - if isinstance(ch_name, string_types): + """Get EOG channel index.""" + if isinstance(ch_name, str): # Check if multiple EOG Channels if ',' in ch_name: ch_name = ch_name.split(',') @@ -119,8 +139,7 @@ def _get_eog_channel_index(ch_name, inst): raise ValueError('%s not in channel list' % ch_name) else: logger.info('Using channel %s as EOG channel%s' % ( - " and ".join(ch_name), - '' if len(eog_inds) < 2 else 's')) + " and ".join(ch_name), _pl(eog_inds))) elif ch_name is None: eog_inds = pick_types(inst.info, meg=False, eeg=False, stim=False, @@ -141,32 +160,30 @@ def _get_eog_channel_index(ch_name, inst): @verbose -def create_eog_epochs(raw, ch_name=None, event_id=998, picks=None, - tmin=-0.5, tmax=0.5, l_freq=1, h_freq=10, - reject=None, flat=None, baseline=None, - preload=True, verbose=None): - """Conveniently generate epochs around EOG 
artifact events +def create_eog_epochs(raw, ch_name=None, event_id=998, picks=None, tmin=-0.5, + tmax=0.5, l_freq=1, h_freq=10, reject=None, flat=None, + baseline=None, preload=True, reject_by_annotation=True, + thresh=None, verbose=None): + """Conveniently generate epochs around EOG artifact events. Parameters ---------- raw : instance of Raw - The raw data + The raw data. ch_name : str The name of the channel to use for EOG peak detection. The argument is mandatory if the dataset contains no EOG channels. event_id : int - The index to assign to found events - picks : array-like of int | None (default) - Indices of channels to include (if None, all channels - are used). + The index to assign to found events. + %(picks_all)s tmin : float Start time before event. tmax : float End time after event. l_freq : float - Low pass frequency. + Low pass frequency to apply to the EOG channel while finding events. h_freq : float - High pass frequency. + High pass frequency to apply to the EOG channel while finding events. reject : dict | None Rejection parameters based on peak-to-peak amplitude. Valid keys are 'grad' | 'mag' | 'eeg' | 'eog' | 'ecg'. @@ -189,24 +206,45 @@ def create_eog_epochs(raw, ch_name=None, event_id=998, picks=None, the interval is between "a (s)" and "b (s)". If a is None the beginning of the data is used and if b is None then b is set to the end of the interval. - If baseline is equal ot (None, None) all the time + If baseline is equal to (None, None) all the time interval is used. If None, no correction is applied. preload : bool Preload epochs or not. - verbose : bool, str, int, or None - If not None, override default verbose level (see mne.verbose). + reject_by_annotation : bool + Whether to reject based on annotations. If True (default), segments + whose description begins with ``'bad'`` are not used for finding + artifacts and epochs overlapping with them are rejected. If False, no + rejection based on annotations is performed. + + .. 
versionadded:: 0.14.0 + thresh : float + Threshold to trigger EOG event. + %(verbose)s Returns ------- eog_epochs : instance of Epochs Data epoched around EOG events. + + See Also + -------- + find_eog_events + compute_proj_eog + + Notes + ----- + Filtering is only applied to the EOG channel while finding events. + The resulting ``eog_epochs`` will have no filtering applied (i.e., have + the same filter properties as the input ``raw`` instance). """ events = find_eog_events(raw, ch_name=ch_name, event_id=event_id, - l_freq=l_freq, h_freq=h_freq) + l_freq=l_freq, h_freq=h_freq, + reject_by_annotation=reject_by_annotation, + thresh=thresh) # create epochs around EOG events - eog_epochs = Epochs(raw, events=events, event_id=event_id, - tmin=tmin, tmax=tmax, proj=False, reject=reject, - flat=flat, picks=picks, baseline=baseline, - preload=preload, add_eeg_ref=False) - return eog_epochs + eog_epochs = Epochs(raw, events=events, event_id=event_id, tmin=tmin, + tmax=tmax, proj=False, reject=reject, flat=flat, + picks=picks, baseline=baseline, preload=preload, + reject_by_annotation=reject_by_annotation) + return eog_epochs \ No newline at end of file From 4aa880b7905bd94740bcd3ae86fa722574cd6119 Mon Sep 17 00:00:00 2001 From: Kyle Mathewson Date: Sun, 3 May 2020 16:41:36 -0700 Subject: [PATCH 016/167] final fix (sublime settings --- mne/preprocessing/eog.py | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/mne/preprocessing/eog.py b/mne/preprocessing/eog.py index 27d0ac6252c..9481eef862d 100644 --- a/mne/preprocessing/eog.py +++ b/mne/preprocessing/eog.py @@ -247,4 +247,4 @@ def create_eog_epochs(raw, ch_name=None, event_id=998, picks=None, tmin=-0.5, tmax=tmax, proj=False, reject=reject, flat=flat, picks=picks, baseline=baseline, preload=preload, reject_by_annotation=reject_by_annotation) - return eog_epochs \ No newline at end of file + return eog_epochs From 4f2ee27124d38ecfa65d9b6f4c340611a8f388b7 Mon Sep 17 00:00:00 2001 From: Kyle Mathewson Date: 
Sun, 3 May 2020 20:50:56 -0700 Subject: [PATCH 017/167] added dataset import information --- mne/datasets/boxy_example/__init__.py | 3 + mne/datasets/boxy_example/boxy_example.py | 30 ++ mne/datasets/utils.py | 10 +- .../preprocessing/plot_80_boxy_processing.py | 331 ++++++++++++++++++ 4 files changed, 372 insertions(+), 2 deletions(-) create mode 100644 mne/datasets/boxy_example/__init__.py create mode 100644 mne/datasets/boxy_example/boxy_example.py create mode 100644 tutorials/preprocessing/plot_80_boxy_processing.py diff --git a/mne/datasets/boxy_example/__init__.py b/mne/datasets/boxy_example/__init__.py new file mode 100644 index 00000000000..a90c5723ce8 --- /dev/null +++ b/mne/datasets/boxy_example/__init__.py @@ -0,0 +1,3 @@ +"""fNIRS motor dataset.""" + +from .boxy_example import data_path, has_boxy_example_data, get_version diff --git a/mne/datasets/boxy_example/boxy_example.py b/mne/datasets/boxy_example/boxy_example.py new file mode 100644 index 00000000000..3aa114aa3eb --- /dev/null +++ b/mne/datasets/boxy_example/boxy_example.py @@ -0,0 +1,30 @@ +# Authors: Eric Larson +# License: BSD Style. 
+ +from functools import partial + +from ...utils import verbose +from ..utils import (has_dataset, _data_path, _data_path_doc, + _get_version, _version_doc) + + +has_boxy_example_data = partial(has_dataset, name='boxy_example') + + +@verbose +def data_path(path=None, force_update=False, update_path=True, download=True, + verbose=None): # noqa: D103 + return _data_path(path=path, force_update=force_update, + update_path=update_path, name='boxy_example', + download=download) + + +data_path.__doc__ = _data_path_doc.format(name='boxy_example', + conf='MNE_DATASETS_BOXY_EXAMPLE_PATH') + + +def get_version(): # noqa: D103 + return _get_version('boxy_example') + + +get_version.__doc__ = _version_doc.format(name='boxy_example') diff --git a/mne/datasets/utils.py b/mne/datasets/utils.py index 542ceb17231..841fac05a17 100644 --- a/mne/datasets/utils.py +++ b/mne/datasets/utils.py @@ -226,6 +226,7 @@ def _data_path(path=None, force_update=False, update_path=True, download=True, 'testing': 'MNE_DATASETS_TESTING_PATH', 'multimodal': 'MNE_DATASETS_MULTIMODAL_PATH', 'fnirs_motor': 'MNE_DATASETS_FNIRS_MOTOR_PATH', + 'boxy_example': 'MNE_DATASETS_BOXY_EXAMPLE_PATH', 'opm': 'MNE_DATASETS_OPM_PATH', 'visual_92_categories': 'MNE_DATASETS_VISUAL_92_CATEGORIES_PATH', 'kiloword': 'MNE_DATASETS_KILOWORD_PATH', @@ -263,6 +264,7 @@ def _data_path(path=None, force_update=False, update_path=True, download=True, 'tar.gz/%s' % releases['testing'], multimodal='https://ndownloader.figshare.com/files/5999598', fnirs_motor='https://osf.io/dj3eh/download?version=1', + boxy_example='https://osf.io/hksme/download?version=1', opm='https://osf.io/p6ae7/download?version=2', visual_92_categories=[ 'https://osf.io/8ejrs/download?version=1', @@ -281,6 +283,7 @@ def _data_path(path=None, force_update=False, update_path=True, download=True, mtrf='mTRF_1.5.zip', multimodal='MNE-multimodal-data.tar.gz', fnirs_motor='MNE-fNIRS-motor-data.tgz', + boxy_example='MNE-BOXY-example-data.tgz', 
opm='MNE-OPM-data.tar.gz', sample='MNE-sample-data-processed.tar.gz', somato='MNE-somato-data.tar.gz', @@ -325,6 +328,7 @@ def _data_path(path=None, force_update=False, update_path=True, download=True, testing='1ef691944239411b869b3ed2f40a69fe', multimodal='26ec847ae9ab80f58f204d09e2c08367', fnirs_motor='c4935d19ddab35422a69f3326a01fef8', + boxy_example='b3793334548b7ba04c1b767c66117414', opm='370ad1dcfd5c47e029e692c85358a374', visual_92_categories=['74f50bbeb65740903eadc229c9fa759f', '203410a98afc9df9ae8ba9f933370e20'], @@ -523,7 +527,7 @@ def has_dataset(name): Returns ------- - has : bool + : bool True if the dataset is present. """ name = 'spm' if name == 'spm_face' else name @@ -542,6 +546,7 @@ def has_dataset(name): 'spm': 'MNE-spm-face', 'multimodal': 'MNE-multimodal-data', 'fnirs_motor': 'MNE-fNIRS-motor-data', + 'boxy_example': 'MNE-BOXY-example-data', 'opm': 'MNE-OPM-data', 'testing': 'MNE-testing-data', 'visual_92_categories': 'MNE-visual_92_categories-data', @@ -569,7 +574,7 @@ def _download_all_example_data(verbose=True): from . import (sample, testing, misc, spm_face, somato, brainstorm, eegbci, multimodal, opm, hf_sef, mtrf, fieldtrip_cmc, kiloword, phantom_4dbti, sleep_physionet, limo, - fnirs_motor) + fnirs_motor, boxy_example) sample_path = sample.data_path() testing.data_path() misc.data_path() @@ -578,6 +583,7 @@ def _download_all_example_data(verbose=True): hf_sef.data_path() multimodal.data_path() fnirs_motor.data_path() + boxy_example.data_path() opm.data_path() mtrf.data_path() fieldtrip_cmc.data_path() diff --git a/tutorials/preprocessing/plot_80_boxy_processing.py b/tutorials/preprocessing/plot_80_boxy_processing.py new file mode 100644 index 00000000000..6579f4ddc09 --- /dev/null +++ b/tutorials/preprocessing/plot_80_boxy_processing.py @@ -0,0 +1,331 @@ +""" +.. 
_tut-fnirs-processing: + +Preprocessing functional near-infrared spectroscopy (fNIRS) data +================================================================ + +This tutorial covers how to convert functional near-infrared spectroscopy +(fNIRS) data from raw measurements to relative oxyhaemoglobin (HbO) and +deoxyhaemoglobin (HbR) concentration. + +.. contents:: Page contents + :local: + :depth: 2 + +Here we will work with the :ref:`fNIRS motor data `. +""" +# sphinx_gallery_thumbnail_number = 1 + +import os +import numpy as np +import matplotlib.pyplot as plt +from itertools import compress + +import mne + + +boxy_data_folder = mne.datasets.boxy_example.data_path() +boxy_raw_dir = os.path.join(fnirs_data_folder, 'Participant-1') +raw_intensity = mne.io.read_raw_boxy(boxy_raw_dir, verbose=True).load_data() + + +# ############################################################################### +# # View location of sensors over brain surface +# # ------------------------------------------- +# # +# # Here we validate that the location of sources-detector pairs and channels +# # are in the expected locations. Source-detector pairs are shown as lines +# # between the optodes, channels (the mid point of source-detector pairs) are +# # shown as dots. 
+ +# subjects_dir = mne.datasets.sample.data_path() + '/subjects' + +# fig = mne.viz.create_3d_figure(size=(800, 600), bgcolor='white') +# fig = mne.viz.plot_alignment(raw_intensity.info, show_axes=True, +# subject='fsaverage', +# trans='fsaverage', surfaces=['brain'], +# fnirs=['channels', 'pairs'], +# subjects_dir=subjects_dir, fig=fig) +# mne.viz.set_3d_view(figure=fig, azimuth=20, elevation=55, distance=0.6) + + +# ############################################################################### +# # Selecting channels appropriate for detecting neural responses +# # ------------------------------------------------------------- +# # +# # First we remove channels that are too close together (short channels) to +# # detect a neural response (less than 1 cm distance between optodes). +# # These short channels can be seen in the figure above. +# # To achieve this we pick all the channels that are not considered to be short. + +# picks = mne.pick_types(raw_intensity.info, meg=False, fnirs=True) +# dists = mne.preprocessing.nirs.source_detector_distances( +# raw_intensity.info, picks=picks) +# raw_intensity.pick(picks[dists > 0.01]) +# raw_intensity.plot(n_channels=len(raw_intensity.ch_names), +# duration=500, show_scrollbars=False) + + +# ############################################################################### +# # Converting from raw intensity to optical density +# # ------------------------------------------------ +# # +# # The raw intensity values are then converted to optical density. + +# raw_od = mne.preprocessing.nirs.optical_density(raw_intensity) +# raw_od.plot(n_channels=len(raw_od.ch_names), +# duration=500, show_scrollbars=False) + + +# ############################################################################### +# # Evaluating the quality of the data +# # ---------------------------------- +# # +# # At this stage we can quantify the quality of the coupling +# # between the scalp and the optodes using the scalp coupling index. 
This +# # method looks for the presence of a prominent synchronous signal in the +# # frequency range of cardiac signals across both photodetected signals. +# # +# # In this example the data is clean and the coupling is good for all +# # channels, so we will not mark any channels as bad based on the scalp +# # coupling index. + +# sci = mne.preprocessing.nirs.scalp_coupling_index(raw_od) +# fig, ax = plt.subplots() +# ax.hist(sci) +# ax.set(xlabel='Scalp Coupling Index', ylabel='Count', xlim=[0, 1]) + + +# ############################################################################### +# # In this example we will mark all channels with a SCI less than 0.5 as bad +# # (this dataset is quite clean, so no channels are marked as bad). + +# raw_od.info['bads'] = list(compress(raw_od.ch_names, sci < 0.5)) + + +# ############################################################################### +# # At this stage it is appropriate to inspect your data +# # (for instructions on how to use the interactive data visualisation tool +# # see :ref:`tut-visualize-raw`) +# # to ensure that channels with poor scalp coupling have been removed. +# # If your data contains lots of artifacts you may decide to apply +# # artifact reduction techniques as described in :ref:`ex-fnirs-artifacts`. + + +# ############################################################################### +# # Converting from optical density to haemoglobin +# # ---------------------------------------------- +# # +# # Next we convert the optical density data to haemoglobin concentration using +# # the modified Beer-Lambert law. 
+ +# raw_haemo = mne.preprocessing.nirs.beer_lambert_law(raw_od) +# raw_haemo.plot(n_channels=len(raw_haemo.ch_names), +# duration=500, show_scrollbars=False) + + +# ############################################################################### +# # Removing heart rate from signal +# # ------------------------------- +# # +# # The haemodynamic response has frequency content predominantly below 0.5 Hz. +# # An increase in activity around 1 Hz can be seen in the data that is due to +# # the person's heart beat and is unwanted. So we use a low pass filter to +# # remove this. A high pass filter is also included to remove slow drifts +# # in the data. + +# fig = raw_haemo.plot_psd(average=True) +# fig.suptitle('Before filtering', weight='bold', size='x-large') +# fig.subplots_adjust(top=0.88) +# raw_haemo = raw_haemo.filter(0.05, 0.7, h_trans_bandwidth=0.2, +# l_trans_bandwidth=0.02) +# fig = raw_haemo.plot_psd(average=True) +# fig.suptitle('After filtering', weight='bold', size='x-large') +# fig.subplots_adjust(top=0.88) + +# ############################################################################### +# # Extract epochs +# # -------------- +# # +# # Now that the signal has been converted to relative haemoglobin concentration, +# # and the unwanted heart rate component has been removed, we can extract epochs +# # related to each of the experimental conditions. +# # +# # First we extract the events of interest and visualise them to ensure they are +# # correct. 
+ +# events, _ = mne.events_from_annotations(raw_haemo, event_id={'1.0': 1, +# '2.0': 2, +# '3.0': 3}) +# event_dict = {'Control': 1, 'Tapping/Left': 2, 'Tapping/Right': 3} +# fig = mne.viz.plot_events(events, event_id=event_dict, +# sfreq=raw_haemo.info['sfreq']) +# fig.subplots_adjust(right=0.7) # make room for the legend + + +# ############################################################################### +# # Next we define the range of our epochs, the rejection criteria, +# # baseline correction, and extract the epochs. We visualise the log of which +# # epochs were dropped. + +# reject_criteria = dict(hbo=80e-6) +# tmin, tmax = -5, 15 + +# epochs = mne.Epochs(raw_haemo, events, event_id=event_dict, +# tmin=tmin, tmax=tmax, +# reject=reject_criteria, reject_by_annotation=True, +# proj=True, baseline=(None, 0), preload=True, +# detrend=None, verbose=True) +# epochs.plot_drop_log() + + +# ############################################################################### +# # View consistency of responses across trials +# # ------------------------------------------- +# # +# # Now we can view the haemodynamic response for our tapping condition. +# # We visualise the response for both the oxy- and deoxyhaemoglobin, and +# # observe the expected peak in HbO at around 6 seconds consistently across +# # trials, and the consistent dip in HbR that is slightly delayed relative to +# # the HbO peak. + +# epochs['Tapping'].plot_image(combine='mean', vmin=-30, vmax=30, +# ts_args=dict(ylim=dict(hbo=[-15, 15], +# hbr=[-15, 15]))) + + +# ############################################################################### +# # We can also view the epoched data for the control condition and observe +# # that it does not show the expected morphology. 
+ +# epochs['Control'].plot_image(combine='mean', vmin=-30, vmax=30, +# ts_args=dict(ylim=dict(hbo=[-15, 15], +# hbr=[-15, 15]))) + + +# ############################################################################### +# # View consistency of responses across channels +# # --------------------------------------------- +# # +# # Similarly we can view how consistent the response is across the optode +# # pairs that we selected. All the channels in this data are located over the +# # motor cortex, and all channels show a similar pattern in the data. + +# fig, axes = plt.subplots(nrows=2, ncols=2, figsize=(15, 6)) +# clims = dict(hbo=[-20, 20], hbr=[-20, 20]) +# epochs['Control'].average().plot_image(axes=axes[:, 0], clim=clims) +# epochs['Tapping'].average().plot_image(axes=axes[:, 1], clim=clims) +# for column, condition in enumerate(['Control', 'Tapping']): +# for ax in axes[:, column]: +# ax.set_title('{}: {}'.format(condition, ax.get_title())) + + +# ############################################################################### +# # Plot standard fNIRS response image +# # ---------------------------------- +# # +# # Next we generate the most common visualisation of fNIRS data: plotting +# # both the HbO and HbR on the same figure to illustrate the relation between +# # the two signals. 
+ +# evoked_dict = {'Tapping/HbO': epochs['Tapping'].average(picks='hbo'), +# 'Tapping/HbR': epochs['Tapping'].average(picks='hbr'), +# 'Control/HbO': epochs['Control'].average(picks='hbo'), +# 'Control/HbR': epochs['Control'].average(picks='hbr')} + +# # Rename channels until the encoding of frequency in ch_name is fixed +# for condition in evoked_dict: +# evoked_dict[condition].rename_channels(lambda x: x[:-4]) + +# color_dict = dict(HbO='#AA3377', HbR='b') +# styles_dict = dict(Control=dict(linestyle='dashed')) + +# mne.viz.plot_compare_evokeds(evoked_dict, combine="mean", ci=0.95, +# colors=color_dict, styles=styles_dict) + + +# ############################################################################### +# # View topographic representation of activity +# # ------------------------------------------- +# # +# # Next we view how the topographic activity changes throughout the response. + +# times = np.arange(-3.5, 13.2, 3.0) +# topomap_args = dict(extrapolate='local') +# epochs['Tapping'].average(picks='hbo').plot_joint( +# times=times, topomap_args=topomap_args) + + +# ############################################################################### +# # Compare tapping of left and right hands +# # --------------------------------------- +# # +# # Finally we generate topo maps for the left and right conditions to view +# # the location of activity. First we visualise the HbO activity. + +# times = np.arange(4.0, 11.0, 1.0) +# epochs['Tapping/Left'].average(picks='hbo').plot_topomap( +# times=times, **topomap_args) +# epochs['Tapping/Right'].average(picks='hbo').plot_topomap( +# times=times, **topomap_args) + +# ############################################################################### +# # And we also view the HbR activity for the two conditions. 
+ +# epochs['Tapping/Left'].average(picks='hbr').plot_topomap( +# times=times, **topomap_args) +# epochs['Tapping/Right'].average(picks='hbr').plot_topomap( +# times=times, **topomap_args) + +# ############################################################################### +# # And we can plot the comparison at a single time point for two conditions. + +# fig, axes = plt.subplots(nrows=2, ncols=4, figsize=(9, 5), +# gridspec_kw=dict(width_ratios=[1, 1, 1, 0.1])) +# vmin, vmax, ts = -8, 8, 9.0 + +# evoked_left = epochs['Tapping/Left'].average() +# evoked_right = epochs['Tapping/Right'].average() + +# evoked_left.plot_topomap(ch_type='hbo', times=ts, axes=axes[0, 0], +# vmin=vmin, vmax=vmax, colorbar=False, +# **topomap_args) +# evoked_left.plot_topomap(ch_type='hbr', times=ts, axes=axes[1, 0], +# vmin=vmin, vmax=vmax, colorbar=False, +# **topomap_args) +# evoked_right.plot_topomap(ch_type='hbo', times=ts, axes=axes[0, 1], +# vmin=vmin, vmax=vmax, colorbar=False, +# **topomap_args) +# evoked_right.plot_topomap(ch_type='hbr', times=ts, axes=axes[1, 1], +# vmin=vmin, vmax=vmax, colorbar=False, +# **topomap_args) + +# evoked_diff = mne.combine_evoked([evoked_left, -evoked_right], weights='equal') + +# evoked_diff.plot_topomap(ch_type='hbo', times=ts, axes=axes[0, 2:], +# vmin=vmin, vmax=vmax, colorbar=True, +# **topomap_args) +# evoked_diff.plot_topomap(ch_type='hbr', times=ts, axes=axes[1, 2:], +# vmin=vmin, vmax=vmax, colorbar=True, +# **topomap_args) + +# for column, condition in enumerate( +# ['Tapping Left', 'Tapping Right', 'Left-Right']): +# for row, chroma in enumerate(['HbO', 'HbR']): +# axes[row, column].set_title('{}: {}'.format(chroma, condition)) +# fig.tight_layout() + +# ############################################################################### +# # Lastly, we can also look at the individual waveforms to see what is +# # driving the topographic plot above. 
+ +# fig, axes = plt.subplots(nrows=1, ncols=1, figsize=(6, 4)) +# mne.viz.plot_evoked_topo(epochs['Left'].average(picks='hbo'), color='b', +# axes=axes, legend=False) +# mne.viz.plot_evoked_topo(epochs['Right'].average(picks='hbo'), color='r', +# axes=axes, legend=False) + +# # Tidy the legend +# leg_lines = [line for line in axes.lines if line.get_c() == 'b'][:1] +# leg_lines.append([line for line in axes.lines if line.get_c() == 'r'][0]) +# fig.legend(leg_lines, ['Left', 'Right'], loc='lower right') From 710c6a1949777d19001fb387d4a67f636b7f0168 Mon Sep 17 00:00:00 2001 From: Kyle Mathewson Date: Sun, 3 May 2020 21:08:43 -0700 Subject: [PATCH 018/167] updated dataset stuff --- mne/datasets/__init__.py | 1 + mne/datasets/utils.py | 2 +- mne/io/boxy/boxy.py | 4 ++-- 3 files changed, 4 insertions(+), 3 deletions(-) diff --git a/mne/datasets/__init__.py b/mne/datasets/__init__.py index 31387b7ce72..35e67ea6d22 100644 --- a/mne/datasets/__init__.py +++ b/mne/datasets/__init__.py @@ -15,6 +15,7 @@ from . import somato from . import multimodal from . import fnirs_motor +from . import boxy_example from . import opm from . import spm_face from . 
import testing diff --git a/mne/datasets/utils.py b/mne/datasets/utils.py index 841fac05a17..9deb76285ce 100644 --- a/mne/datasets/utils.py +++ b/mne/datasets/utils.py @@ -328,7 +328,7 @@ def _data_path(path=None, force_update=False, update_path=True, download=True, testing='1ef691944239411b869b3ed2f40a69fe', multimodal='26ec847ae9ab80f58f204d09e2c08367', fnirs_motor='c4935d19ddab35422a69f3326a01fef8', - boxy_example='b3793334548b7ba04c1b767c66117414', + boxy_example='6586c112d30402e584ceba25468cafef', opm='370ad1dcfd5c47e029e692c85358a374', visual_92_categories=['74f50bbeb65740903eadc229c9fa759f', '203410a98afc9df9ae8ba9f933370e20'], diff --git a/mne/io/boxy/boxy.py b/mne/io/boxy/boxy.py index ffce7135a2e..89e28d548b6 100644 --- a/mne/io/boxy/boxy.py +++ b/mne/io/boxy/boxy.py @@ -361,7 +361,7 @@ def _read_segment_file(self, data, idx, fi, start, stop, cals, mult): ###save our data based on data type### if data_types.index(i_data) == 0: raw_ac[index_loc,:] = raw_data[channel][time_points].to_numpy() - elif data_ty pes.index(i_data) == 1: + elif data_types.index(i_data) == 1: raw_dc[index_loc,:] = raw_data[channel][time_points].to_numpy() elif data_types.index(i_data) == 2: raw_ph[index_loc,:] = raw_data[channel][time_points].to_numpy() @@ -390,4 +390,4 @@ def _read_segment_file(self, data, idx, fi, start, stop, cals, mult): elif filetype == 'parsed': markers = digaux data = np.vstack((data, markers)) - return data \ No newline at end of file + return data From 22d71c53f1a03ef58c8f91216924cf80dcf7590d Mon Sep 17 00:00:00 2001 From: Kyle Mathewson Date: Sun, 3 May 2020 21:10:35 -0700 Subject: [PATCH 019/167] correct hash from osf --- mne/datasets/utils.py | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/mne/datasets/utils.py b/mne/datasets/utils.py index 9deb76285ce..841fac05a17 100644 --- a/mne/datasets/utils.py +++ b/mne/datasets/utils.py @@ -328,7 +328,7 @@ def _data_path(path=None, force_update=False, update_path=True, download=True, 
testing='1ef691944239411b869b3ed2f40a69fe', multimodal='26ec847ae9ab80f58f204d09e2c08367', fnirs_motor='c4935d19ddab35422a69f3326a01fef8', - boxy_example='6586c112d30402e584ceba25468cafef', + boxy_example='b3793334548b7ba04c1b767c66117414', opm='370ad1dcfd5c47e029e692c85358a374', visual_92_categories=['74f50bbeb65740903eadc229c9fa759f', '203410a98afc9df9ae8ba9f933370e20'], From d46874ca262170372d3df42b7c808cb34023790a Mon Sep 17 00:00:00 2001 From: Kyle Mathewson Date: Sun, 3 May 2020 21:24:45 -0700 Subject: [PATCH 020/167] working data load --- mne/utils/config.py | 1 + tutorials/preprocessing/plot_80_boxy_processing.py | 2 +- 2 files changed, 2 insertions(+), 1 deletion(-) diff --git a/mne/utils/config.py b/mne/utils/config.py index 1e0a7c7f5e9..1f03123c068 100644 --- a/mne/utils/config.py +++ b/mne/utils/config.py @@ -95,6 +95,7 @@ def set_memmap_min_size(memmap_min_size): 'MNE_DATASETS_SOMATO_PATH', 'MNE_DATASETS_MULTIMODAL_PATH', 'MNE_DATASETS_FNIRS_MOTOR_PATH', + 'MNE_DATASETS_BOXY_EXAMPLE_PATH', 'MNE_DATASETS_OPM_PATH', 'MNE_DATASETS_SPM_FACE_DATASETS_TESTS', 'MNE_DATASETS_SPM_FACE_PATH', diff --git a/tutorials/preprocessing/plot_80_boxy_processing.py b/tutorials/preprocessing/plot_80_boxy_processing.py index 6579f4ddc09..23a182fe0bf 100644 --- a/tutorials/preprocessing/plot_80_boxy_processing.py +++ b/tutorials/preprocessing/plot_80_boxy_processing.py @@ -25,7 +25,7 @@ boxy_data_folder = mne.datasets.boxy_example.data_path() -boxy_raw_dir = os.path.join(fnirs_data_folder, 'Participant-1') +boxy_raw_dir = os.path.join(boxy_data_folder, 'Participant-1') raw_intensity = mne.io.read_raw_boxy(boxy_raw_dir, verbose=True).load_data() From 248064451b2b5a5d0f7cc408fb8d7e8eae6edeb5 Mon Sep 17 00:00:00 2001 From: Kyle Mathewson Date: Sun, 3 May 2020 22:04:17 -0700 Subject: [PATCH 021/167] debuging loading file --- mne/io/boxy/boxy.py | 83 ++++++++++++++++++++++++--------------------- 1 file changed, 45 insertions(+), 38 deletions(-) diff --git a/mne/io/boxy/boxy.py 
b/mne/io/boxy/boxy.py index 89e28d548b6..a8f1fd55852 100644 --- a/mne/io/boxy/boxy.py +++ b/mne/io/boxy/boxy.py @@ -5,6 +5,7 @@ from configparser import ConfigParser, RawConfigParser import glob as glob import re as re +import os.path as op import numpy as np @@ -14,7 +15,8 @@ from ...annotations import Annotations from ...transforms import apply_trans, _get_trans from ...utils import logger, verbose, fill_doc - +from ...channels.montage import make_dig_montage +from ...datasets import fetch_fsaverage @fill_doc def read_raw_boxy(fname, preload=False, verbose=None): @@ -55,6 +57,17 @@ def __init__(self, fname, preload=False, verbose=None): from ...coreg import get_mni_fiducials # avoid circular import prob logger.info('Loading %s' % fname) + # Check if required files exist and store names for later use + files = dict() + keys = ('mtg', 'elp', 'tol', '001') + for key in keys: + files[key] = glob.glob('%s/*%s' % (fname, key)) + if len(files[key]) != 1: + raise RuntimeError('Expect one %s file, got %d' % + (key, len(files[key]),)) + files[key] = files[key][0] + + print(files) # Read header file # Parse required header fields ###this keeps track of the line we're on### @@ -63,7 +76,9 @@ def __init__(self, fname, preload=False, verbose=None): ###load and read data to get some meta information### ###there is alot of information at the beginning of a file### ###but this only grabs some of it### - with open(boxy_file,'r') as data: + + + with open(files['001'],'r') as data: for i_line in data: line_num += 1 if '#DATA ENDS' in i_line: @@ -93,7 +108,7 @@ def __init__(self, fname, preload=False, verbose=None): chan_modulation = [] ###load and read each line of the .mtg file### - with open(mtg_file,'r') as data: + with open(files['mtg'],'r') as data: for i_ignore in range(2): next(data) for i_line in data: @@ -116,39 +131,30 @@ def __init__(self, fname, preload=False, verbose=None): all_labels = [] all_coords = [] fiducial_coords = [] - if coord_file[-3:].lower() == 
'elp'.lower(): - get_label = 0 - get_coords = 0 - ###load and read .elp file### - with open(coord_file,'r') as data: - for i_line in data: - ###first let's get our fiducial coordinates### - if '%F' in i_line: - fiducial_coords.append(i_line.split()[1:]) - ###check where sensor info starts### - if '//Sensor name' in i_line: - get_label = 1 - elif get_label == 1: - ###grab the part after '%N' for the label### - label = i_line.split()[1] - all_labels.append(label) - get_label = 0 - get_coords = 1 - elif get_coords == 1: - X, Y, Z = i_line.split() - all_coords.append([float(X),float(Y),float(Z)]) - get_coords = 0 - for i_index in range(3): - fiducial_coords[i_index] = np.asarray([float(x) for x in fiducial_coords[i_index]]) - elif coord_file[-3:] == 'tol': - ###load and read .tol file### - with open(coord_file,'r') as data: - for i_line in data: - label, X, Y, Z = i_line.split() + get_label = 0 + get_coords = 0 + ###load and read .elp file### + with open(files['elp'],'r') as data: + for i_line in data: + ###first let's get our fiducial coordinates### + if '%F' in i_line: + fiducial_coords.append(i_line.split()[1:]) + ###check where sensor info starts### + if '//Sensor name' in i_line: + get_label = 1 + elif get_label == 1: + ###grab the part after '%N' for the label### + label = i_line.split()[1] all_labels.append(label) - ###convert coordinates from mm to m## - all_coords.append([(float(X)*0.001),(float(Y)*0.001),(float(Z)*0.001)]) - + get_label = 0 + get_coords = 1 + elif get_coords == 1: + X, Y, Z = i_line.split() + all_coords.append([float(X),float(Y),float(Z)]) + get_coords = 0 + for i_index in range(3): + fiducial_coords[i_index] = np.asarray([float(x) for x in fiducial_coords[i_index]]) + ###get coordinates for sources### source_coords = [] for i_chan in source_label: @@ -207,7 +213,7 @@ def __init__(self, fname, preload=False, verbose=None): ###make our montage### - montage_orig = mne.channels.make_dig_montage(ch_pos=all_chan_dict,coord_frame='head', + 
montage_orig = make_dig_montage(ch_pos=all_chan_dict,coord_frame='head', nasion = fiducial_coords[0], lpa = fiducial_coords[1], rpa = fiducial_coords[2]) @@ -221,7 +227,7 @@ def __init__(self, fname, preload=False, verbose=None): ###add an extra channel for our triggers for later### boxy_labels.append('Markers') - info = mne.create_info(boxy_labels,srate,ch_types='fnirs_raw') + info = create_info(boxy_labels,srate,ch_types='fnirs_raw') info.update(dig=montage_orig.dig) # Set up digitization @@ -239,7 +245,7 @@ def __init__(self, fname, preload=False, verbose=None): fiducial_coords_trans = apply_trans(trans,fiducial_coords) ###make our montage### - montage_trans = mne.channels.make_dig_montage(ch_pos=all_chan_dict_trans,coord_frame='head', + montage_trans = make_dig_montage(ch_pos=all_chan_dict_trans,coord_frame='head', nasion = fiducial_coords_trans[0], lpa = fiducial_coords_trans[1], rpa = fiducial_coords_trans[2]) @@ -279,6 +285,7 @@ def __init__(self, fname, preload=False, verbose=None): def _read_segment_file(self, data, idx, fi, start, stop, cals, mult): """Read a segment of data from a file. 
""" + print(self) with open(boxy_file,'r') as data: for i_line in data: line_num += 1 From 5d40535b39ce3dc3fbc4b523e9380ea7f8d0ea5b Mon Sep 17 00:00:00 2001 From: Kyle Mathewson Date: Sun, 3 May 2020 22:43:12 -0700 Subject: [PATCH 022/167] debugged import function, plotting locations kinda works, dists calculate, data flat --- mne/io/boxy/boxy.py | 36 ++++++++++--------- .../preprocessing/plot_80_boxy_processing.py | 30 ++++++++-------- 2 files changed, 35 insertions(+), 31 deletions(-) diff --git a/mne/io/boxy/boxy.py b/mne/io/boxy/boxy.py index a8f1fd55852..c34fea4117b 100644 --- a/mne/io/boxy/boxy.py +++ b/mne/io/boxy/boxy.py @@ -6,17 +6,18 @@ import glob as glob import re as re import os.path as op - +import pandas as pd import numpy as np +import mne + from ..base import BaseRaw from ..constants import FIFF -from ..meas_info import create_info, _format_dig_points +from ..meas_info import create_info, _format_dig_points, read_fiducials from ...annotations import Annotations from ...transforms import apply_trans, _get_trans from ...utils import logger, verbose, fill_doc from ...channels.montage import make_dig_montage -from ...datasets import fetch_fsaverage @fill_doc def read_raw_boxy(fname, preload=False, verbose=None): @@ -67,7 +68,6 @@ def __init__(self, fname, preload=False, verbose=None): (key, len(files[key]),)) files[key] = files[key][0] - print(files) # Read header file # Parse required header fields ###this keeps track of the line we're on### @@ -83,6 +83,7 @@ def __init__(self, fname, preload=False, verbose=None): line_num += 1 if '#DATA ENDS' in i_line: end_line = line_num - 1 + last_sample = end_line break if 'Detector Channels' in i_line: detect_num = int(i_line.rsplit(' ')[0]) @@ -127,7 +128,7 @@ def __init__(self, fname, preload=False, verbose=None): # Each source - detector pair produces a channel # Channels are defined as the midpoint between source and detector - ###check if we are given a .tol or .elp file### + ###check if we are given .elp 
file### all_labels = [] all_coords = [] fiducial_coords = [] @@ -234,10 +235,10 @@ def __init__(self, fname, preload=False, verbose=None): # These are all in MNI coordinates, so let's transform them to # the Neuromag head coordinate frame ###get our fiducials and transform matrix from fsaverage### - subjects_dir = op.dirname(fetch_fsaverage()) + subjects_dir = op.dirname(mne.datasets.fetch_fsaverage()) fid_path = op.join(subjects_dir, 'fsaverage', 'bem', 'fsaverage-fiducials.fif') fiducials = read_fiducials(fid_path) - trans = coregister_fiducials(info, fiducials[0], tol=0.02) + trans = mne.coreg.coregister_fiducials(info, fiducials[0], tol=0.02) ###remake montage using the transformed coordinates### all_coords_trans = apply_trans(trans,all_coords) @@ -254,6 +255,7 @@ def __init__(self, fname, preload=False, verbose=None): for i_chan in range(len(all_coords_trans)): montage_trans.dig[i_chan+3]['r'] = all_coords_trans[i_chan] montage_trans.ch_names[i_chan] = all_labels[i_chan] + req_ind = montage_trans.ch_names # Create mne structure ###create info structure### @@ -277,6 +279,10 @@ def __init__(self, fname, preload=False, verbose=None): info['chs'][i_chan]['loc'] = test = np.concatenate((temp_chn, temp_src, temp_det, temp_other),axis=0) info['chs'][-1]['loc'] = np.zeros((12,)) + raw_extras = {'source_num': source_num, + 'detect_num': detect_num, + 'start_line': start_line, + 'files': files} super(RawBOXY, self).__init__( info, preload, filenames=[fname], last_samps=[last_sample], @@ -285,15 +291,11 @@ def __init__(self, fname, preload=False, verbose=None): def _read_segment_file(self, data, idx, fi, start, stop, cals, mult): """Read a segment of data from a file. 
""" - print(self) - with open(boxy_file,'r') as data: - for i_line in data: - line_num += 1 - if '#DATA BEGINS' in i_line: - start_line = line_num - break + source_num = self._raw_extras[fi]['source_num'] + detect_num = self._raw_extras[fi]['detect_num'] + start_line = self._raw_extras[fi]['start_line'] - raw_data = pd.read_csv(boxy_file, skiprows=start_line, sep='\t') + raw_data = pd.read_csv(self._raw_extras[fi]['files']['001'], skiprows=start_line, sep='\t') ###detectors, sources, and data types### detectors = ['A', 'B', 'C', 'D', 'E', 'F', 'G', 'H', 'I', 'J', 'K', 'L', 'M', 'N', 'O', 'P', 'Q', 'R', 'S', 'T', 'U', 'V', 'W', 'X', @@ -301,7 +303,8 @@ def _read_segment_file(self, data, idx, fi, start, stop, cals, mult): data_types = ['AC','DC','Ph'] sources = np.arange(1,source_num+1,1) - ###since we can save boxy files in two different styles### + + ###since we can save boxy files in two different styles### ###this will check to see which style the data is saved### ###seems to also work with older boxy files### if 'exmux' in raw_data.columns: @@ -387,6 +390,7 @@ def _read_segment_file(self, data, idx, fi, start, stop, cals, mult): ###now combine our data types into a single array with the data### data = np.append(raw_ac, np.append(raw_dc, raw_ph, axis=0),axis=0) + # Read triggers from event file ###add our markers to the data array based on filetype### if filetype == 'non-parsed': diff --git a/tutorials/preprocessing/plot_80_boxy_processing.py b/tutorials/preprocessing/plot_80_boxy_processing.py index 23a182fe0bf..b84844e60b6 100644 --- a/tutorials/preprocessing/plot_80_boxy_processing.py +++ b/tutorials/preprocessing/plot_80_boxy_processing.py @@ -28,7 +28,6 @@ boxy_raw_dir = os.path.join(boxy_data_folder, 'Participant-1') raw_intensity = mne.io.read_raw_boxy(boxy_raw_dir, verbose=True).load_data() - # ############################################################################### # # View location of sensors over brain surface # # 
------------------------------------------- @@ -38,15 +37,15 @@ # # between the optodes, channels (the mid point of source-detector pairs) are # # shown as dots. -# subjects_dir = mne.datasets.sample.data_path() + '/subjects' +subjects_dir = mne.datasets.sample.data_path() + '/subjects' -# fig = mne.viz.create_3d_figure(size=(800, 600), bgcolor='white') -# fig = mne.viz.plot_alignment(raw_intensity.info, show_axes=True, -# subject='fsaverage', -# trans='fsaverage', surfaces=['brain'], -# fnirs=['channels', 'pairs'], -# subjects_dir=subjects_dir, fig=fig) -# mne.viz.set_3d_view(figure=fig, azimuth=20, elevation=55, distance=0.6) +fig = mne.viz.create_3d_figure(size=(800, 600), bgcolor='white') +fig = mne.viz.plot_alignment(raw_intensity.info, show_axes=True, + subject='fsaverage', + trans='fsaverage', surfaces=['brain'], + fnirs=['channels', 'pairs'], + subjects_dir=subjects_dir, fig=fig) +mne.viz.set_3d_view(figure=fig, azimuth=20, elevation=55, distance=0.6) # ############################################################################### @@ -58,12 +57,13 @@ # # These short channels can be seen in the figure above. # # To achieve this we pick all the channels that are not considered to be short. 
-# picks = mne.pick_types(raw_intensity.info, meg=False, fnirs=True) -# dists = mne.preprocessing.nirs.source_detector_distances( -# raw_intensity.info, picks=picks) -# raw_intensity.pick(picks[dists > 0.01]) -# raw_intensity.plot(n_channels=len(raw_intensity.ch_names), -# duration=500, show_scrollbars=False) +picks = mne.pick_types(raw_intensity.info, meg=False, fnirs=True) +dists = mne.preprocessing.nirs.source_detector_distances( + raw_intensity.info, picks=picks) +print(dists) +raw_intensity.pick(picks[dists > 0.01]) +raw_intensity.plot(n_channels=10, + duration=500, show_scrollbars=False) # ############################################################################### From 17253b05ea007308aa6808d0896222d143498a59 Mon Sep 17 00:00:00 2001 From: Kyle Mathewson Date: Mon, 4 May 2020 10:27:32 -0700 Subject: [PATCH 023/167] fixed name for return in utils.py --- mne/datasets/utils.py | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/mne/datasets/utils.py b/mne/datasets/utils.py index 841fac05a17..d171dea4413 100644 --- a/mne/datasets/utils.py +++ b/mne/datasets/utils.py @@ -527,7 +527,7 @@ def has_dataset(name): Returns ------- - : bool + has : bool True if the dataset is present. """ name = 'spm' if name == 'spm_face' else name From 31139ccb7548dc44abb11b51e2427db35d4fcfeb Mon Sep 17 00:00:00 2001 From: Kyle Mathewson Date: Mon, 4 May 2020 10:29:29 -0700 Subject: [PATCH 024/167] extra newline in boxy.py --- mne/io/boxy/boxy.py | 1 + 1 file changed, 1 insertion(+) diff --git a/mne/io/boxy/boxy.py b/mne/io/boxy/boxy.py index c34fea4117b..0fdab1c9f62 100644 --- a/mne/io/boxy/boxy.py +++ b/mne/io/boxy/boxy.py @@ -19,6 +19,7 @@ from ...utils import logger, verbose, fill_doc from ...channels.montage import make_dig_montage + @fill_doc def read_raw_boxy(fname, preload=False, verbose=None): """Reader for a BOXY optical imaging recording. 
From eb1eb12d14cdadff49195c7f24e29f4af865b1fd Mon Sep 17 00:00:00 2001 From: Kyle Mathewson Date: Mon, 4 May 2020 10:33:49 -0700 Subject: [PATCH 025/167] fixing newlines --- mne/io/boxy/boxy.py | 1 + 1 file changed, 1 insertion(+) diff --git a/mne/io/boxy/boxy.py b/mne/io/boxy/boxy.py index 0fdab1c9f62..2fad16c73f6 100644 --- a/mne/io/boxy/boxy.py +++ b/mne/io/boxy/boxy.py @@ -39,6 +39,7 @@ def read_raw_boxy(fname, preload=False, verbose=None): """ return RawBOXY(fname, preload, verbose) + @fill_doc class RawBOXY(BaseRaw): """Raw object from a BOXY optical imaging file. From bb4d80497bd0f6474374e25efa765b4bfc78b9e6 Mon Sep 17 00:00:00 2001 From: Kyle Mathewson Date: Mon, 4 May 2020 11:06:34 -0700 Subject: [PATCH 026/167] removed call to findfiducials --- mne/io/boxy/boxy.py | 13 +++++++------ 1 file changed, 7 insertions(+), 6 deletions(-) diff --git a/mne/io/boxy/boxy.py b/mne/io/boxy/boxy.py index 2fad16c73f6..187b2a4415b 100644 --- a/mne/io/boxy/boxy.py +++ b/mne/io/boxy/boxy.py @@ -237,8 +237,7 @@ def __init__(self, fname, preload=False, verbose=None): # These are all in MNI coordinates, so let's transform them to # the Neuromag head coordinate frame ###get our fiducials and transform matrix from fsaverage### - subjects_dir = op.dirname(mne.datasets.fetch_fsaverage()) - fid_path = op.join(subjects_dir, 'fsaverage', 'bem', 'fsaverage-fiducials.fif') + fid_path = op.join('mne', 'data', 'fsaverage', 'fsaverage-fiducials.fif') fiducials = read_fiducials(fid_path) trans = mne.coreg.coregister_fiducials(info, fiducials[0], tol=0.02) @@ -390,8 +389,7 @@ def _read_segment_file(self, data, idx, fi, start, stop, cals, mult): raw_ph[index_loc,:] = raw_data[channel].to_numpy() ###now combine our data types into a single array with the data### - data = np.append(raw_ac, np.append(raw_dc, raw_ph, axis=0),axis=0) - + data_ = np.append(raw_ac, np.append(raw_dc, raw_ph, axis=0),axis=0) # Read triggers from event file ###add our markers to the data array based on filetype### @@ 
-399,8 +397,11 @@ def _read_segment_file(self, data, idx, fi, start, stop, cals, mult): if type(digaux) is list and digaux != []: markers = digaux[np.arange(0,len(digaux),source_num)] else: - markers = np.zeros(np.size(data,axis=1)) + markers = np.zeros(np.size(data_,axis=1)) elif filetype == 'parsed': markers = digaux - data = np.vstack((data, markers)) + + # place our data into the data object in place + data[:] = np.vstack((data_, markers))[:, start:stop] + return data From d635296c8d12b1857f8a03e3ec643f39a29e0cc5 Mon Sep 17 00:00:00 2001 From: Kyle Mathewson Date: Mon, 4 May 2020 11:15:34 -0700 Subject: [PATCH 027/167] removed mne calls --- mne/io/boxy/boxy.py | 7 +++---- 1 file changed, 3 insertions(+), 4 deletions(-) diff --git a/mne/io/boxy/boxy.py b/mne/io/boxy/boxy.py index 187b2a4415b..edef555a529 100644 --- a/mne/io/boxy/boxy.py +++ b/mne/io/boxy/boxy.py @@ -9,8 +9,6 @@ import pandas as pd import numpy as np -import mne - from ..base import BaseRaw from ..constants import FIFF from ..meas_info import create_info, _format_dig_points, read_fiducials @@ -18,6 +16,7 @@ from ...transforms import apply_trans, _get_trans from ...utils import logger, verbose, fill_doc from ...channels.montage import make_dig_montage +from ...coreg import coregister_fiducials @fill_doc @@ -239,7 +238,7 @@ def __init__(self, fname, preload=False, verbose=None): ###get our fiducials and transform matrix from fsaverage### fid_path = op.join('mne', 'data', 'fsaverage', 'fsaverage-fiducials.fif') fiducials = read_fiducials(fid_path) - trans = mne.coreg.coregister_fiducials(info, fiducials[0], tol=0.02) + trans = coregister_fiducials(info, fiducials[0], tol=0.02) ###remake montage using the transformed coordinates### all_coords_trans = apply_trans(trans,all_coords) @@ -260,7 +259,7 @@ def __init__(self, fname, preload=False, verbose=None): # Create mne structure ###create info structure### - info = mne.create_info(boxy_labels,srate,ch_types='fnirs_raw') + info = 
create_info(boxy_labels,srate,ch_types='fnirs_raw') ###add data type and channel wavelength to info### info.update(dig=montage_trans.dig, trans=trans) From 36db2101f659e6b6081356e46dacf94da5ae6c53 Mon Sep 17 00:00:00 2001 From: Kyle Mathewson Date: Mon, 4 May 2020 11:24:28 -0700 Subject: [PATCH 028/167] revert to import mne to debug data --- mne/io/boxy/boxy.py | 6 ++++-- 1 file changed, 4 insertions(+), 2 deletions(-) diff --git a/mne/io/boxy/boxy.py b/mne/io/boxy/boxy.py index edef555a529..4c8cc210dc3 100644 --- a/mne/io/boxy/boxy.py +++ b/mne/io/boxy/boxy.py @@ -9,6 +9,8 @@ import pandas as pd import numpy as np +import mne + from ..base import BaseRaw from ..constants import FIFF from ..meas_info import create_info, _format_dig_points, read_fiducials @@ -16,7 +18,7 @@ from ...transforms import apply_trans, _get_trans from ...utils import logger, verbose, fill_doc from ...channels.montage import make_dig_montage -from ...coreg import coregister_fiducials +# from ...coreg import coregister_fiducials @fill_doc @@ -238,7 +240,7 @@ def __init__(self, fname, preload=False, verbose=None): ###get our fiducials and transform matrix from fsaverage### fid_path = op.join('mne', 'data', 'fsaverage', 'fsaverage-fiducials.fif') fiducials = read_fiducials(fid_path) - trans = coregister_fiducials(info, fiducials[0], tol=0.02) + trans = mne.coreg.coregister_fiducials(info, fiducials[0], tol=0.02) ###remake montage using the transformed coordinates### all_coords_trans = apply_trans(trans,all_coords) From de699889891ce6e560b8fdfbe060ff818b47801d Mon Sep 17 00:00:00 2001 From: Kyle Mathewson Date: Mon, 4 May 2020 12:44:27 -0700 Subject: [PATCH 029/167] working data load and plot, needed to divide expected data by 16 for each source --- mne/io/boxy/boxy.py | 20 +++++++++++++++--- .../preprocessing/plot_80_boxy_processing.py | 21 ++++++++++--------- 2 files changed, 28 insertions(+), 13 deletions(-) diff --git a/mne/io/boxy/boxy.py b/mne/io/boxy/boxy.py index 4c8cc210dc3..b683d306662 
100644 --- a/mne/io/boxy/boxy.py +++ b/mne/io/boxy/boxy.py @@ -86,7 +86,6 @@ def __init__(self, fname, preload=False, verbose=None): line_num += 1 if '#DATA ENDS' in i_line: end_line = line_num - 1 - last_sample = end_line break if 'Detector Channels' in i_line: detect_num = int(i_line.rsplit(' ')[0]) @@ -286,8 +285,20 @@ def __init__(self, fname, preload=False, verbose=None): 'start_line': start_line, 'files': files} + print('Start Line: ', start_line) + print('End Line: ', end_line) + print('Original Difference: ', end_line-start_line) + first_samps = start_line + print('New first_samps: ', first_samps) + diff = end_line-start_line + last_samps = start_line + int(diff/16)-1 + print('New last_samps: ', last_samps) + print('New Difference: ', last_samps-first_samps) + + super(RawBOXY, self).__init__( - info, preload, filenames=[fname], last_samps=[last_sample], + info, preload, filenames=[fname], first_samps=[first_samps], + last_samps=[last_samps], raw_extras=[raw_extras], verbose=verbose) def _read_segment_file(self, data, idx, fi, start, stop, cals, mult): @@ -402,7 +413,10 @@ def _read_segment_file(self, data, idx, fi, start, stop, cals, mult): elif filetype == 'parsed': markers = digaux + print('Blank Data shape: ', data.shape) + temp = np.vstack((data_, markers)) + print('Input Data shape: ',temp.shape) # place our data into the data object in place - data[:] = np.vstack((data_, markers))[:, start:stop] + data[:] = np.vstack((data_, markers)) return data diff --git a/tutorials/preprocessing/plot_80_boxy_processing.py b/tutorials/preprocessing/plot_80_boxy_processing.py index b84844e60b6..9ae9de83ba8 100644 --- a/tutorials/preprocessing/plot_80_boxy_processing.py +++ b/tutorials/preprocessing/plot_80_boxy_processing.py @@ -39,13 +39,13 @@ subjects_dir = mne.datasets.sample.data_path() + '/subjects' -fig = mne.viz.create_3d_figure(size=(800, 600), bgcolor='white') -fig = mne.viz.plot_alignment(raw_intensity.info, show_axes=True, - subject='fsaverage', - 
trans='fsaverage', surfaces=['brain'], - fnirs=['channels', 'pairs'], - subjects_dir=subjects_dir, fig=fig) -mne.viz.set_3d_view(figure=fig, azimuth=20, elevation=55, distance=0.6) +# fig = mne.viz.create_3d_figure(size=(800, 600), bgcolor='white') +# fig = mne.viz.plot_alignment(raw_intensity.info, show_axes=True, +# subject='fsaverage', +# trans='fsaverage', surfaces=['brain'], +# fnirs=['channels', 'pairs'], +# subjects_dir=subjects_dir, fig=fig) +# mne.viz.set_3d_view(figure=fig, azimuth=20, elevation=55, distance=0.6) # ############################################################################### @@ -60,10 +60,11 @@ picks = mne.pick_types(raw_intensity.info, meg=False, fnirs=True) dists = mne.preprocessing.nirs.source_detector_distances( raw_intensity.info, picks=picks) -print(dists) raw_intensity.pick(picks[dists > 0.01]) -raw_intensity.plot(n_channels=10, - duration=500, show_scrollbars=False) +print(raw_intensity.info) +scalings = dict(eeg=20e-100) +raw_intensity.plot(n_channels=1, + duration=100, scalings=scalings, show_scrollbars=False) # ############################################################################### From be3b6ec0ae027e9f7e01816f2190370a643ee1cd Mon Sep 17 00:00:00 2001 From: Kyle Mathewson Date: Mon, 4 May 2020 12:47:31 -0700 Subject: [PATCH 030/167] pull sorce number from code --- mne/io/boxy/boxy.py | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/mne/io/boxy/boxy.py b/mne/io/boxy/boxy.py index b683d306662..7f78abf6975 100644 --- a/mne/io/boxy/boxy.py +++ b/mne/io/boxy/boxy.py @@ -291,7 +291,7 @@ def __init__(self, fname, preload=False, verbose=None): first_samps = start_line print('New first_samps: ', first_samps) diff = end_line-start_line - last_samps = start_line + int(diff/16)-1 + last_samps = start_line + int(diff/source_num)-1 print('New last_samps: ', last_samps) print('New Difference: ', last_samps-first_samps) From a216b4f70bc88b772687f9f18d757a983c900102 Mon Sep 17 00:00:00 2001 From: Kyle Mathewson Date: 
Mon, 4 May 2020 13:10:49 -0700 Subject: [PATCH 031/167] working plotting with rescale --- mne/io/boxy/boxy.py | 4 ++-- mne/io/brainvision/foo.py | 2 ++ tutorials/preprocessing/plot_80_boxy_processing.py | 8 ++++---- 3 files changed, 8 insertions(+), 6 deletions(-) create mode 100644 mne/io/brainvision/foo.py diff --git a/mne/io/boxy/boxy.py b/mne/io/boxy/boxy.py index 7f78abf6975..b3d87179bc6 100644 --- a/mne/io/boxy/boxy.py +++ b/mne/io/boxy/boxy.py @@ -291,11 +291,11 @@ def __init__(self, fname, preload=False, verbose=None): first_samps = start_line print('New first_samps: ', first_samps) diff = end_line-start_line - last_samps = start_line + int(diff/source_num)-1 + #input file has rows for each source, output variable rearranges as columns and does not + last_samps = start_line + int(diff/source_num)-1 print('New last_samps: ', last_samps) print('New Difference: ', last_samps-first_samps) - super(RawBOXY, self).__init__( info, preload, filenames=[fname], first_samps=[first_samps], last_samps=[last_samps], diff --git a/mne/io/brainvision/foo.py b/mne/io/brainvision/foo.py new file mode 100644 index 00000000000..c15a9f85fc1 --- /dev/null +++ b/mne/io/brainvision/foo.py @@ -0,0 +1,2 @@ +from ...coreg import coregister_fiducials +print(coregister_fiducials) diff --git a/tutorials/preprocessing/plot_80_boxy_processing.py b/tutorials/preprocessing/plot_80_boxy_processing.py index 9ae9de83ba8..2930fdfe0a7 100644 --- a/tutorials/preprocessing/plot_80_boxy_processing.py +++ b/tutorials/preprocessing/plot_80_boxy_processing.py @@ -61,10 +61,10 @@ dists = mne.preprocessing.nirs.source_detector_distances( raw_intensity.info, picks=picks) raw_intensity.pick(picks[dists > 0.01]) -print(raw_intensity.info) -scalings = dict(eeg=20e-100) -raw_intensity.plot(n_channels=1, - duration=100, scalings=scalings, show_scrollbars=False) +print(mne.io.pick.channel_type(raw_intensity.info, 0)) +scalings = dict(fnirs_raw=1e2) +raw_intensity.plot(n_channels=10, + duration=1000, 
scalings=scalings, show_scrollbars=True) # ############################################################################### From 6985bc289e6b2605743f26b6764b0fdfa8872746 Mon Sep 17 00:00:00 2001 From: Kyle Mathewson Date: Mon, 4 May 2020 13:29:38 -0700 Subject: [PATCH 032/167] fixed import of coreg function --- mne/io/boxy/boxy.py | 7 ++----- mne/io/brainvision/foo.py | 2 -- tutorials/preprocessing/plot_80_boxy_processing.py | 14 +++++++------- 3 files changed, 9 insertions(+), 14 deletions(-) delete mode 100644 mne/io/brainvision/foo.py diff --git a/mne/io/boxy/boxy.py b/mne/io/boxy/boxy.py index b3d87179bc6..64c5ca2843b 100644 --- a/mne/io/boxy/boxy.py +++ b/mne/io/boxy/boxy.py @@ -9,8 +9,6 @@ import pandas as pd import numpy as np -import mne - from ..base import BaseRaw from ..constants import FIFF from ..meas_info import create_info, _format_dig_points, read_fiducials @@ -18,7 +16,6 @@ from ...transforms import apply_trans, _get_trans from ...utils import logger, verbose, fill_doc from ...channels.montage import make_dig_montage -# from ...coreg import coregister_fiducials @fill_doc @@ -58,7 +55,7 @@ class RawBOXY(BaseRaw): @verbose def __init__(self, fname, preload=False, verbose=None): from ...externals.pymatreader import read_mat - from ...coreg import get_mni_fiducials # avoid circular import prob + from ...coreg import get_mni_fiducials, coregister_fiducials # avoid circular import prob logger.info('Loading %s' % fname) # Check if required files exist and store names for later use @@ -239,7 +236,7 @@ def __init__(self, fname, preload=False, verbose=None): ###get our fiducials and transform matrix from fsaverage### fid_path = op.join('mne', 'data', 'fsaverage', 'fsaverage-fiducials.fif') fiducials = read_fiducials(fid_path) - trans = mne.coreg.coregister_fiducials(info, fiducials[0], tol=0.02) + trans = coregister_fiducials(info, fiducials[0], tol=0.02) ###remake montage using the transformed coordinates### all_coords_trans = apply_trans(trans,all_coords) 
diff --git a/mne/io/brainvision/foo.py b/mne/io/brainvision/foo.py deleted file mode 100644 index c15a9f85fc1..00000000000 --- a/mne/io/brainvision/foo.py +++ /dev/null @@ -1,2 +0,0 @@ -from ...coreg import coregister_fiducials -print(coregister_fiducials) diff --git a/tutorials/preprocessing/plot_80_boxy_processing.py b/tutorials/preprocessing/plot_80_boxy_processing.py index 2930fdfe0a7..ace0246fa16 100644 --- a/tutorials/preprocessing/plot_80_boxy_processing.py +++ b/tutorials/preprocessing/plot_80_boxy_processing.py @@ -39,13 +39,13 @@ subjects_dir = mne.datasets.sample.data_path() + '/subjects' -# fig = mne.viz.create_3d_figure(size=(800, 600), bgcolor='white') -# fig = mne.viz.plot_alignment(raw_intensity.info, show_axes=True, -# subject='fsaverage', -# trans='fsaverage', surfaces=['brain'], -# fnirs=['channels', 'pairs'], -# subjects_dir=subjects_dir, fig=fig) -# mne.viz.set_3d_view(figure=fig, azimuth=20, elevation=55, distance=0.6) +fig = mne.viz.create_3d_figure(size=(800, 600), bgcolor='white') +fig = mne.viz.plot_alignment(raw_intensity.info, show_axes=True, + subject='fsaverage', + trans='fsaverage', surfaces=['brain'], + fnirs=['channels', 'pairs'], + subjects_dir=subjects_dir, fig=fig) +mne.viz.set_3d_view(figure=fig, azimuth=20, elevation=55, distance=0.6) # ############################################################################### From 2bab9aa2bfabac8433783f660e83d131a1caff0e Mon Sep 17 00:00:00 2001 From: Kyle Mathewson Date: Mon, 4 May 2020 15:00:40 -0700 Subject: [PATCH 033/167] removed transform into fsaverage since it already was there --- .../preprocessing/plot_80_boxy_processing.py | 25 ++++++++++++------- 1 file changed, 16 insertions(+), 9 deletions(-) diff --git a/tutorials/preprocessing/plot_80_boxy_processing.py b/tutorials/preprocessing/plot_80_boxy_processing.py index ace0246fa16..743479e6f44 100644 --- a/tutorials/preprocessing/plot_80_boxy_processing.py +++ b/tutorials/preprocessing/plot_80_boxy_processing.py @@ -42,7 +42,15 @@ 
fig = mne.viz.create_3d_figure(size=(800, 600), bgcolor='white') fig = mne.viz.plot_alignment(raw_intensity.info, show_axes=True, subject='fsaverage', - trans='fsaverage', surfaces=['brain'], + trans=None, surfaces=['brain'], + fnirs=['channels', 'pairs'], + subjects_dir=subjects_dir, fig=fig) +mne.viz.set_3d_view(figure=fig, azimuth=20, elevation=55, distance=0.6) + +fig = mne.viz.create_3d_figure(size=(800, 600), bgcolor='white') +fig = mne.viz.plot_alignment(raw_intensity.info, show_axes=True, + subject='fsaverage', + trans=None, surfaces=['head'], fnirs=['channels', 'pairs'], subjects_dir=subjects_dir, fig=fig) mne.viz.set_3d_view(figure=fig, azimuth=20, elevation=55, distance=0.6) @@ -57,14 +65,13 @@ # # These short channels can be seen in the figure above. # # To achieve this we pick all the channels that are not considered to be short. -picks = mne.pick_types(raw_intensity.info, meg=False, fnirs=True) -dists = mne.preprocessing.nirs.source_detector_distances( - raw_intensity.info, picks=picks) -raw_intensity.pick(picks[dists > 0.01]) -print(mne.io.pick.channel_type(raw_intensity.info, 0)) -scalings = dict(fnirs_raw=1e2) -raw_intensity.plot(n_channels=10, - duration=1000, scalings=scalings, show_scrollbars=True) +# picks = mne.pick_types(raw_intensity.info, meg=False, fnirs=True) +# dists = mne.preprocessing.nirs.source_detector_distances( +# raw_intensity.info, picks=picks) +# raw_intensity.pick(picks[dists < 0.06]) +# scalings = dict(fnirs_raw=1e2) +# raw_intensity.plot(n_channels=10, +# duration=1000, scalings=scalings, show_scrollbars=True) # ############################################################################### From ab2b2de3863ab6a16234ec3427203d81936b803f Mon Sep 17 00:00:00 2001 From: Kyle Mathewson Date: Mon, 4 May 2020 15:50:30 -0700 Subject: [PATCH 034/167] working through bug plotting dig --- mne/io/boxy/boxy.py | 44 +++++++++---------- .../preprocessing/plot_80_boxy_processing.py | 21 +++++---- 2 files changed, 32 insertions(+), 33 
deletions(-) diff --git a/mne/io/boxy/boxy.py b/mne/io/boxy/boxy.py index 64c5ca2843b..0e7088a454a 100644 --- a/mne/io/boxy/boxy.py +++ b/mne/io/boxy/boxy.py @@ -153,7 +153,7 @@ def __init__(self, fname, preload=False, verbose=None): all_coords.append([float(X),float(Y),float(Z)]) get_coords = 0 for i_index in range(3): - fiducial_coords[i_index] = np.asarray([float(x) for x in fiducial_coords[i_index]]) + fiducial_coords[i_index] = np.asarray([float(x) for x in fiducial_coords[i_index]]) ###get coordinates for sources### source_coords = [] @@ -237,29 +237,29 @@ def __init__(self, fname, preload=False, verbose=None): fid_path = op.join('mne', 'data', 'fsaverage', 'fsaverage-fiducials.fif') fiducials = read_fiducials(fid_path) trans = coregister_fiducials(info, fiducials[0], tol=0.02) - - ###remake montage using the transformed coordinates### - all_coords_trans = apply_trans(trans,all_coords) - all_chan_dict_trans = dict(zip(all_labels,all_coords_trans)) - fiducial_coords_trans = apply_trans(trans,fiducial_coords) + info.update(trans=trans) + + # ###remake montage using the transformed coordinates### + # all_coords_trans = apply_trans(trans,all_coords) + # all_chan_dict_trans = dict(zip(all_labels,all_coords_trans)) + # fiducial_coords_trans = apply_trans(trans,fiducial_coords) - ###make our montage### - montage_trans = make_dig_montage(ch_pos=all_chan_dict_trans,coord_frame='head', - nasion = fiducial_coords_trans[0], - lpa = fiducial_coords_trans[1], - rpa = fiducial_coords_trans[2]) + # ###make our montage### + # montage_trans = make_dig_montage(ch_pos=all_chan_dict,coord_frame='head', + # nasion = fiducial_coords[0], + # lpa = fiducial_coords[1], + # rpa = fiducial_coords[2]) - ###let's fix montage order ### - for i_chan in range(len(all_coords_trans)): - montage_trans.dig[i_chan+3]['r'] = all_coords_trans[i_chan] - montage_trans.ch_names[i_chan] = all_labels[i_chan] - req_ind = montage_trans.ch_names + # ###let's fix montage order ### + # for i_chan in 
range(len(all_coords)): + # montage_trans.dig[i_chan+3]['r'] = all_coords[i_chan] + # montage_trans.ch_names[i_chan] = all_labels[i_chan] # Create mne structure ###create info structure### - info = create_info(boxy_labels,srate,ch_types='fnirs_raw') - ###add data type and channel wavelength to info### - info.update(dig=montage_trans.dig, trans=trans) + # info = create_info(boxy_labels,srate,ch_types='fnirs_raw') + # ###add data type and channel wavelength to info### + # info.update(dig=montage_trans.dig, trans=trans) # Store channel, source, and detector locations # The channel location is stored in the first 3 entries of loc. @@ -270,9 +270,9 @@ def __init__(self, fname, preload=False, verbose=None): ###place our coordinates and wavelengths for each channel### for i_chan in range(len(boxy_labels)-1): - temp_chn = apply_trans(trans,boxy_coords[i_chan][0:3]) - temp_src = apply_trans(trans,boxy_coords[i_chan][3:6]) - temp_det = apply_trans(trans,boxy_coords[i_chan][6:9]) + temp_chn = boxy_coords[i_chan][0:3] + temp_src = boxy_coords[i_chan][3:6] + temp_det = boxy_coords[i_chan][6:9] temp_other = np.asarray(boxy_coords[i_chan][9:],dtype=np.float64) info['chs'][i_chan]['loc'] = test = np.concatenate((temp_chn, temp_src, temp_det, temp_other),axis=0) diff --git a/tutorials/preprocessing/plot_80_boxy_processing.py b/tutorials/preprocessing/plot_80_boxy_processing.py index 743479e6f44..83c59743b98 100644 --- a/tutorials/preprocessing/plot_80_boxy_processing.py +++ b/tutorials/preprocessing/plot_80_boxy_processing.py @@ -39,20 +39,19 @@ subjects_dir = mne.datasets.sample.data_path() + '/subjects' +print(raw_intensity.info['dig'][0:5]) +print(raw_intensity.info['chs'][0]['loc']) fig = mne.viz.create_3d_figure(size=(800, 600), bgcolor='white') -fig = mne.viz.plot_alignment(raw_intensity.info, show_axes=True, +fig = mne.viz.plot_alignment(raw_intensity.info, + show_axes=True, subject='fsaverage', - trans=None, surfaces=['brain'], + trans=raw_intensity.info['trans'], + 
surfaces=['head-dense', 'brain'], fnirs=['channels', 'pairs'], - subjects_dir=subjects_dir, fig=fig) -mne.viz.set_3d_view(figure=fig, azimuth=20, elevation=55, distance=0.6) - -fig = mne.viz.create_3d_figure(size=(800, 600), bgcolor='white') -fig = mne.viz.plot_alignment(raw_intensity.info, show_axes=True, - subject='fsaverage', - trans=None, surfaces=['head'], - fnirs=['channels', 'pairs'], - subjects_dir=subjects_dir, fig=fig) + mri_fiducials=True, + dig=True, + subjects_dir=subjects_dir, + fig=fig) mne.viz.set_3d_view(figure=fig, azimuth=20, elevation=55, distance=0.6) From 6d08ca48eb007a06a131e6edb36758cbcae59e4a Mon Sep 17 00:00:00 2001 From: Kyle Mathewson Date: Mon, 4 May 2020 17:22:26 -0700 Subject: [PATCH 035/167] working example --- mne/io/boxy/boxy.py | 60 +++++++++---------- .../preprocessing/plot_80_boxy_processing.py | 4 +- 2 files changed, 30 insertions(+), 34 deletions(-) diff --git a/mne/io/boxy/boxy.py b/mne/io/boxy/boxy.py index 0e7088a454a..133df642009 100644 --- a/mne/io/boxy/boxy.py +++ b/mne/io/boxy/boxy.py @@ -13,7 +13,7 @@ from ..constants import FIFF from ..meas_info import create_info, _format_dig_points, read_fiducials from ...annotations import Annotations -from ...transforms import apply_trans, _get_trans +from ...transforms import apply_trans, _get_trans, get_ras_to_neuromag_trans from ...utils import logger, verbose, fill_doc from ...channels.montage import make_dig_montage @@ -153,7 +153,7 @@ def __init__(self, fname, preload=False, verbose=None): all_coords.append([float(X),float(Y),float(Z)]) get_coords = 0 for i_index in range(3): - fiducial_coords[i_index] = np.asarray([float(x) for x in fiducial_coords[i_index]]) + fiducial_coords[i_index] = np.asarray([float(x) for x in fiducial_coords[i_index]]) ###get coordinates for sources### source_coords = [] @@ -218,11 +218,11 @@ def __init__(self, fname, preload=False, verbose=None): lpa = fiducial_coords[1], rpa = fiducial_coords[2]) - ###for some reason make_dig_montage put our 
channels in a different order than what we input### - ###let's fix that. should be fine to just change coords and ch_names### - for i_chan in range(len(all_coords)): - montage_orig.dig[i_chan+3]['r'] = all_coords[i_chan] - montage_orig.ch_names[i_chan] = all_labels[i_chan] + # ###for some reason make_dig_montage put our channels in a different order than what we input### + # ###let's fix that. should be fine to just change coords and ch_names### + # for i_chan in range(len(all_coords)): + # montage_orig.dig[i_chan+3]['r'] = all_coords[i_chan] + # montage_orig.ch_names[i_chan] = all_labels[i_chan] ###add an extra channel for our triggers for later### boxy_labels.append('Markers') @@ -231,35 +231,33 @@ def __init__(self, fname, preload=False, verbose=None): info.update(dig=montage_orig.dig) # Set up digitization - # These are all in MNI coordinates, so let's transform them to + # These are all in actual 3d individual coordinates, so let's transform them to # the Neuromag head coordinate frame - ###get our fiducials and transform matrix from fsaverage### - fid_path = op.join('mne', 'data', 'fsaverage', 'fsaverage-fiducials.fif') - fiducials = read_fiducials(fid_path) - trans = coregister_fiducials(info, fiducials[0], tol=0.02) - info.update(trans=trans) - - # ###remake montage using the transformed coordinates### - # all_coords_trans = apply_trans(trans,all_coords) - # all_chan_dict_trans = dict(zip(all_labels,all_coords_trans)) - # fiducial_coords_trans = apply_trans(trans,fiducial_coords) + trans = get_ras_to_neuromag_trans(fiducial_coords[0], + fiducial_coords[1], + fiducial_coords[2]) + + ###remake montage using the transformed coordinates### + all_coords_trans = apply_trans(trans,all_coords) + all_chan_dict_trans = dict(zip(all_labels,all_coords_trans)) + fiducial_coords_trans = apply_trans(trans,fiducial_coords) - # ###make our montage### - # montage_trans = make_dig_montage(ch_pos=all_chan_dict,coord_frame='head', - # nasion = fiducial_coords[0], - # lpa = 
fiducial_coords[1], - # rpa = fiducial_coords[2]) + ###make our montage### + montage_trans = make_dig_montage(ch_pos=all_chan_dict_trans,coord_frame='head', + nasion = fiducial_coords_trans[0], + lpa = fiducial_coords_trans[1], + rpa = fiducial_coords_trans[2]) # ###let's fix montage order ### - # for i_chan in range(len(all_coords)): - # montage_trans.dig[i_chan+3]['r'] = all_coords[i_chan] + # for i_chan in range(len(all_coords_trans)): + # montage_trans.dig[i_chan+3]['r'] = all_coords_trans[i_chan] # montage_trans.ch_names[i_chan] = all_labels[i_chan] # Create mne structure ###create info structure### - # info = create_info(boxy_labels,srate,ch_types='fnirs_raw') - # ###add data type and channel wavelength to info### - # info.update(dig=montage_trans.dig, trans=trans) + info = create_info(boxy_labels,srate,ch_types='fnirs_raw') + ###add data type and channel wavelength to info### + info.update(dig=montage_trans.dig, trans=trans) # Store channel, source, and detector locations # The channel location is stored in the first 3 entries of loc. 
@@ -270,9 +268,9 @@ def __init__(self, fname, preload=False, verbose=None): ###place our coordinates and wavelengths for each channel### for i_chan in range(len(boxy_labels)-1): - temp_chn = boxy_coords[i_chan][0:3] - temp_src = boxy_coords[i_chan][3:6] - temp_det = boxy_coords[i_chan][6:9] + temp_chn = apply_trans(trans,boxy_coords[i_chan][0:3]) + temp_src = apply_trans(trans,boxy_coords[i_chan][3:6]) + temp_det = apply_trans(trans,boxy_coords[i_chan][6:9]) temp_other = np.asarray(boxy_coords[i_chan][9:],dtype=np.float64) info['chs'][i_chan]['loc'] = test = np.concatenate((temp_chn, temp_src, temp_det, temp_other),axis=0) diff --git a/tutorials/preprocessing/plot_80_boxy_processing.py b/tutorials/preprocessing/plot_80_boxy_processing.py index 83c59743b98..93aeee767bd 100644 --- a/tutorials/preprocessing/plot_80_boxy_processing.py +++ b/tutorials/preprocessing/plot_80_boxy_processing.py @@ -39,13 +39,11 @@ subjects_dir = mne.datasets.sample.data_path() + '/subjects' -print(raw_intensity.info['dig'][0:5]) -print(raw_intensity.info['chs'][0]['loc']) fig = mne.viz.create_3d_figure(size=(800, 600), bgcolor='white') fig = mne.viz.plot_alignment(raw_intensity.info, show_axes=True, subject='fsaverage', - trans=raw_intensity.info['trans'], + trans='fsaverage', surfaces=['head-dense', 'brain'], fnirs=['channels', 'pairs'], mri_fiducials=True, From 62136cecb4d35529a5e516e62c65f3b17ad259a0 Mon Sep 17 00:00:00 2001 From: Kyle Mathewson Date: Mon, 4 May 2020 18:05:47 -0700 Subject: [PATCH 036/167] working version that loads in coordiantes and transforms correctly --- mne/io/boxy/boxy.py | 69 ++++++------------- .../preprocessing/plot_80_boxy_processing.py | 14 ++-- 2 files changed, 29 insertions(+), 54 deletions(-) diff --git a/mne/io/boxy/boxy.py b/mne/io/boxy/boxy.py index 133df642009..fa5df41a01e 100644 --- a/mne/io/boxy/boxy.py +++ b/mne/io/boxy/boxy.py @@ -200,6 +200,9 @@ def __init__(self, fname, preload=False, verbose=None): + '_D' + 
str(unique_detect_labels.index(detect_label[i_coord])+1) + ' ' + chan_wavelength[i_coord] + ' ' + i_type) + + # add extra column for triggers + boxy_labels.append('Markers') ###montage only wants channel coords, so need to grab those, convert to### ###array, then make a dict with labels### @@ -211,62 +214,32 @@ def __init__(self, fname, preload=False, verbose=None): all_chan_dict = dict(zip(all_labels,all_coords)) - - ###make our montage### - montage_orig = make_dig_montage(ch_pos=all_chan_dict,coord_frame='head', - nasion = fiducial_coords[0], - lpa = fiducial_coords[1], - rpa = fiducial_coords[2]) + ###make our montage### + my_dig_montage = make_dig_montage(ch_pos=all_chan_dict, + coord_frame='unknown', + nasion = fiducial_coords[0], + lpa = fiducial_coords[1], + rpa = fiducial_coords[2]) - # ###for some reason make_dig_montage put our channels in a different order than what we input### - # ###let's fix that. should be fine to just change coords and ch_names### - # for i_chan in range(len(all_coords)): - # montage_orig.dig[i_chan+3]['r'] = all_coords[i_chan] - # montage_orig.ch_names[i_chan] = all_labels[i_chan] - - ###add an extra channel for our triggers for later### - boxy_labels.append('Markers') - - info = create_info(boxy_labels,srate,ch_types='fnirs_raw') - info.update(dig=montage_orig.dig) - - # Set up digitization - # These are all in actual 3d individual coordinates, so let's transform them to - # the Neuromag head coordinate frame - trans = get_ras_to_neuromag_trans(fiducial_coords[0], - fiducial_coords[1], - fiducial_coords[2]) - - ###remake montage using the transformed coordinates### - all_coords_trans = apply_trans(trans,all_coords) - all_chan_dict_trans = dict(zip(all_labels,all_coords_trans)) - fiducial_coords_trans = apply_trans(trans,fiducial_coords) - - ###make our montage### - montage_trans = make_dig_montage(ch_pos=all_chan_dict_trans,coord_frame='head', - nasion = fiducial_coords_trans[0], - lpa = fiducial_coords_trans[1], - rpa = 
fiducial_coords_trans[2]) - - # ###let's fix montage order ### - # for i_chan in range(len(all_coords_trans)): - # montage_trans.dig[i_chan+3]['r'] = all_coords_trans[i_chan] - # montage_trans.ch_names[i_chan] = all_labels[i_chan] - - # Create mne structure ###create info structure### - info = create_info(boxy_labels,srate,ch_types='fnirs_raw') - ###add data type and channel wavelength to info### - info.update(dig=montage_trans.dig, trans=trans) + info = create_info(boxy_labels, srate, ch_types='fnirs_raw') + ###add dig info### + ## this also applies a transform to the data into neuromag space based on fiducials + info.set_montage(my_dig_montage) # Store channel, source, and detector locations # The channel location is stored in the first 3 entries of loc. # The source location is stored in the second 3 entries of loc. # The detector location is stored in the third 3 entries of loc. - # NIRx NIRSite uses MNI coordinates. # Also encode the light frequency in the structure. - + ###place our coordinates and wavelengths for each channel### + # # These are all in actual 3d individual coordinates, so let's transform them to + # # the Neuromag head coordinate frame + trans = get_ras_to_neuromag_trans(fiducial_coords[0], + fiducial_coords[1], + fiducial_coords[2]) + for i_chan in range(len(boxy_labels)-1): temp_chn = apply_trans(trans,boxy_coords[i_chan][0:3]) temp_src = apply_trans(trans,boxy_coords[i_chan][3:6]) @@ -274,7 +247,9 @@ def __init__(self, fname, preload=False, verbose=None): temp_other = np.asarray(boxy_coords[i_chan][9:],dtype=np.float64) info['chs'][i_chan]['loc'] = test = np.concatenate((temp_chn, temp_src, temp_det, temp_other),axis=0) + info['chs'][-1]['loc'] = np.zeros((12,)) + raw_extras = {'source_num': source_num, 'detect_num': detect_num, 'start_line': start_line, diff --git a/tutorials/preprocessing/plot_80_boxy_processing.py b/tutorials/preprocessing/plot_80_boxy_processing.py index 93aeee767bd..7d87a69adc8 100644 --- 
a/tutorials/preprocessing/plot_80_boxy_processing.py +++ b/tutorials/preprocessing/plot_80_boxy_processing.py @@ -62,13 +62,13 @@ # # These short channels can be seen in the figure above. # # To achieve this we pick all the channels that are not considered to be short. -# picks = mne.pick_types(raw_intensity.info, meg=False, fnirs=True) -# dists = mne.preprocessing.nirs.source_detector_distances( -# raw_intensity.info, picks=picks) -# raw_intensity.pick(picks[dists < 0.06]) -# scalings = dict(fnirs_raw=1e2) -# raw_intensity.plot(n_channels=10, -# duration=1000, scalings=scalings, show_scrollbars=True) +picks = mne.pick_types(raw_intensity.info, meg=False, fnirs=True) +dists = mne.preprocessing.nirs.source_detector_distances( + raw_intensity.info, picks=picks) +raw_intensity.pick(picks[dists < 0.06]) +scalings = dict(fnirs_raw=1e2) +raw_intensity.plot(n_channels=10, + duration=1000, scalings=scalings, show_scrollbars=True) # ############################################################################### From d9eef8148c76cf1475521329993bc01d922d4a03 Mon Sep 17 00:00:00 2001 From: Kyle Mathewson Date: Mon, 4 May 2020 23:25:33 -0700 Subject: [PATCH 037/167] fixed review comments --- mne/io/boxy/boxy.py | 27 ++++++++++----------------- 1 file changed, 10 insertions(+), 17 deletions(-) diff --git a/mne/io/boxy/boxy.py b/mne/io/boxy/boxy.py index fa5df41a01e..bf3031651c3 100644 --- a/mne/io/boxy/boxy.py +++ b/mne/io/boxy/boxy.py @@ -203,18 +203,15 @@ def __init__(self, fname, preload=False, verbose=None): # add extra column for triggers boxy_labels.append('Markers') + # convert to floats + boxy_coords = np.array(boxy_coords, float) + all_coords = np.array(all_coords, float) + ###make our montage### ###montage only wants channel coords, so need to grab those, convert to### ###array, then make a dict with labels### - for i_chan in range(len(boxy_coords)): - boxy_coords[i_chan] = np.asarray(boxy_coords[i_chan],dtype=np.float64) - - for i_chan in range(len(all_coords)): - 
all_coords[i_chan] = np.asarray(all_coords[i_chan],dtype=np.float64) - all_chan_dict = dict(zip(all_labels,all_coords)) - ###make our montage### my_dig_montage = make_dig_montage(ch_pos=all_chan_dict, coord_frame='unknown', nasion = fiducial_coords[0], @@ -236,19 +233,15 @@ def __init__(self, fname, preload=False, verbose=None): ###place our coordinates and wavelengths for each channel### # # These are all in actual 3d individual coordinates, so let's transform them to # # the Neuromag head coordinate frame - trans = get_ras_to_neuromag_trans(fiducial_coords[0], + native_head_t = get_ras_to_neuromag_trans(fiducial_coords[0], fiducial_coords[1], fiducial_coords[2]) for i_chan in range(len(boxy_labels)-1): - temp_chn = apply_trans(trans,boxy_coords[i_chan][0:3]) - temp_src = apply_trans(trans,boxy_coords[i_chan][3:6]) - temp_det = apply_trans(trans,boxy_coords[i_chan][6:9]) - temp_other = np.asarray(boxy_coords[i_chan][9:],dtype=np.float64) - info['chs'][i_chan]['loc'] = test = np.concatenate((temp_chn, temp_src, - temp_det, temp_other),axis=0) - - info['chs'][-1]['loc'] = np.zeros((12,)) + temp_ch_src_det = apply_trans(native_head_t, boxy_coords[i_chan][:9].reshape(3, 3)).ravel() + temp_other = np.asarray(boxy_coords[i_chan][9:], dtype=np.float64) # add wavelength and placeholders + info['chs'][i_chan]['loc'] = np.concatenate((temp_ch_src_det, temp_other), axis=0) + info['chs'][-1]['loc'] = np.zeros((12,)) #remove last line? 
raw_extras = {'source_num': source_num, 'detect_num': detect_num, @@ -262,7 +255,7 @@ def __init__(self, fname, preload=False, verbose=None): print('New first_samps: ', first_samps) diff = end_line-start_line #input file has rows for each source, output variable rearranges as columns and does not - last_samps = start_line + int(diff/source_num)-1 + last_samps = start_line + diff // source_num -1 print('New last_samps: ', last_samps) print('New Difference: ', last_samps-first_samps) From 38b8c3b8200781ba86be55b63f1f96146ec60af4 Mon Sep 17 00:00:00 2001 From: Kyle Mathewson Date: Mon, 20 Apr 2020 08:42:53 -0700 Subject: [PATCH 038/167] created gratton_emcp_epochs function in preprocessing/eog.py --- mne/preprocessing/eog.py | 82 ++++++++++++++++++++++++++++++++++++++++ 1 file changed, 82 insertions(+) diff --git a/mne/preprocessing/eog.py b/mne/preprocessing/eog.py index 9481eef862d..598a9566c02 100644 --- a/mne/preprocessing/eog.py +++ b/mne/preprocessing/eog.py @@ -248,3 +248,85 @@ def create_eog_epochs(raw, ch_name=None, event_id=998, picks=None, tmin=-0.5, picks=picks, baseline=baseline, preload=preload, reject_by_annotation=reject_by_annotation) return eog_epochs + + +@verbose +def GrattonEmcpEpochs(epochs): + + """Gratton, Coles, Donchin (1983) EMCP - Eye movement correction procedure. + + Parameters + ---------- + epochs : instance of Epoch + The epoched data with vertical and horizontal eye channels. + + Returns + ------- + emcp_epochs : instance of Epochs + Data epoched around EOG events. 
+ + Notes + ----- + Correct EEG data for EOG artifacts with regression + -compute the ERP in each condition + -subtract ERP from each trial + -subtract baseline (mean over all epoch) + -predict eye channel remainder from eeg remainder + -use coefficients to subtract eog from eeg + + """ + + event_names = ['A_error','B_error'] + i = 0 + for key, value in sorted(epochs.event_id.items(), key=lambda x: (x[1], x[0])): + event_names[i] = key + i += 1 + + #select the correct channels and data + eeg_chans = pick_types(epochs.info, eeg=True, eog=False) + eog_chans = pick_types(epochs.info, eeg=False, eog=True) + original_data = epochs._data + + #subtract the average over trials from each trial + rem = {} + for event in event_names: + data = epochs[event]._data + avg = np.mean(epochs[event]._data,axis=0) + rem[event] = data-avg + + #concatenate trials together of different types + ## then put them all back together in X (regression on all at once) + allrem = np.concatenate([rem[event] for event in event_names]) + + #separate eog and eeg + X = allrem[:,eeg_chans,:] + Y = allrem[:,eog_chans,:] + + #subtract mean over time from every trial/channel + X = (X.T - np.mean(X,2).T).T + Y = (Y.T - np.mean(Y,2).T).T + + #move electrodes first + X = np.moveaxis(X,0,1) + Y = np.moveaxis(Y,0,1) + + #make 2d and compute regression + X = np.reshape(X,(X.shape[0],np.prod(X.shape[1:]))) + Y = np.reshape(Y,(Y.shape[0],np.prod(Y.shape[1:]))) + b = np.linalg.solve(np.dot(Y,Y.T), np.dot(Y,X.T)) + + #get original data and electrodes first for matrix math + raw_eeg = np.moveaxis(original_data[:,eeg_chans,:],0,1) + raw_eog = np.moveaxis(original_data[:,eog_chans,:],0,1) + + #subtract weighted eye channels from eeg channels + eeg_corrected = (raw_eeg.T - np.dot(raw_eog.T,b)).T + + #move back to match epochs + eeg_corrected = np.moveaxis(eeg_corrected,0,1) + + #copy original epochs and replace with corrected data + epochs_new = epochs.copy() + epochs_new._data[:,eeg_chans,:] = eeg_corrected + + return 
emcp_epochs From 1fd2c3ebeca105f971e347b5c12aa2ad1e830596 Mon Sep 17 00:00:00 2001 From: Kyle Mathewson Date: Mon, 20 Apr 2020 08:45:23 -0700 Subject: [PATCH 039/167] revert commit to master --- mne/preprocessing/eog.py | 84 +--------------------------------------- 1 file changed, 1 insertion(+), 83 deletions(-) diff --git a/mne/preprocessing/eog.py b/mne/preprocessing/eog.py index 598a9566c02..27d0ac6252c 100644 --- a/mne/preprocessing/eog.py +++ b/mne/preprocessing/eog.py @@ -247,86 +247,4 @@ def create_eog_epochs(raw, ch_name=None, event_id=998, picks=None, tmin=-0.5, tmax=tmax, proj=False, reject=reject, flat=flat, picks=picks, baseline=baseline, preload=preload, reject_by_annotation=reject_by_annotation) - return eog_epochs - - -@verbose -def GrattonEmcpEpochs(epochs): - - """Gratton, Coles, Donchin (1983) EMCP - Eye movement correction procedure. - - Parameters - ---------- - epochs : instance of Epoch - The epoched data with vertical and horizontal eye channels. - - Returns - ------- - emcp_epochs : instance of Epochs - Data epoched around EOG events. 
- - Notes - ----- - Correct EEG data for EOG artifacts with regression - -compute the ERP in each condition - -subtract ERP from each trial - -subtract baseline (mean over all epoch) - -predict eye channel remainder from eeg remainder - -use coefficients to subtract eog from eeg - - """ - - event_names = ['A_error','B_error'] - i = 0 - for key, value in sorted(epochs.event_id.items(), key=lambda x: (x[1], x[0])): - event_names[i] = key - i += 1 - - #select the correct channels and data - eeg_chans = pick_types(epochs.info, eeg=True, eog=False) - eog_chans = pick_types(epochs.info, eeg=False, eog=True) - original_data = epochs._data - - #subtract the average over trials from each trial - rem = {} - for event in event_names: - data = epochs[event]._data - avg = np.mean(epochs[event]._data,axis=0) - rem[event] = data-avg - - #concatenate trials together of different types - ## then put them all back together in X (regression on all at once) - allrem = np.concatenate([rem[event] for event in event_names]) - - #separate eog and eeg - X = allrem[:,eeg_chans,:] - Y = allrem[:,eog_chans,:] - - #subtract mean over time from every trial/channel - X = (X.T - np.mean(X,2).T).T - Y = (Y.T - np.mean(Y,2).T).T - - #move electrodes first - X = np.moveaxis(X,0,1) - Y = np.moveaxis(Y,0,1) - - #make 2d and compute regression - X = np.reshape(X,(X.shape[0],np.prod(X.shape[1:]))) - Y = np.reshape(Y,(Y.shape[0],np.prod(Y.shape[1:]))) - b = np.linalg.solve(np.dot(Y,Y.T), np.dot(Y,X.T)) - - #get original data and electrodes first for matrix math - raw_eeg = np.moveaxis(original_data[:,eeg_chans,:],0,1) - raw_eog = np.moveaxis(original_data[:,eog_chans,:],0,1) - - #subtract weighted eye channels from eeg channels - eeg_corrected = (raw_eeg.T - np.dot(raw_eog.T,b)).T - - #move back to match epochs - eeg_corrected = np.moveaxis(eeg_corrected,0,1) - - #copy original epochs and replace with corrected data - epochs_new = epochs.copy() - epochs_new._data[:,eeg_chans,:] = eeg_corrected - - return 
emcp_epochs + return eog_epochs \ No newline at end of file From 9f67db30edaf93af9d64aeed190be66e05220ec4 Mon Sep 17 00:00:00 2001 From: Kyle Mathewson Date: Mon, 20 Apr 2020 08:46:49 -0700 Subject: [PATCH 040/167] fix --- mne/preprocessing/eog.py | 86 +++++++++++++++++++++++++++++++++++++++- 1 file changed, 85 insertions(+), 1 deletion(-) diff --git a/mne/preprocessing/eog.py b/mne/preprocessing/eog.py index 27d0ac6252c..9aae4aa0402 100644 --- a/mne/preprocessing/eog.py +++ b/mne/preprocessing/eog.py @@ -247,4 +247,88 @@ def create_eog_epochs(raw, ch_name=None, event_id=998, picks=None, tmin=-0.5, tmax=tmax, proj=False, reject=reject, flat=flat, picks=picks, baseline=baseline, preload=preload, reject_by_annotation=reject_by_annotation) - return eog_epochs \ No newline at end of file + return eog_epochs + + + +@verbose +def gratton_emcp_epochs(epochs): + + """Gratton, Coles, Donchin (1983) EMCP - Eye movement correction procedure. + + Parameters + ---------- + epochs : instance of Epoch + The epoched data with vertical and horizontal eye channels. + + Returns + ------- + emcp_epochs : instance of Epochs + Data epoched around EOG events. 
+ + Notes + ----- + Correct EEG data for EOG artifacts with regression + -compute the ERP in each condition + -subtract ERP from each trial + -subtract baseline (mean over all epoch) + -predict eye channel remainder from eeg remainder + -use coefficients to subtract eog from eeg + + """ + + event_names = ['A_error','B_error'] + i = 0 + for key, value in sorted(epochs.event_id.items(), key=lambda x: (x[1], x[0])): + event_names[i] = key + i += 1 + + #select the correct channels and data + eeg_chans = pick_types(epochs.info, eeg=True, eog=False) + eog_chans = pick_types(epochs.info, eeg=False, eog=True) + original_data = epochs._data + + #subtract the average over trials from each trial + rem = {} + for event in event_names: + data = epochs[event]._data + avg = np.mean(epochs[event]._data,axis=0) + rem[event] = data-avg + + #concatenate trials together of different types + ## then put them all back together in X (regression on all at once) + allrem = np.concatenate([rem[event] for event in event_names]) + + #separate eog and eeg + X = allrem[:,eeg_chans,:] + Y = allrem[:,eog_chans,:] + + #subtract mean over time from every trial/channel + X = (X.T - np.mean(X,2).T).T + Y = (Y.T - np.mean(Y,2).T).T + + #move electrodes first + X = np.moveaxis(X,0,1) + Y = np.moveaxis(Y,0,1) + + #make 2d and compute regression + X = np.reshape(X,(X.shape[0],np.prod(X.shape[1:]))) + Y = np.reshape(Y,(Y.shape[0],np.prod(Y.shape[1:]))) + b = np.linalg.solve(np.dot(Y,Y.T), np.dot(Y,X.T)) + + #get original data and electrodes first for matrix math + raw_eeg = np.moveaxis(original_data[:,eeg_chans,:],0,1) + raw_eog = np.moveaxis(original_data[:,eog_chans,:],0,1) + + #subtract weighted eye channels from eeg channels + eeg_corrected = (raw_eeg.T - np.dot(raw_eog.T,b)).T + + #move back to match epochs + eeg_corrected = np.moveaxis(eeg_corrected,0,1) + + #copy original epochs and replace with corrected data + epochs_new = epochs.copy() + epochs_new._data[:,eeg_chans,:] = eeg_corrected + + return 
emcp_epochs + \ No newline at end of file From fb0cbb35bb0925c9437ef9e0367ef55c29686dca Mon Sep 17 00:00:00 2001 From: Kyle Mathewson Date: Mon, 20 Apr 2020 08:47:37 -0700 Subject: [PATCH 041/167] revert --- mne/preprocessing/eog.py | 85 +--------------------------------------- 1 file changed, 1 insertion(+), 84 deletions(-) diff --git a/mne/preprocessing/eog.py b/mne/preprocessing/eog.py index 9aae4aa0402..9c2bad50d4e 100644 --- a/mne/preprocessing/eog.py +++ b/mne/preprocessing/eog.py @@ -248,87 +248,4 @@ def create_eog_epochs(raw, ch_name=None, event_id=998, picks=None, tmin=-0.5, picks=picks, baseline=baseline, preload=preload, reject_by_annotation=reject_by_annotation) return eog_epochs - - - -@verbose -def gratton_emcp_epochs(epochs): - - """Gratton, Coles, Donchin (1983) EMCP - Eye movement correction procedure. - - Parameters - ---------- - epochs : instance of Epoch - The epoched data with vertical and horizontal eye channels. - - Returns - ------- - emcp_epochs : instance of Epochs - Data epoched around EOG events. 
- - Notes - ----- - Correct EEG data for EOG artifacts with regression - -compute the ERP in each condition - -subtract ERP from each trial - -subtract baseline (mean over all epoch) - -predict eye channel remainder from eeg remainder - -use coefficients to subtract eog from eeg - - """ - - event_names = ['A_error','B_error'] - i = 0 - for key, value in sorted(epochs.event_id.items(), key=lambda x: (x[1], x[0])): - event_names[i] = key - i += 1 - - #select the correct channels and data - eeg_chans = pick_types(epochs.info, eeg=True, eog=False) - eog_chans = pick_types(epochs.info, eeg=False, eog=True) - original_data = epochs._data - - #subtract the average over trials from each trial - rem = {} - for event in event_names: - data = epochs[event]._data - avg = np.mean(epochs[event]._data,axis=0) - rem[event] = data-avg - - #concatenate trials together of different types - ## then put them all back together in X (regression on all at once) - allrem = np.concatenate([rem[event] for event in event_names]) - - #separate eog and eeg - X = allrem[:,eeg_chans,:] - Y = allrem[:,eog_chans,:] - - #subtract mean over time from every trial/channel - X = (X.T - np.mean(X,2).T).T - Y = (Y.T - np.mean(Y,2).T).T - - #move electrodes first - X = np.moveaxis(X,0,1) - Y = np.moveaxis(Y,0,1) - - #make 2d and compute regression - X = np.reshape(X,(X.shape[0],np.prod(X.shape[1:]))) - Y = np.reshape(Y,(Y.shape[0],np.prod(Y.shape[1:]))) - b = np.linalg.solve(np.dot(Y,Y.T), np.dot(Y,X.T)) - - #get original data and electrodes first for matrix math - raw_eeg = np.moveaxis(original_data[:,eeg_chans,:],0,1) - raw_eog = np.moveaxis(original_data[:,eog_chans,:],0,1) - - #subtract weighted eye channels from eeg channels - eeg_corrected = (raw_eeg.T - np.dot(raw_eog.T,b)).T - - #move back to match epochs - eeg_corrected = np.moveaxis(eeg_corrected,0,1) - - #copy original epochs and replace with corrected data - epochs_new = epochs.copy() - epochs_new._data[:,eeg_chans,:] = eeg_corrected - - return 
emcp_epochs - \ No newline at end of file + \ No newline at end of file From 1180a3dd29abe6c7482da9499e3481ae73207bbb Mon Sep 17 00:00:00 2001 From: Kyle Mathewson Date: Mon, 20 Apr 2020 08:48:42 -0700 Subject: [PATCH 042/167] fix --- mne/preprocessing/eog.py | 3 +-- 1 file changed, 1 insertion(+), 2 deletions(-) diff --git a/mne/preprocessing/eog.py b/mne/preprocessing/eog.py index 9c2bad50d4e..4df6c8a43fe 100644 --- a/mne/preprocessing/eog.py +++ b/mne/preprocessing/eog.py @@ -247,5 +247,4 @@ def create_eog_epochs(raw, ch_name=None, event_id=998, picks=None, tmin=-0.5, tmax=tmax, proj=False, reject=reject, flat=flat, picks=picks, baseline=baseline, preload=preload, reject_by_annotation=reject_by_annotation) - return eog_epochs - \ No newline at end of file + return eog_epochs \ No newline at end of file From a08b170cecb213121c0b6b87f3ecbc49044864d0 Mon Sep 17 00:00:00 2001 From: Kyle Mathewson Date: Mon, 20 Apr 2020 08:49:09 -0700 Subject: [PATCH 043/167] space ; --- mne/preprocessing/eog.py | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/mne/preprocessing/eog.py b/mne/preprocessing/eog.py index 4df6c8a43fe..27d0ac6252c 100644 --- a/mne/preprocessing/eog.py +++ b/mne/preprocessing/eog.py @@ -247,4 +247,4 @@ def create_eog_epochs(raw, ch_name=None, event_id=998, picks=None, tmin=-0.5, tmax=tmax, proj=False, reject=reject, flat=flat, picks=picks, baseline=baseline, preload=preload, reject_by_annotation=reject_by_annotation) - return eog_epochs \ No newline at end of file + return eog_epochs \ No newline at end of file From b8a331929a02c6f0843fd1b4be29e35d3750f878 Mon Sep 17 00:00:00 2001 From: Kyle Mathewson Date: Sun, 3 May 2020 16:12:43 -0700 Subject: [PATCH 044/167] initialize new branch with boxy.py file and folder in io with tests --- mne/io/__init__.py | 2 + mne/io/boxy/__init__.py | 7 + mne/io/boxy/boxy.py | 393 +++++++++++++++++++++++++++++++++ mne/io/boxy/tests/__init__.py | 0 mne/io/boxy/tests/test_boxy.py | 226 +++++++++++++++++++ 5 
files changed, 628 insertions(+) create mode 100644 mne/io/boxy/__init__.py create mode 100644 mne/io/boxy/boxy.py create mode 100644 mne/io/boxy/tests/__init__.py create mode 100644 mne/io/boxy/tests/test_boxy.py diff --git a/mne/io/__init__.py b/mne/io/__init__.py index ac16517a635..0cf67ed4397 100644 --- a/mne/io/__init__.py +++ b/mne/io/__init__.py @@ -27,6 +27,7 @@ from . import kit from . import nicolet from . import nirx +from . import boxy from . import eeglab from . import pick @@ -45,6 +46,7 @@ from .eeglab import read_raw_eeglab, read_epochs_eeglab from .eximia import read_raw_eximia from .nirx import read_raw_nirx +from .boxy import read_raw_boxy from .fieldtrip import (read_raw_fieldtrip, read_epochs_fieldtrip, read_evoked_fieldtrip) diff --git a/mne/io/boxy/__init__.py b/mne/io/boxy/__init__.py new file mode 100644 index 00000000000..c06d590829e --- /dev/null +++ b/mne/io/boxy/__init__.py @@ -0,0 +1,7 @@ +"""fNIRS module for conversion to FIF.""" + +# Author: Robert Luke +# +# License: BSD (3-clause) + +from .boxy import read_raw_boxy diff --git a/mne/io/boxy/boxy.py b/mne/io/boxy/boxy.py new file mode 100644 index 00000000000..ffce7135a2e --- /dev/null +++ b/mne/io/boxy/boxy.py @@ -0,0 +1,393 @@ +# Authors: Kyle Mathewson, Jonathan Kuziek +# +# License: BSD (3-clause) + +from configparser import ConfigParser, RawConfigParser +import glob as glob +import re as re + +import numpy as np + +from ..base import BaseRaw +from ..constants import FIFF +from ..meas_info import create_info, _format_dig_points +from ...annotations import Annotations +from ...transforms import apply_trans, _get_trans +from ...utils import logger, verbose, fill_doc + + +@fill_doc +def read_raw_boxy(fname, preload=False, verbose=None): + """Reader for a BOXY optical imaging recording. + Parameters + ---------- + fname : str + Path to the BOXY data folder. + %(preload)s + %(verbose)s + Returns + ------- + raw : instance of RawBOXY + A Raw object containing BOXY data. 
+ See Also + -------- + mne.io.Raw : Documentation of attribute and methods. + """ + return RawBOXY(fname, preload, verbose) + +@fill_doc +class RawBOXY(BaseRaw): + """Raw object from a BOXY optical imaging file. + Parameters + ---------- + fname : str + Path to the BOXY data folder. + %(preload)s + %(verbose)s + See Also + -------- + mne.io.Raw : Documentation of attribute and methods. + """ + + @verbose + def __init__(self, fname, preload=False, verbose=None): + from ...externals.pymatreader import read_mat + from ...coreg import get_mni_fiducials # avoid circular import prob + logger.info('Loading %s' % fname) + + # Read header file + # Parse required header fields + ###this keeps track of the line we're on### + ###mostly to know the start and stop of data (probably an easier way)### + line_num = 0 + ###load and read data to get some meta information### + ###there is alot of information at the beginning of a file### + ###but this only grabs some of it### + with open(boxy_file,'r') as data: + for i_line in data: + line_num += 1 + if '#DATA ENDS' in i_line: + end_line = line_num - 1 + break + if 'Detector Channels' in i_line: + detect_num = int(i_line.rsplit(' ')[0]) + elif 'External MUX Channels' in i_line: + source_num = int(i_line.rsplit(' ')[0]) + elif 'Auxiliary Channels' in i_line: + aux_num = int(i_line.rsplit(' ')[0]) + elif 'Waveform (CCF) Frequency (Hz)' in i_line: + ccf_ha = float(i_line.rsplit(' ')[0]) + elif 'Update Rate (Hz)' in i_line: + srate = float(i_line.rsplit(' ')[0]) + elif 'Updata Rate (Hz)' in i_line: + srate = float(i_line.rsplit(' ')[0]) + elif '#DATA BEGINS' in i_line: + start_line = line_num + + # Extract source-detectors + ###set up some variables### + chan_num = [] + source_label = [] + detect_label = [] + chan_wavelength = [] + chan_modulation = [] + + ###load and read each line of the .mtg file### + with open(mtg_file,'r') as data: + for i_ignore in range(2): + next(data) + for i_line in data: + chan1, chan2, source, detector, 
wavelength, modulation = i_line.split() + chan_num.append(chan1) + source_label.append(source) + detect_label.append(detector) + chan_wavelength.append(wavelength) + chan_modulation.append(modulation) + + # Read information about probe/montage/optodes + # A word on terminology used here: + # Sources produce light + # Detectors measure light + # Sources and detectors are both called optodes + # Each source - detector pair produces a channel + # Channels are defined as the midpoint between source and detector + + ###check if we are given a .tol or .elp file### + all_labels = [] + all_coords = [] + fiducial_coords = [] + if coord_file[-3:].lower() == 'elp'.lower(): + get_label = 0 + get_coords = 0 + ###load and read .elp file### + with open(coord_file,'r') as data: + for i_line in data: + ###first let's get our fiducial coordinates### + if '%F' in i_line: + fiducial_coords.append(i_line.split()[1:]) + ###check where sensor info starts### + if '//Sensor name' in i_line: + get_label = 1 + elif get_label == 1: + ###grab the part after '%N' for the label### + label = i_line.split()[1] + all_labels.append(label) + get_label = 0 + get_coords = 1 + elif get_coords == 1: + X, Y, Z = i_line.split() + all_coords.append([float(X),float(Y),float(Z)]) + get_coords = 0 + for i_index in range(3): + fiducial_coords[i_index] = np.asarray([float(x) for x in fiducial_coords[i_index]]) + elif coord_file[-3:] == 'tol': + ###load and read .tol file### + with open(coord_file,'r') as data: + for i_line in data: + label, X, Y, Z = i_line.split() + all_labels.append(label) + ###convert coordinates from mm to m## + all_coords.append([(float(X)*0.001),(float(Y)*0.001),(float(Z)*0.001)]) + + ###get coordinates for sources### + source_coords = [] + for i_chan in source_label: + if i_chan in all_labels: + chan_index = all_labels.index(i_chan) + source_coords.append(all_coords[chan_index]) + + ###get coordinates for detectors### + detect_coords = [] + for i_chan in detect_label: + if i_chan in 
all_labels: + chan_index = all_labels.index(i_chan) + detect_coords.append(all_coords[chan_index]) + + + # Generate meaningful channel names + ###need to rename labels to make other functions happy### + ###get our unique labels for sources and detectors### + unique_source_labels = [] + unique_detect_labels = [] + [unique_source_labels.append(label) for label in source_label if label not in unique_source_labels] + [unique_detect_labels.append(label) for label in detect_label if label not in unique_detect_labels] + + ###now let's label each channel in our data### + ###data is channels X timepoint where the first source_num rows correspond to### + ###the first detector, and each row within that group is a different source### + ###should note that current .mtg files contain channels for multiple data files### + ###going to move to have a single .mtg file per participant, condition, and montage### + ###combine coordinates and label our channels### + ###will label them based on ac, dc, and ph data### + boxy_coords = [] + boxy_labels = [] + data_types = ['AC','DC','Ph'] + total_chans = detect_num*source_num + for i_type in data_types: + for i_coord in range(len(source_coords[0:total_chans])): + boxy_coords.append(np.mean( + np.vstack((source_coords[i_coord], detect_coords[i_coord])), + axis=0).tolist() + source_coords[i_coord] + + detect_coords[i_coord] + [chan_wavelength[i_coord]] + [0] + [0]) + boxy_labels.append('S' + + str(unique_source_labels.index(source_label[i_coord])+1) + + '_D' + + str(unique_detect_labels.index(detect_label[i_coord])+1) + + ' ' + chan_wavelength[i_coord] + ' ' + i_type) + + ###montage only wants channel coords, so need to grab those, convert to### + ###array, then make a dict with labels### + for i_chan in range(len(boxy_coords)): + boxy_coords[i_chan] = np.asarray(boxy_coords[i_chan],dtype=np.float64) + + for i_chan in range(len(all_coords)): + all_coords[i_chan] = np.asarray(all_coords[i_chan],dtype=np.float64) + + all_chan_dict = 
dict(zip(all_labels,all_coords)) + + + ###make our montage### + montage_orig = mne.channels.make_dig_montage(ch_pos=all_chan_dict,coord_frame='head', + nasion = fiducial_coords[0], + lpa = fiducial_coords[1], + rpa = fiducial_coords[2]) + + ###for some reason make_dig_montage put our channels in a different order than what we input### + ###let's fix that. should be fine to just change coords and ch_names### + for i_chan in range(len(all_coords)): + montage_orig.dig[i_chan+3]['r'] = all_coords[i_chan] + montage_orig.ch_names[i_chan] = all_labels[i_chan] + + ###add an extra channel for our triggers for later### + boxy_labels.append('Markers') + + info = mne.create_info(boxy_labels,srate,ch_types='fnirs_raw') + info.update(dig=montage_orig.dig) + + # Set up digitization + # These are all in MNI coordinates, so let's transform them to + # the Neuromag head coordinate frame + ###get our fiducials and transform matrix from fsaverage### + subjects_dir = op.dirname(fetch_fsaverage()) + fid_path = op.join(subjects_dir, 'fsaverage', 'bem', 'fsaverage-fiducials.fif') + fiducials = read_fiducials(fid_path) + trans = coregister_fiducials(info, fiducials[0], tol=0.02) + + ###remake montage using the transformed coordinates### + all_coords_trans = apply_trans(trans,all_coords) + all_chan_dict_trans = dict(zip(all_labels,all_coords_trans)) + fiducial_coords_trans = apply_trans(trans,fiducial_coords) + + ###make our montage### + montage_trans = mne.channels.make_dig_montage(ch_pos=all_chan_dict_trans,coord_frame='head', + nasion = fiducial_coords_trans[0], + lpa = fiducial_coords_trans[1], + rpa = fiducial_coords_trans[2]) + + ###let's fix montage order ### + for i_chan in range(len(all_coords_trans)): + montage_trans.dig[i_chan+3]['r'] = all_coords_trans[i_chan] + montage_trans.ch_names[i_chan] = all_labels[i_chan] + + # Create mne structure + ###create info structure### + info = mne.create_info(boxy_labels,srate,ch_types='fnirs_raw') + ###add data type and channel wavelength to 
info### + info.update(dig=montage_trans.dig, trans=trans) + + # Store channel, source, and detector locations + # The channel location is stored in the first 3 entries of loc. + # The source location is stored in the second 3 entries of loc. + # The detector location is stored in the third 3 entries of loc. + # NIRx NIRSite uses MNI coordinates. + # Also encode the light frequency in the structure. + + ###place our coordinates and wavelengths for each channel### + for i_chan in range(len(boxy_labels)-1): + temp_chn = apply_trans(trans,boxy_coords[i_chan][0:3]) + temp_src = apply_trans(trans,boxy_coords[i_chan][3:6]) + temp_det = apply_trans(trans,boxy_coords[i_chan][6:9]) + temp_other = np.asarray(boxy_coords[i_chan][9:],dtype=np.float64) + info['chs'][i_chan]['loc'] = np.concatenate((temp_chn, temp_src, + temp_det, temp_other),axis=0) + info['chs'][-1]['loc'] = np.zeros((12,)) + + super(RawBOXY, self).__init__( + info, preload, filenames=[fname], last_samps=[last_sample], + raw_extras=[raw_extras], verbose=verbose) + + def _read_segment_file(self, data, idx, fi, start, stop, cals, mult): + """Read a segment of data from a file. 
+ """ + with open(boxy_file,'r') as data: + for i_line in data: + line_num += 1 + if '#DATA BEGINS' in i_line: + start_line = line_num + break + + raw_data = pd.read_csv(boxy_file, skiprows=start_line, sep='\t') + ###detectors, sources, and data types### + detectors = ['A', 'B', 'C', 'D', 'E', 'F', 'G', 'H', 'I', 'J', 'K', 'L', + 'M', 'N', 'O', 'P', 'Q', 'R', 'S', 'T', 'U', 'V', 'W', 'X', + 'Y', 'Z'] + data_types = ['AC','DC','Ph'] + sources = np.arange(1,source_num+1,1) + + ###since we can save boxy files in two different styles### + ###this will check to see which style the data is saved### + ###seems to also work with older boxy files### + if 'exmux' in raw_data.columns: + filetype = 'non-parsed' + + ###drop the last line as this is just '#DATA ENDS'### + raw_data = raw_data.drop([len(raw_data)-1]) + + ###store some extra info### + record = raw_data['record'].to_numpy() + exmux = raw_data['exmux'].to_numpy() + + ###make some empty variables to store our data### + raw_ac = np.zeros((detect_num*source_num,int(len(raw_data)/source_num))) + raw_dc = np.zeros((detect_num*source_num,int(len(raw_data)/source_num))) + raw_ph = np.zeros((detect_num*source_num,int(len(raw_data)/source_num))) + else: + filetype = 'parsed' + + ###drop the last line as this is just '#DATA ENDS'### + ###also drop the first line since this is empty### + raw_data = raw_data.drop([0,len(raw_data)-1]) + + ###make some empty variables to store our data### + raw_ac = np.zeros(((detect_num*source_num),len(raw_data))) + raw_dc = np.zeros(((detect_num*source_num),len(raw_data))) + raw_ph = np.zeros(((detect_num*source_num),len(raw_data))) + + ###store some extra data, might not need these though### + time = raw_data['time'].to_numpy() if 'time' in raw_data.columns else [] + time = raw_data['time'].to_numpy() if 'time' in raw_data.columns else [] + group = raw_data['group'].to_numpy() if 'group' in raw_data.columns else [] + step = raw_data['step'].to_numpy() if 'step' in raw_data.columns else [] + 
mark = raw_data['mark'].to_numpy() if 'mark' in raw_data.columns else [] + flag = raw_data['flag'].to_numpy() if 'flag' in raw_data.columns else [] + aux1 = raw_data['aux-1'].to_numpy() if 'aux-1' in raw_data.columns else [] + digaux = raw_data['digaux'].to_numpy() if 'digaux' in raw_data.columns else [] + bias = np.zeros((detect_num,len(raw_data))) + + ###loop through detectors### + for i_detect in detectors[0:detect_num]: + + ###older boxy files don't seem to keep track of detector bias### + ###probably due to specific boxy settings actually### + if 'bias-A' in raw_data.columns: + bias[detectors.index(i_detect),:] = raw_data['bias-' + i_detect].to_numpy() + + ###loop through data types### + for i_data in data_types: + ###loop through sources### + for i_source in sources: + ###where to store our data### + index_loc = detectors.index(i_detect)*source_num + (i_source-1) + ###need to treat our filetypes differently### + if filetype == 'non-parsed': + + ###filetype saves timepoints in groups### + ###this should account for that### + time_points = np.arange(i_source-1,int(record[-1])*source_num,source_num) + + ###determine which channel to look for### + channel = i_detect + '-' + i_data + + ###save our data based on data type### + if data_types.index(i_data) == 0: + raw_ac[index_loc,:] = raw_data[channel][time_points].to_numpy() + elif data_types.index(i_data) == 1: + raw_dc[index_loc,:] = raw_data[channel][time_points].to_numpy() + elif data_types.index(i_data) == 2: + raw_ph[index_loc,:] = raw_data[channel][time_points].to_numpy() + elif filetype == 'parsed': + ###determine which channel to look for### + channel = i_detect + '-' + i_data + str(i_source) + + ###save our data based on data type### + if data_types.index(i_data) == 0: + raw_ac[index_loc,:] = raw_data[channel].to_numpy() + elif data_types.index(i_data) == 1: + raw_dc[index_loc,:] = raw_data[channel].to_numpy() + elif data_types.index(i_data) == 2: + raw_ph[index_loc,:] = raw_data[channel].to_numpy() + + 
###now combine our data types into a single array with the data### + data = np.append(raw_ac, np.append(raw_dc, raw_ph, axis=0),axis=0) + + # Read triggers from event file + ###add our markers to the data array based on filetype### + if filetype == 'non-parsed': + if type(digaux) is list and digaux != []: + markers = digaux[np.arange(0,len(digaux),source_num)] + else: + markers = np.zeros(np.size(data,axis=1)) + elif filetype == 'parsed': + markers = digaux + data = np.vstack((data, markers)) + return data \ No newline at end of file diff --git a/mne/io/boxy/tests/__init__.py b/mne/io/boxy/tests/__init__.py new file mode 100644 index 00000000000..e69de29bb2d diff --git a/mne/io/boxy/tests/test_boxy.py b/mne/io/boxy/tests/test_boxy.py new file mode 100644 index 00000000000..ed4a75014fb --- /dev/null +++ b/mne/io/boxy/tests/test_boxy.py @@ -0,0 +1,226 @@ +# -*- coding: utf-8 -*- +# Authors: Robert Luke +# Eric Larson +# simplified BSD-3 license + +import os.path as op +import shutil + +import pytest +from numpy.testing import assert_allclose, assert_array_equal + +from mne.datasets.testing import data_path, requires_testing_data +from mne.io import read_raw_nirx +from mne.io.tests.test_raw import _test_raw_reader +from mne.transforms import apply_trans, _get_trans +from mne.utils import run_tests_if_main +from mne.preprocessing.nirs import source_detector_distances,\ + short_channels + +fname_nirx_15_0 = op.join(data_path(download=False), + 'NIRx', 'nirx_15_0_recording') +fname_nirx_15_2 = op.join(data_path(download=False), + 'NIRx', 'nirx_15_2_recording') +fname_nirx_15_2_short = op.join(data_path(download=False), + 'NIRx', 'nirx_15_2_recording_w_short') + + +@requires_testing_data +def test_nirx_15_2_short(): + """Test reading NIRX files.""" + raw = read_raw_nirx(fname_nirx_15_2_short, preload=True) + + # Test data import + assert raw._data.shape == (26, 145) + assert raw.info['sfreq'] == 12.5 + + # Test channel naming + assert raw.info['ch_names'][:4] == ["S1_D1 
760", "S1_D1 850", + "S1_D9 760", "S1_D9 850"] + assert raw.info['ch_names'][24:26] == ["S5_D13 760", "S5_D13 850"] + + # Test frequency encoding + assert raw.info['chs'][0]['loc'][9] == 760 + assert raw.info['chs'][1]['loc'][9] == 850 + + # Test info import + assert raw.info['subject_info'] == dict(sex=1, first_name="MNE", + middle_name="Test", + last_name="Recording") + + # Test distance between optodes matches values from + # nirsite https://github.com/mne-tools/mne-testing-data/pull/51 + # step 4 figure 2 + allowed_distance_error = 0.0002 + distances = source_detector_distances(raw.info) + assert_allclose(distances[::2], [ + 0.0304, 0.0078, 0.0310, 0.0086, 0.0416, + 0.0072, 0.0389, 0.0075, 0.0558, 0.0562, + 0.0561, 0.0565, 0.0077], atol=allowed_distance_error) + + # Test which channels are short + # These are the ones marked as red at + # https://github.com/mne-tools/mne-testing-data/pull/51 step 4 figure 2 + is_short = short_channels(raw.info) + assert_array_equal(is_short[:9:2], [False, True, False, True, False]) + is_short = short_channels(raw.info, threshold=0.003) + assert_array_equal(is_short[:3:2], [False, False]) + is_short = short_channels(raw.info, threshold=50) + assert_array_equal(is_short[:3:2], [True, True]) + + # Test trigger events + assert_array_equal(raw.annotations.description, ['3.0', '2.0', '1.0']) + + # Test location of detectors + # The locations of detectors can be seen in the first + # figure on this page... + # https://github.com/mne-tools/mne-testing-data/pull/51 + # And have been manually copied below + # These values were reported in mm, but according to this page... 
+ # https://mne.tools/stable/auto_tutorials/intro/plot_40_sensor_locations.html + # 3d locations should be specified in meters, so that's what's tested below + # Detector locations are stored in the third three loc values + allowed_dist_error = 0.0002 + locs = [ch['loc'][6:9] for ch in raw.info['chs']] + head_mri_t, _ = _get_trans('fsaverage', 'head', 'mri') + mni_locs = apply_trans(head_mri_t, locs) + + assert raw.info['ch_names'][0][3:5] == 'D1' + assert_allclose( + mni_locs[0], [-0.0841, -0.0464, -0.0129], atol=allowed_dist_error) + + assert raw.info['ch_names'][4][3:5] == 'D3' + assert_allclose( + mni_locs[4], [0.0846, -0.0142, -0.0156], atol=allowed_dist_error) + + assert raw.info['ch_names'][8][3:5] == 'D2' + assert_allclose( + mni_locs[8], [0.0207, -0.1062, 0.0484], atol=allowed_dist_error) + + assert raw.info['ch_names'][12][3:5] == 'D4' + assert_allclose( + mni_locs[12], [-0.0196, 0.0821, 0.0275], atol=allowed_dist_error) + + assert raw.info['ch_names'][16][3:5] == 'D5' + assert_allclose( + mni_locs[16], [-0.0360, 0.0276, 0.0778], atol=allowed_dist_error) + + assert raw.info['ch_names'][19][3:5] == 'D6' + assert_allclose( + mni_locs[19], [0.0352, 0.0283, 0.0780], atol=allowed_dist_error) + + assert raw.info['ch_names'][21][3:5] == 'D7' + assert_allclose( + mni_locs[21], [0.0388, -0.0477, 0.0932], atol=allowed_dist_error) + + +@requires_testing_data +def test_encoding(tmpdir): + """Test NIRx encoding.""" + fname = str(tmpdir.join('latin')) + shutil.copytree(fname_nirx_15_2, fname) + hdr_fname = op.join(fname, 'NIRS-2019-10-02_003.hdr') + hdr = list() + with open(hdr_fname, 'rb') as fid: + hdr.extend(line for line in fid) + hdr[2] = b'Date="jeu. 13 f\xe9vr. 
2020"\r\n' + with open(hdr_fname, 'wb') as fid: + for line in hdr: + fid.write(line) + # smoke test + read_raw_nirx(fname) + + +@requires_testing_data +def test_nirx_15_2(): + """Test reading NIRX files.""" + raw = read_raw_nirx(fname_nirx_15_2, preload=True) + + # Test data import + assert raw._data.shape == (64, 67) + assert raw.info['sfreq'] == 3.90625 + + # Test channel naming + assert raw.info['ch_names'][:4] == ["S1_D1 760", "S1_D1 850", + "S1_D10 760", "S1_D10 850"] + + # Test info import + assert raw.info['subject_info'] == dict(sex=1, first_name="TestRecording") + + # Test trigger events + assert_array_equal(raw.annotations.description, ['4.0', '6.0', '2.0']) + + # Test location of detectors + allowed_dist_error = 0.0002 + locs = [ch['loc'][6:9] for ch in raw.info['chs']] + head_mri_t, _ = _get_trans('fsaverage', 'head', 'mri') + mni_locs = apply_trans(head_mri_t, locs) + + assert raw.info['ch_names'][0][3:5] == 'D1' + assert_allclose( + mni_locs[0], [-0.0292, 0.0852, -0.0142], atol=allowed_dist_error) + + assert raw.info['ch_names'][15][3:5] == 'D4' + assert_allclose( + mni_locs[15], [-0.0739, -0.0756, -0.0075], atol=allowed_dist_error) + + +@requires_testing_data +def test_nirx_15_0(): + """Test reading NIRX files.""" + raw = read_raw_nirx(fname_nirx_15_0, preload=True) + + # Test data import + assert raw._data.shape == (20, 92) + assert raw.info['sfreq'] == 6.25 + + # Test channel naming + assert raw.info['ch_names'][:12] == ["S1_D1 760", "S1_D1 850", + "S2_D2 760", "S2_D2 850", + "S3_D3 760", "S3_D3 850", + "S4_D4 760", "S4_D4 850", + "S5_D5 760", "S5_D5 850", + "S6_D6 760", "S6_D6 850"] + + # Test info import + assert raw.info['subject_info'] == {'first_name': 'NIRX', + 'last_name': 'Test', 'sex': '0'} + + # Test trigger events + assert_array_equal(raw.annotations.description, ['1.0', '2.0', '2.0']) + + # Test location of detectors + allowed_dist_error = 0.0002 + locs = [ch['loc'][6:9] for ch in raw.info['chs']] + head_mri_t, _ = 
_get_trans('fsaverage', 'head', 'mri') + mni_locs = apply_trans(head_mri_t, locs) + + assert raw.info['ch_names'][0][3:5] == 'D1' + assert_allclose( + mni_locs[0], [0.0287, -0.1143, -0.0332], atol=allowed_dist_error) + + assert raw.info['ch_names'][15][3:5] == 'D8' + assert_allclose( + mni_locs[15], [-0.0693, -0.0480, 0.0657], atol=allowed_dist_error) + + # Test distance between optodes matches values from + allowed_distance_error = 0.0002 + distances = source_detector_distances(raw.info) + assert_allclose(distances[::2], [ + 0.0301, 0.0315, 0.0343, 0.0368, 0.0408, + 0.0399, 0.0393, 0.0367, 0.0336, 0.0447], atol=allowed_distance_error) + + +@requires_testing_data +@pytest.mark.parametrize('fname, boundary_decimal', ( + [fname_nirx_15_2_short, 1], + [fname_nirx_15_2, 0], + [fname_nirx_15_0, 0] +)) +def test_nirx_standard(fname, boundary_decimal): + """Test standard operations.""" + _test_raw_reader(read_raw_nirx, fname=fname, + boundary_decimal=boundary_decimal) # low fs + + +run_tests_if_main() From 03ef3a76e96be8b4dcfdad4af91cbd9fdb088c55 Mon Sep 17 00:00:00 2001 From: Kyle Mathewson Date: Sun, 3 May 2020 16:28:39 -0700 Subject: [PATCH 045/167] fixing eog nonchange --- mne/preprocessing/eog.py | 10 ---------- 1 file changed, 10 deletions(-) diff --git a/mne/preprocessing/eog.py b/mne/preprocessing/eog.py index 27d0ac6252c..873a95fea2a 100644 --- a/mne/preprocessing/eog.py +++ b/mne/preprocessing/eog.py @@ -18,7 +18,6 @@ def find_eog_events(raw, event_id=998, l_freq=1, h_freq=10, filter_length='10s', ch_name=None, tstart=0, reject_by_annotation=False, thresh=None, verbose=None): """Locate EOG artifacts. - Parameters ---------- raw : instance of Raw @@ -40,12 +39,10 @@ def find_eog_events(raw, event_id=998, l_freq=1, h_freq=10, thresh : float Threshold to trigger EOG event. %(verbose)s - Returns ------- eog_events : array Events. 
- See Also -------- create_eog_epochs @@ -165,7 +162,6 @@ def create_eog_epochs(raw, ch_name=None, event_id=998, picks=None, tmin=-0.5, baseline=None, preload=True, reject_by_annotation=True, thresh=None, verbose=None): """Conveniently generate epochs around EOG artifact events. - Parameters ---------- raw : instance of Raw @@ -188,13 +184,11 @@ def create_eog_epochs(raw, ch_name=None, event_id=998, picks=None, tmin=-0.5, Rejection parameters based on peak-to-peak amplitude. Valid keys are 'grad' | 'mag' | 'eeg' | 'eog' | 'ecg'. If reject is None then no rejection is done. Example:: - reject = dict(grad=4000e-13, # T / m (gradiometers) mag=4e-12, # T (magnetometers) eeg=40e-6, # V (EEG channels) eog=250e-6 # V (EOG channels) ) - flat : dict | None Rejection parameters based on flatness of signal. Valid keys are 'grad' | 'mag' | 'eeg' | 'eog' | 'ecg', and values @@ -215,22 +209,18 @@ def create_eog_epochs(raw, ch_name=None, event_id=998, picks=None, tmin=-0.5, whose description begins with ``'bad'`` are not used for finding artifacts and epochs overlapping with them are rejected. If False, no rejection based on annotations is performed. - .. versionadded:: 0.14.0 thresh : float Threshold to trigger EOG event. %(verbose)s - Returns ------- eog_epochs : instance of Epochs Data epoched around EOG events. - See Also -------- find_eog_events compute_proj_eog - Notes ----- Filtering is only applied to the EOG channel while finding events. 
From a4882b23a30e5dfd77b51c5dd87e177240f873af Mon Sep 17 00:00:00 2001 From: Kyle Mathewson Date: Sun, 3 May 2020 16:29:53 -0700 Subject: [PATCH 046/167] fix --- mne/preprocessing/eog.py | 10 ++++++++++ 1 file changed, 10 insertions(+) diff --git a/mne/preprocessing/eog.py b/mne/preprocessing/eog.py index 873a95fea2a..27d0ac6252c 100644 --- a/mne/preprocessing/eog.py +++ b/mne/preprocessing/eog.py @@ -18,6 +18,7 @@ def find_eog_events(raw, event_id=998, l_freq=1, h_freq=10, filter_length='10s', ch_name=None, tstart=0, reject_by_annotation=False, thresh=None, verbose=None): """Locate EOG artifacts. + Parameters ---------- raw : instance of Raw @@ -39,10 +40,12 @@ def find_eog_events(raw, event_id=998, l_freq=1, h_freq=10, thresh : float Threshold to trigger EOG event. %(verbose)s + Returns ------- eog_events : array Events. + See Also -------- create_eog_epochs @@ -162,6 +165,7 @@ def create_eog_epochs(raw, ch_name=None, event_id=998, picks=None, tmin=-0.5, baseline=None, preload=True, reject_by_annotation=True, thresh=None, verbose=None): """Conveniently generate epochs around EOG artifact events. + Parameters ---------- raw : instance of Raw @@ -184,11 +188,13 @@ def create_eog_epochs(raw, ch_name=None, event_id=998, picks=None, tmin=-0.5, Rejection parameters based on peak-to-peak amplitude. Valid keys are 'grad' | 'mag' | 'eeg' | 'eog' | 'ecg'. If reject is None then no rejection is done. Example:: + reject = dict(grad=4000e-13, # T / m (gradiometers) mag=4e-12, # T (magnetometers) eeg=40e-6, # V (EEG channels) eog=250e-6 # V (EOG channels) ) + flat : dict | None Rejection parameters based on flatness of signal. Valid keys are 'grad' | 'mag' | 'eeg' | 'eog' | 'ecg', and values @@ -209,18 +215,22 @@ def create_eog_epochs(raw, ch_name=None, event_id=998, picks=None, tmin=-0.5, whose description begins with ``'bad'`` are not used for finding artifacts and epochs overlapping with them are rejected. If False, no rejection based on annotations is performed. + .. 
versionadded:: 0.14.0 thresh : float Threshold to trigger EOG event. %(verbose)s + Returns ------- eog_epochs : instance of Epochs Data epoched around EOG events. + See Also -------- find_eog_events compute_proj_eog + Notes ----- Filtering is only applied to the EOG channel while finding events. From 4ac590d2ddda296166b36374797b3452b5a60964 Mon Sep 17 00:00:00 2001 From: Kyle Mathewson Date: Sun, 3 May 2020 16:30:25 -0700 Subject: [PATCH 047/167] fix --- mne/preprocessing/eog.py | 3 ++- 1 file changed, 2 insertions(+), 1 deletion(-) diff --git a/mne/preprocessing/eog.py b/mne/preprocessing/eog.py index 27d0ac6252c..9c2bad50d4e 100644 --- a/mne/preprocessing/eog.py +++ b/mne/preprocessing/eog.py @@ -247,4 +247,5 @@ def create_eog_epochs(raw, ch_name=None, event_id=998, picks=None, tmin=-0.5, tmax=tmax, proj=False, reject=reject, flat=flat, picks=picks, baseline=baseline, preload=preload, reject_by_annotation=reject_by_annotation) - return eog_epochs \ No newline at end of file + return eog_epochs + \ No newline at end of file From 8121cdb0f1bce5030a7496b15ec5c807a7981865 Mon Sep 17 00:00:00 2001 From: Kyle Mathewson Date: Sun, 3 May 2020 16:35:39 -0700 Subject: [PATCH 048/167] rebase fix? --- mne/preprocessing/eog.py | 139 ++++++++++++++------------------------- 1 file changed, 51 insertions(+), 88 deletions(-) diff --git a/mne/preprocessing/eog.py b/mne/preprocessing/eog.py index 9c2bad50d4e..d9aae78bf49 100644 --- a/mne/preprocessing/eog.py +++ b/mne/preprocessing/eog.py @@ -1,4 +1,4 @@ -# Authors: Alexandre Gramfort +# Authors: Alexandre Gramfort # Denis Engemann # Eric Larson # @@ -6,18 +6,19 @@ import numpy as np -from ._peak_finder import peak_finder +from .peak_finder import peak_finder from .. 
import pick_types, pick_channels -from ..utils import logger, verbose, _pl -from ..filter import filter_data +from ..utils import logger, verbose +from ..filter import band_pass_filter from ..epochs import Epochs +from ..externals.six import string_types @verbose def find_eog_events(raw, event_id=998, l_freq=1, h_freq=10, filter_length='10s', ch_name=None, tstart=0, - reject_by_annotation=False, thresh=None, verbose=None): - """Locate EOG artifacts. + verbose=None): + """Locate EOG artifacts Parameters ---------- @@ -26,80 +27,62 @@ def find_eog_events(raw, event_id=998, l_freq=1, h_freq=10, event_id : int The index to assign to found events. l_freq : float - Low cut-off frequency to apply to the EOG channel in Hz. + Low cut-off frequency in Hz. h_freq : float - High cut-off frequency to apply to the EOG channel in Hz. + High cut-off frequency in Hz. filter_length : str | int | None Number of taps to use for filtering. - ch_name : str | None - If not None, use specified channel(s) for EOG. + ch_name: str | None + If not None, use specified channel(s) for EOG tstart : float Start detection after tstart seconds. - reject_by_annotation : bool - Whether to omit data that is annotated as bad. - thresh : float - Threshold to trigger EOG event. - %(verbose)s + verbose : bool, str, int, or None + If not None, override default verbose level (see mne.verbose). Returns ------- eog_events : array Events. - - See Also - -------- - create_eog_epochs - compute_proj_eog """ + # Getting EOG Channel eog_inds = _get_eog_channel_index(ch_name, raw) logger.info('EOG channel index for this subject is: %s' % eog_inds) - # Reject bad segments. 
- reject_by_annotation = 'omit' if reject_by_annotation else None - eog, times = raw.get_data(picks=eog_inds, - reject_by_annotation=reject_by_annotation, - return_times=True) - times = times * raw.info['sfreq'] + raw.first_samp + eog, _ = raw[eog_inds, :] eog_events = _find_eog_events(eog, event_id=event_id, l_freq=l_freq, h_freq=h_freq, sampling_rate=raw.info['sfreq'], first_samp=raw.first_samp, filter_length=filter_length, - tstart=tstart, thresh=thresh, - verbose=verbose) - # Map times to corresponding samples. - eog_events[:, 0] = np.round(times[eog_events[:, 0] - - raw.first_samp]).astype(int) + tstart=tstart) + return eog_events -@verbose def _find_eog_events(eog, event_id, l_freq, h_freq, sampling_rate, first_samp, - filter_length='10s', tstart=0., thresh=None, - verbose=None): - """Find EOG events.""" + filter_length='10s', tstart=0.): + """Helper function""" + logger.info('Filtering the data to remove DC offset to help ' 'distinguish blinks from saccades') # filtering to remove dc offset so that we know which is blink and saccades - # hardcode verbose=False to suppress filter param messages (since this - # filter is not under user control) fmax = np.minimum(45, sampling_rate / 2.0 - 0.75) # protect Nyquist - filteog = np.array([filter_data( - x, sampling_rate, 2, fmax, None, filter_length, 0.5, 0.5, - phase='zero-double', fir_window='hann', fir_design='firwin2', - verbose=False) for x in eog]) + filteog = np.array([band_pass_filter( + x, sampling_rate, 2, fmax, filter_length=filter_length, + l_trans_bandwidth=0.5, h_trans_bandwidth=0.5, phase='zero-double', + fir_window='hann') for x in eog]) temp = np.sqrt(np.sum(filteog ** 2, axis=1)) indexmax = np.argmax(temp) # easier to detect peaks with filtering. 
- filteog = filter_data( - eog[indexmax], sampling_rate, l_freq, h_freq, None, - filter_length, 0.5, 0.5, phase='zero-double', fir_window='hann', - fir_design='firwin2') + filteog = band_pass_filter( + eog[indexmax], sampling_rate, l_freq, h_freq, + filter_length=filter_length, l_trans_bandwidth=0.5, + h_trans_bandwidth=0.5, phase='zero-double', fir_window='hann') # detecting eog blinks and generating event file @@ -108,11 +91,9 @@ def _find_eog_events(eog, event_id, l_freq, h_freq, sampling_rate, first_samp, temp = filteog - np.mean(filteog) n_samples_start = int(sampling_rate * tstart) if np.abs(np.max(temp)) > np.abs(np.min(temp)): - eog_events, _ = peak_finder(filteog[n_samples_start:], - thresh, extrema=1) + eog_events, _ = peak_finder(filteog[n_samples_start:], extrema=1) else: - eog_events, _ = peak_finder(filteog[n_samples_start:], - thresh, extrema=-1) + eog_events, _ = peak_finder(filteog[n_samples_start:], extrema=-1) eog_events += n_samples_start n_events = len(eog_events) @@ -125,8 +106,7 @@ def _find_eog_events(eog, event_id, l_freq, h_freq, sampling_rate, first_samp, def _get_eog_channel_index(ch_name, inst): - """Get EOG channel index.""" - if isinstance(ch_name, str): + if isinstance(ch_name, string_types): # Check if multiple EOG Channels if ',' in ch_name: ch_name = ch_name.split(',') @@ -139,7 +119,8 @@ def _get_eog_channel_index(ch_name, inst): raise ValueError('%s not in channel list' % ch_name) else: logger.info('Using channel %s as EOG channel%s' % ( - " and ".join(ch_name), _pl(eog_inds))) + " and ".join(ch_name), + '' if len(eog_inds) < 2 else 's')) elif ch_name is None: eog_inds = pick_types(inst.info, meg=False, eeg=False, stim=False, @@ -160,30 +141,32 @@ def _get_eog_channel_index(ch_name, inst): @verbose -def create_eog_epochs(raw, ch_name=None, event_id=998, picks=None, tmin=-0.5, - tmax=0.5, l_freq=1, h_freq=10, reject=None, flat=None, - baseline=None, preload=True, reject_by_annotation=True, - thresh=None, verbose=None): - 
"""Conveniently generate epochs around EOG artifact events. +def create_eog_epochs(raw, ch_name=None, event_id=998, picks=None, + tmin=-0.5, tmax=0.5, l_freq=1, h_freq=10, + reject=None, flat=None, baseline=None, + preload=True, verbose=None): + """Conveniently generate epochs around EOG artifact events Parameters ---------- raw : instance of Raw - The raw data. + The raw data ch_name : str The name of the channel to use for EOG peak detection. The argument is mandatory if the dataset contains no EOG channels. event_id : int - The index to assign to found events. - %(picks_all)s + The index to assign to found events + picks : array-like of int | None (default) + Indices of channels to include (if None, all channels + are used). tmin : float Start time before event. tmax : float End time after event. l_freq : float - Low pass frequency to apply to the EOG channel while finding events. + Low pass frequency. h_freq : float - High pass frequency to apply to the EOG channel while finding events. + High pass frequency. reject : dict | None Rejection parameters based on peak-to-peak amplitude. Valid keys are 'grad' | 'mag' | 'eeg' | 'eog' | 'ecg'. @@ -206,41 +189,20 @@ def create_eog_epochs(raw, ch_name=None, event_id=998, picks=None, tmin=-0.5, the interval is between "a (s)" and "b (s)". If a is None the beginning of the data is used and if b is None then b is set to the end of the interval. - If baseline is equal to (None, None) all the time + If baseline is equal ot (None, None) all the time interval is used. If None, no correction is applied. preload : bool Preload epochs or not. - reject_by_annotation : bool - Whether to reject based on annotations. If True (default), segments - whose description begins with ``'bad'`` are not used for finding - artifacts and epochs overlapping with them are rejected. If False, no - rejection based on annotations is performed. - - .. versionadded:: 0.14.0 - thresh : float - Threshold to trigger EOG event. 
- %(verbose)s + verbose : bool, str, int, or None + If not None, override default verbose level (see mne.verbose). Returns ------- eog_epochs : instance of Epochs Data epoched around EOG events. - - See Also - -------- - find_eog_events - compute_proj_eog - - Notes - ----- - Filtering is only applied to the EOG channel while finding events. - The resulting ``eog_epochs`` will have no filtering applied (i.e., have - the same filter properties as the input ``raw`` instance). """ events = find_eog_events(raw, ch_name=ch_name, event_id=event_id, - l_freq=l_freq, h_freq=h_freq, - reject_by_annotation=reject_by_annotation, - thresh=thresh) + l_freq=l_freq, h_freq=h_freq) # create epochs around EOG events eog_epochs = Epochs(raw, events=events, event_id=event_id, tmin=tmin, @@ -248,4 +210,5 @@ def create_eog_epochs(raw, ch_name=None, event_id=998, picks=None, tmin=-0.5, picks=picks, baseline=baseline, preload=preload, reject_by_annotation=reject_by_annotation) return eog_epochs - \ No newline at end of file + + From 3e1f72a69db4495d4239e74da0e80b1e6a54a3af Mon Sep 17 00:00:00 2001 From: Kyle Mathewson Date: Sun, 3 May 2020 16:37:48 -0700 Subject: [PATCH 049/167] fix? --- mne/preprocessing/eog.py | 138 ++++++++++++++++++++++++--------------- 1 file changed, 87 insertions(+), 51 deletions(-) diff --git a/mne/preprocessing/eog.py b/mne/preprocessing/eog.py index d9aae78bf49..9481eef862d 100644 --- a/mne/preprocessing/eog.py +++ b/mne/preprocessing/eog.py @@ -1,4 +1,4 @@ -# Authors: Alexandre Gramfort +# Authors: Alexandre Gramfort # Denis Engemann # Eric Larson # @@ -6,19 +6,18 @@ import numpy as np -from .peak_finder import peak_finder +from ._peak_finder import peak_finder from .. 
import pick_types, pick_channels -from ..utils import logger, verbose -from ..filter import band_pass_filter +from ..utils import logger, verbose, _pl +from ..filter import filter_data from ..epochs import Epochs -from ..externals.six import string_types @verbose def find_eog_events(raw, event_id=998, l_freq=1, h_freq=10, filter_length='10s', ch_name=None, tstart=0, - verbose=None): - """Locate EOG artifacts + reject_by_annotation=False, thresh=None, verbose=None): + """Locate EOG artifacts. Parameters ---------- @@ -27,62 +26,80 @@ def find_eog_events(raw, event_id=998, l_freq=1, h_freq=10, event_id : int The index to assign to found events. l_freq : float - Low cut-off frequency in Hz. + Low cut-off frequency to apply to the EOG channel in Hz. h_freq : float - High cut-off frequency in Hz. + High cut-off frequency to apply to the EOG channel in Hz. filter_length : str | int | None Number of taps to use for filtering. - ch_name: str | None - If not None, use specified channel(s) for EOG + ch_name : str | None + If not None, use specified channel(s) for EOG. tstart : float Start detection after tstart seconds. - verbose : bool, str, int, or None - If not None, override default verbose level (see mne.verbose). + reject_by_annotation : bool + Whether to omit data that is annotated as bad. + thresh : float + Threshold to trigger EOG event. + %(verbose)s Returns ------- eog_events : array Events. - """ + See Also + -------- + create_eog_epochs + compute_proj_eog + """ # Getting EOG Channel eog_inds = _get_eog_channel_index(ch_name, raw) logger.info('EOG channel index for this subject is: %s' % eog_inds) - eog, _ = raw[eog_inds, :] + # Reject bad segments. 
+ reject_by_annotation = 'omit' if reject_by_annotation else None + eog, times = raw.get_data(picks=eog_inds, + reject_by_annotation=reject_by_annotation, + return_times=True) + times = times * raw.info['sfreq'] + raw.first_samp eog_events = _find_eog_events(eog, event_id=event_id, l_freq=l_freq, h_freq=h_freq, sampling_rate=raw.info['sfreq'], first_samp=raw.first_samp, filter_length=filter_length, - tstart=tstart) - + tstart=tstart, thresh=thresh, + verbose=verbose) + # Map times to corresponding samples. + eog_events[:, 0] = np.round(times[eog_events[:, 0] - + raw.first_samp]).astype(int) return eog_events +@verbose def _find_eog_events(eog, event_id, l_freq, h_freq, sampling_rate, first_samp, - filter_length='10s', tstart=0.): - """Helper function""" - + filter_length='10s', tstart=0., thresh=None, + verbose=None): + """Find EOG events.""" logger.info('Filtering the data to remove DC offset to help ' 'distinguish blinks from saccades') # filtering to remove dc offset so that we know which is blink and saccades + # hardcode verbose=False to suppress filter param messages (since this + # filter is not under user control) fmax = np.minimum(45, sampling_rate / 2.0 - 0.75) # protect Nyquist - filteog = np.array([band_pass_filter( - x, sampling_rate, 2, fmax, filter_length=filter_length, - l_trans_bandwidth=0.5, h_trans_bandwidth=0.5, phase='zero-double', - fir_window='hann') for x in eog]) + filteog = np.array([filter_data( + x, sampling_rate, 2, fmax, None, filter_length, 0.5, 0.5, + phase='zero-double', fir_window='hann', fir_design='firwin2', + verbose=False) for x in eog]) temp = np.sqrt(np.sum(filteog ** 2, axis=1)) indexmax = np.argmax(temp) # easier to detect peaks with filtering. 
- filteog = band_pass_filter( - eog[indexmax], sampling_rate, l_freq, h_freq, - filter_length=filter_length, l_trans_bandwidth=0.5, - h_trans_bandwidth=0.5, phase='zero-double', fir_window='hann') + filteog = filter_data( + eog[indexmax], sampling_rate, l_freq, h_freq, None, + filter_length, 0.5, 0.5, phase='zero-double', fir_window='hann', + fir_design='firwin2') # detecting eog blinks and generating event file @@ -91,9 +108,11 @@ def _find_eog_events(eog, event_id, l_freq, h_freq, sampling_rate, first_samp, temp = filteog - np.mean(filteog) n_samples_start = int(sampling_rate * tstart) if np.abs(np.max(temp)) > np.abs(np.min(temp)): - eog_events, _ = peak_finder(filteog[n_samples_start:], extrema=1) + eog_events, _ = peak_finder(filteog[n_samples_start:], + thresh, extrema=1) else: - eog_events, _ = peak_finder(filteog[n_samples_start:], extrema=-1) + eog_events, _ = peak_finder(filteog[n_samples_start:], + thresh, extrema=-1) eog_events += n_samples_start n_events = len(eog_events) @@ -106,7 +125,8 @@ def _find_eog_events(eog, event_id, l_freq, h_freq, sampling_rate, first_samp, def _get_eog_channel_index(ch_name, inst): - if isinstance(ch_name, string_types): + """Get EOG channel index.""" + if isinstance(ch_name, str): # Check if multiple EOG Channels if ',' in ch_name: ch_name = ch_name.split(',') @@ -119,8 +139,7 @@ def _get_eog_channel_index(ch_name, inst): raise ValueError('%s not in channel list' % ch_name) else: logger.info('Using channel %s as EOG channel%s' % ( - " and ".join(ch_name), - '' if len(eog_inds) < 2 else 's')) + " and ".join(ch_name), _pl(eog_inds))) elif ch_name is None: eog_inds = pick_types(inst.info, meg=False, eeg=False, stim=False, @@ -141,32 +160,30 @@ def _get_eog_channel_index(ch_name, inst): @verbose -def create_eog_epochs(raw, ch_name=None, event_id=998, picks=None, - tmin=-0.5, tmax=0.5, l_freq=1, h_freq=10, - reject=None, flat=None, baseline=None, - preload=True, verbose=None): - """Conveniently generate epochs around EOG 
artifact events +def create_eog_epochs(raw, ch_name=None, event_id=998, picks=None, tmin=-0.5, + tmax=0.5, l_freq=1, h_freq=10, reject=None, flat=None, + baseline=None, preload=True, reject_by_annotation=True, + thresh=None, verbose=None): + """Conveniently generate epochs around EOG artifact events. Parameters ---------- raw : instance of Raw - The raw data + The raw data. ch_name : str The name of the channel to use for EOG peak detection. The argument is mandatory if the dataset contains no EOG channels. event_id : int - The index to assign to found events - picks : array-like of int | None (default) - Indices of channels to include (if None, all channels - are used). + The index to assign to found events. + %(picks_all)s tmin : float Start time before event. tmax : float End time after event. l_freq : float - Low pass frequency. + Low pass frequency to apply to the EOG channel while finding events. h_freq : float - High pass frequency. + High pass frequency to apply to the EOG channel while finding events. reject : dict | None Rejection parameters based on peak-to-peak amplitude. Valid keys are 'grad' | 'mag' | 'eeg' | 'eog' | 'ecg'. @@ -189,20 +206,41 @@ def create_eog_epochs(raw, ch_name=None, event_id=998, picks=None, the interval is between "a (s)" and "b (s)". If a is None the beginning of the data is used and if b is None then b is set to the end of the interval. - If baseline is equal ot (None, None) all the time + If baseline is equal to (None, None) all the time interval is used. If None, no correction is applied. preload : bool Preload epochs or not. - verbose : bool, str, int, or None - If not None, override default verbose level (see mne.verbose). + reject_by_annotation : bool + Whether to reject based on annotations. If True (default), segments + whose description begins with ``'bad'`` are not used for finding + artifacts and epochs overlapping with them are rejected. If False, no + rejection based on annotations is performed. + + .. 
versionadded:: 0.14.0 + thresh : float + Threshold to trigger EOG event. + %(verbose)s Returns ------- eog_epochs : instance of Epochs Data epoched around EOG events. + + See Also + -------- + find_eog_events + compute_proj_eog + + Notes + ----- + Filtering is only applied to the EOG channel while finding events. + The resulting ``eog_epochs`` will have no filtering applied (i.e., have + the same filter properties as the input ``raw`` instance). """ events = find_eog_events(raw, ch_name=ch_name, event_id=event_id, - l_freq=l_freq, h_freq=h_freq) + l_freq=l_freq, h_freq=h_freq, + reject_by_annotation=reject_by_annotation, + thresh=thresh) # create epochs around EOG events eog_epochs = Epochs(raw, events=events, event_id=event_id, tmin=tmin, @@ -210,5 +248,3 @@ def create_eog_epochs(raw, ch_name=None, event_id=998, picks=None, picks=picks, baseline=baseline, preload=preload, reject_by_annotation=reject_by_annotation) return eog_epochs - - From 1dfd09f27c0e765cfeb346501b3b7babdd59a1a6 Mon Sep 17 00:00:00 2001 From: Kyle Mathewson Date: Sun, 3 May 2020 20:50:56 -0700 Subject: [PATCH 050/167] added dataset import information --- mne/datasets/boxy_example/__init__.py | 3 + mne/datasets/boxy_example/boxy_example.py | 30 ++ mne/datasets/utils.py | 10 +- .../preprocessing/plot_80_boxy_processing.py | 331 ++++++++++++++++++ 4 files changed, 372 insertions(+), 2 deletions(-) create mode 100644 mne/datasets/boxy_example/__init__.py create mode 100644 mne/datasets/boxy_example/boxy_example.py create mode 100644 tutorials/preprocessing/plot_80_boxy_processing.py diff --git a/mne/datasets/boxy_example/__init__.py b/mne/datasets/boxy_example/__init__.py new file mode 100644 index 00000000000..a90c5723ce8 --- /dev/null +++ b/mne/datasets/boxy_example/__init__.py @@ -0,0 +1,3 @@ +"""fNIRS motor dataset.""" + +from .boxy_example import data_path, has_boxy_example_data, get_version diff --git a/mne/datasets/boxy_example/boxy_example.py b/mne/datasets/boxy_example/boxy_example.py new 
file mode 100644 index 00000000000..3aa114aa3eb --- /dev/null +++ b/mne/datasets/boxy_example/boxy_example.py @@ -0,0 +1,30 @@ +# Authors: Eric Larson +# License: BSD Style. + +from functools import partial + +from ...utils import verbose +from ..utils import (has_dataset, _data_path, _data_path_doc, + _get_version, _version_doc) + + +has_boxy_example_data = partial(has_dataset, name='boxy_example') + + +@verbose +def data_path(path=None, force_update=False, update_path=True, download=True, + verbose=None): # noqa: D103 + return _data_path(path=path, force_update=force_update, + update_path=update_path, name='boxy_example', + download=download) + + +data_path.__doc__ = _data_path_doc.format(name='boxy_example', + conf='MNE_DATASETS_BOXY_EXAMPLE_PATH') + + +def get_version(): # noqa: D103 + return _get_version('boxy_example') + + +get_version.__doc__ = _version_doc.format(name='boxy_example') diff --git a/mne/datasets/utils.py b/mne/datasets/utils.py index 542ceb17231..841fac05a17 100644 --- a/mne/datasets/utils.py +++ b/mne/datasets/utils.py @@ -226,6 +226,7 @@ def _data_path(path=None, force_update=False, update_path=True, download=True, 'testing': 'MNE_DATASETS_TESTING_PATH', 'multimodal': 'MNE_DATASETS_MULTIMODAL_PATH', 'fnirs_motor': 'MNE_DATASETS_FNIRS_MOTOR_PATH', + 'boxy_example': 'MNE_DATASETS_BOXY_EXAMPLE_PATH', 'opm': 'MNE_DATASETS_OPM_PATH', 'visual_92_categories': 'MNE_DATASETS_VISUAL_92_CATEGORIES_PATH', 'kiloword': 'MNE_DATASETS_KILOWORD_PATH', @@ -263,6 +264,7 @@ def _data_path(path=None, force_update=False, update_path=True, download=True, 'tar.gz/%s' % releases['testing'], multimodal='https://ndownloader.figshare.com/files/5999598', fnirs_motor='https://osf.io/dj3eh/download?version=1', + boxy_example='https://osf.io/hksme/download?version=1', opm='https://osf.io/p6ae7/download?version=2', visual_92_categories=[ 'https://osf.io/8ejrs/download?version=1', @@ -281,6 +283,7 @@ def _data_path(path=None, force_update=False, update_path=True, 
download=True, mtrf='mTRF_1.5.zip', multimodal='MNE-multimodal-data.tar.gz', fnirs_motor='MNE-fNIRS-motor-data.tgz', + boxy_example='MNE-BOXY-example-data.tgz', opm='MNE-OPM-data.tar.gz', sample='MNE-sample-data-processed.tar.gz', somato='MNE-somato-data.tar.gz', @@ -325,6 +328,7 @@ def _data_path(path=None, force_update=False, update_path=True, download=True, testing='1ef691944239411b869b3ed2f40a69fe', multimodal='26ec847ae9ab80f58f204d09e2c08367', fnirs_motor='c4935d19ddab35422a69f3326a01fef8', + boxy_example='b3793334548b7ba04c1b767c66117414', opm='370ad1dcfd5c47e029e692c85358a374', visual_92_categories=['74f50bbeb65740903eadc229c9fa759f', '203410a98afc9df9ae8ba9f933370e20'], @@ -523,7 +527,7 @@ def has_dataset(name): Returns ------- - has : bool + : bool True if the dataset is present. """ name = 'spm' if name == 'spm_face' else name @@ -542,6 +546,7 @@ def has_dataset(name): 'spm': 'MNE-spm-face', 'multimodal': 'MNE-multimodal-data', 'fnirs_motor': 'MNE-fNIRS-motor-data', + 'boxy_example': 'MNE-BOXY-example-data', 'opm': 'MNE-OPM-data', 'testing': 'MNE-testing-data', 'visual_92_categories': 'MNE-visual_92_categories-data', @@ -569,7 +574,7 @@ def _download_all_example_data(verbose=True): from . import (sample, testing, misc, spm_face, somato, brainstorm, eegbci, multimodal, opm, hf_sef, mtrf, fieldtrip_cmc, kiloword, phantom_4dbti, sleep_physionet, limo, - fnirs_motor) + fnirs_motor, boxy_example) sample_path = sample.data_path() testing.data_path() misc.data_path() @@ -578,6 +583,7 @@ def _download_all_example_data(verbose=True): hf_sef.data_path() multimodal.data_path() fnirs_motor.data_path() + boxy_example.data_path() opm.data_path() mtrf.data_path() fieldtrip_cmc.data_path() diff --git a/tutorials/preprocessing/plot_80_boxy_processing.py b/tutorials/preprocessing/plot_80_boxy_processing.py new file mode 100644 index 00000000000..6579f4ddc09 --- /dev/null +++ b/tutorials/preprocessing/plot_80_boxy_processing.py @@ -0,0 +1,331 @@ +""" +.. 
_tut-fnirs-processing: + +Preprocessing functional near-infrared spectroscopy (fNIRS) data +================================================================ + +This tutorial covers how to convert functional near-infrared spectroscopy +(fNIRS) data from raw measurements to relative oxyhaemoglobin (HbO) and +deoxyhaemoglobin (HbR) concentration. + +.. contents:: Page contents + :local: + :depth: 2 + +Here we will work with the :ref:`fNIRS motor data `. +""" +# sphinx_gallery_thumbnail_number = 1 + +import os +import numpy as np +import matplotlib.pyplot as plt +from itertools import compress + +import mne + + +boxy_data_folder = mne.datasets.boxy_example.data_path() +boxy_raw_dir = os.path.join(fnirs_data_folder, 'Participant-1') +raw_intensity = mne.io.read_raw_boxy(boxy_raw_dir, verbose=True).load_data() + + +# ############################################################################### +# # View location of sensors over brain surface +# # ------------------------------------------- +# # +# # Here we validate that the location of sources-detector pairs and channels +# # are in the expected locations. Source-detector pairs are shown as lines +# # between the optodes, channels (the mid point of source-detector pairs) are +# # shown as dots. 
+ +# subjects_dir = mne.datasets.sample.data_path() + '/subjects' + +# fig = mne.viz.create_3d_figure(size=(800, 600), bgcolor='white') +# fig = mne.viz.plot_alignment(raw_intensity.info, show_axes=True, +# subject='fsaverage', +# trans='fsaverage', surfaces=['brain'], +# fnirs=['channels', 'pairs'], +# subjects_dir=subjects_dir, fig=fig) +# mne.viz.set_3d_view(figure=fig, azimuth=20, elevation=55, distance=0.6) + + +# ############################################################################### +# # Selecting channels appropriate for detecting neural responses +# # ------------------------------------------------------------- +# # +# # First we remove channels that are too close together (short channels) to +# # detect a neural response (less than 1 cm distance between optodes). +# # These short channels can be seen in the figure above. +# # To achieve this we pick all the channels that are not considered to be short. + +# picks = mne.pick_types(raw_intensity.info, meg=False, fnirs=True) +# dists = mne.preprocessing.nirs.source_detector_distances( +# raw_intensity.info, picks=picks) +# raw_intensity.pick(picks[dists > 0.01]) +# raw_intensity.plot(n_channels=len(raw_intensity.ch_names), +# duration=500, show_scrollbars=False) + + +# ############################################################################### +# # Converting from raw intensity to optical density +# # ------------------------------------------------ +# # +# # The raw intensity values are then converted to optical density. + +# raw_od = mne.preprocessing.nirs.optical_density(raw_intensity) +# raw_od.plot(n_channels=len(raw_od.ch_names), +# duration=500, show_scrollbars=False) + + +# ############################################################################### +# # Evaluating the quality of the data +# # ---------------------------------- +# # +# # At this stage we can quantify the quality of the coupling +# # between the scalp and the optodes using the scalp coupling index. 
This +# # method looks for the presence of a prominent synchronous signal in the +# # frequency range of cardiac signals across both photodetected signals. +# # +# # In this example the data is clean and the coupling is good for all +# # channels, so we will not mark any channels as bad based on the scalp +# # coupling index. + +# sci = mne.preprocessing.nirs.scalp_coupling_index(raw_od) +# fig, ax = plt.subplots() +# ax.hist(sci) +# ax.set(xlabel='Scalp Coupling Index', ylabel='Count', xlim=[0, 1]) + + +# ############################################################################### +# # In this example we will mark all channels with a SCI less than 0.5 as bad +# # (this dataset is quite clean, so no channels are marked as bad). + +# raw_od.info['bads'] = list(compress(raw_od.ch_names, sci < 0.5)) + + +# ############################################################################### +# # At this stage it is appropriate to inspect your data +# # (for instructions on how to use the interactive data visualisation tool +# # see :ref:`tut-visualize-raw`) +# # to ensure that channels with poor scalp coupling have been removed. +# # If your data contains lots of artifacts you may decide to apply +# # artifact reduction techniques as described in :ref:`ex-fnirs-artifacts`. + + +# ############################################################################### +# # Converting from optical density to haemoglobin +# # ---------------------------------------------- +# # +# # Next we convert the optical density data to haemoglobin concentration using +# # the modified Beer-Lambert law. 
+ +# raw_haemo = mne.preprocessing.nirs.beer_lambert_law(raw_od) +# raw_haemo.plot(n_channels=len(raw_haemo.ch_names), +# duration=500, show_scrollbars=False) + + +# ############################################################################### +# # Removing heart rate from signal +# # ------------------------------- +# # +# # The haemodynamic response has frequency content predominantly below 0.5 Hz. +# # An increase in activity around 1 Hz can be seen in the data that is due to +# # the person's heart beat and is unwanted. So we use a low pass filter to +# # remove this. A high pass filter is also included to remove slow drifts +# # in the data. + +# fig = raw_haemo.plot_psd(average=True) +# fig.suptitle('Before filtering', weight='bold', size='x-large') +# fig.subplots_adjust(top=0.88) +# raw_haemo = raw_haemo.filter(0.05, 0.7, h_trans_bandwidth=0.2, +# l_trans_bandwidth=0.02) +# fig = raw_haemo.plot_psd(average=True) +# fig.suptitle('After filtering', weight='bold', size='x-large') +# fig.subplots_adjust(top=0.88) + +# ############################################################################### +# # Extract epochs +# # -------------- +# # +# # Now that the signal has been converted to relative haemoglobin concentration, +# # and the unwanted heart rate component has been removed, we can extract epochs +# # related to each of the experimental conditions. +# # +# # First we extract the events of interest and visualise them to ensure they are +# # correct. 
+ +# events, _ = mne.events_from_annotations(raw_haemo, event_id={'1.0': 1, +# '2.0': 2, +# '3.0': 3}) +# event_dict = {'Control': 1, 'Tapping/Left': 2, 'Tapping/Right': 3} +# fig = mne.viz.plot_events(events, event_id=event_dict, +# sfreq=raw_haemo.info['sfreq']) +# fig.subplots_adjust(right=0.7) # make room for the legend + + +# ############################################################################### +# # Next we define the range of our epochs, the rejection criteria, +# # baseline correction, and extract the epochs. We visualise the log of which +# # epochs were dropped. + +# reject_criteria = dict(hbo=80e-6) +# tmin, tmax = -5, 15 + +# epochs = mne.Epochs(raw_haemo, events, event_id=event_dict, +# tmin=tmin, tmax=tmax, +# reject=reject_criteria, reject_by_annotation=True, +# proj=True, baseline=(None, 0), preload=True, +# detrend=None, verbose=True) +# epochs.plot_drop_log() + + +# ############################################################################### +# # View consistency of responses across trials +# # ------------------------------------------- +# # +# # Now we can view the haemodynamic response for our tapping condition. +# # We visualise the response for both the oxy- and deoxyhaemoglobin, and +# # observe the expected peak in HbO at around 6 seconds consistently across +# # trials, and the consistent dip in HbR that is slightly delayed relative to +# # the HbO peak. + +# epochs['Tapping'].plot_image(combine='mean', vmin=-30, vmax=30, +# ts_args=dict(ylim=dict(hbo=[-15, 15], +# hbr=[-15, 15]))) + + +# ############################################################################### +# # We can also view the epoched data for the control condition and observe +# # that it does not show the expected morphology. 
+ +# epochs['Control'].plot_image(combine='mean', vmin=-30, vmax=30, +# ts_args=dict(ylim=dict(hbo=[-15, 15], +# hbr=[-15, 15]))) + + +# ############################################################################### +# # View consistency of responses across channels +# # --------------------------------------------- +# # +# # Similarly we can view how consistent the response is across the optode +# # pairs that we selected. All the channels in this data are located over the +# # motor cortex, and all channels show a similar pattern in the data. + +# fig, axes = plt.subplots(nrows=2, ncols=2, figsize=(15, 6)) +# clims = dict(hbo=[-20, 20], hbr=[-20, 20]) +# epochs['Control'].average().plot_image(axes=axes[:, 0], clim=clims) +# epochs['Tapping'].average().plot_image(axes=axes[:, 1], clim=clims) +# for column, condition in enumerate(['Control', 'Tapping']): +# for ax in axes[:, column]: +# ax.set_title('{}: {}'.format(condition, ax.get_title())) + + +# ############################################################################### +# # Plot standard fNIRS response image +# # ---------------------------------- +# # +# # Next we generate the most common visualisation of fNIRS data: plotting +# # both the HbO and HbR on the same figure to illustrate the relation between +# # the two signals. 
+ +# evoked_dict = {'Tapping/HbO': epochs['Tapping'].average(picks='hbo'), +# 'Tapping/HbR': epochs['Tapping'].average(picks='hbr'), +# 'Control/HbO': epochs['Control'].average(picks='hbo'), +# 'Control/HbR': epochs['Control'].average(picks='hbr')} + +# # Rename channels until the encoding of frequency in ch_name is fixed +# for condition in evoked_dict: +# evoked_dict[condition].rename_channels(lambda x: x[:-4]) + +# color_dict = dict(HbO='#AA3377', HbR='b') +# styles_dict = dict(Control=dict(linestyle='dashed')) + +# mne.viz.plot_compare_evokeds(evoked_dict, combine="mean", ci=0.95, +# colors=color_dict, styles=styles_dict) + + +# ############################################################################### +# # View topographic representation of activity +# # ------------------------------------------- +# # +# # Next we view how the topographic activity changes throughout the response. + +# times = np.arange(-3.5, 13.2, 3.0) +# topomap_args = dict(extrapolate='local') +# epochs['Tapping'].average(picks='hbo').plot_joint( +# times=times, topomap_args=topomap_args) + + +# ############################################################################### +# # Compare tapping of left and right hands +# # --------------------------------------- +# # +# # Finally we generate topo maps for the left and right conditions to view +# # the location of activity. First we visualise the HbO activity. + +# times = np.arange(4.0, 11.0, 1.0) +# epochs['Tapping/Left'].average(picks='hbo').plot_topomap( +# times=times, **topomap_args) +# epochs['Tapping/Right'].average(picks='hbo').plot_topomap( +# times=times, **topomap_args) + +# ############################################################################### +# # And we also view the HbR activity for the two conditions. 
+ +# epochs['Tapping/Left'].average(picks='hbr').plot_topomap( +# times=times, **topomap_args) +# epochs['Tapping/Right'].average(picks='hbr').plot_topomap( +# times=times, **topomap_args) + +# ############################################################################### +# # And we can plot the comparison at a single time point for two conditions. + +# fig, axes = plt.subplots(nrows=2, ncols=4, figsize=(9, 5), +# gridspec_kw=dict(width_ratios=[1, 1, 1, 0.1])) +# vmin, vmax, ts = -8, 8, 9.0 + +# evoked_left = epochs['Tapping/Left'].average() +# evoked_right = epochs['Tapping/Right'].average() + +# evoked_left.plot_topomap(ch_type='hbo', times=ts, axes=axes[0, 0], +# vmin=vmin, vmax=vmax, colorbar=False, +# **topomap_args) +# evoked_left.plot_topomap(ch_type='hbr', times=ts, axes=axes[1, 0], +# vmin=vmin, vmax=vmax, colorbar=False, +# **topomap_args) +# evoked_right.plot_topomap(ch_type='hbo', times=ts, axes=axes[0, 1], +# vmin=vmin, vmax=vmax, colorbar=False, +# **topomap_args) +# evoked_right.plot_topomap(ch_type='hbr', times=ts, axes=axes[1, 1], +# vmin=vmin, vmax=vmax, colorbar=False, +# **topomap_args) + +# evoked_diff = mne.combine_evoked([evoked_left, -evoked_right], weights='equal') + +# evoked_diff.plot_topomap(ch_type='hbo', times=ts, axes=axes[0, 2:], +# vmin=vmin, vmax=vmax, colorbar=True, +# **topomap_args) +# evoked_diff.plot_topomap(ch_type='hbr', times=ts, axes=axes[1, 2:], +# vmin=vmin, vmax=vmax, colorbar=True, +# **topomap_args) + +# for column, condition in enumerate( +# ['Tapping Left', 'Tapping Right', 'Left-Right']): +# for row, chroma in enumerate(['HbO', 'HbR']): +# axes[row, column].set_title('{}: {}'.format(chroma, condition)) +# fig.tight_layout() + +# ############################################################################### +# # Lastly, we can also look at the individual waveforms to see what is +# # driving the topographic plot above. 
+ +# fig, axes = plt.subplots(nrows=1, ncols=1, figsize=(6, 4)) +# mne.viz.plot_evoked_topo(epochs['Left'].average(picks='hbo'), color='b', +# axes=axes, legend=False) +# mne.viz.plot_evoked_topo(epochs['Right'].average(picks='hbo'), color='r', +# axes=axes, legend=False) + +# # Tidy the legend +# leg_lines = [line for line in axes.lines if line.get_c() == 'b'][:1] +# leg_lines.append([line for line in axes.lines if line.get_c() == 'r'][0]) +# fig.legend(leg_lines, ['Left', 'Right'], loc='lower right') From 884c5c891e45bb2f82e86758e28220fe1f87293a Mon Sep 17 00:00:00 2001 From: Kyle Mathewson Date: Sun, 3 May 2020 21:08:43 -0700 Subject: [PATCH 051/167] updated dataset stuff --- mne/datasets/__init__.py | 1 + mne/datasets/utils.py | 2 +- mne/io/boxy/boxy.py | 4 ++-- 3 files changed, 4 insertions(+), 3 deletions(-) diff --git a/mne/datasets/__init__.py b/mne/datasets/__init__.py index 31387b7ce72..35e67ea6d22 100644 --- a/mne/datasets/__init__.py +++ b/mne/datasets/__init__.py @@ -15,6 +15,7 @@ from . import somato from . import multimodal from . import fnirs_motor +from . import boxy_example from . import opm from . import spm_face from . 
import testing diff --git a/mne/datasets/utils.py b/mne/datasets/utils.py index 841fac05a17..9deb76285ce 100644 --- a/mne/datasets/utils.py +++ b/mne/datasets/utils.py @@ -328,7 +328,7 @@ def _data_path(path=None, force_update=False, update_path=True, download=True, testing='1ef691944239411b869b3ed2f40a69fe', multimodal='26ec847ae9ab80f58f204d09e2c08367', fnirs_motor='c4935d19ddab35422a69f3326a01fef8', - boxy_example='b3793334548b7ba04c1b767c66117414', + boxy_example='6586c112d30402e584ceba25468cafef', opm='370ad1dcfd5c47e029e692c85358a374', visual_92_categories=['74f50bbeb65740903eadc229c9fa759f', '203410a98afc9df9ae8ba9f933370e20'], diff --git a/mne/io/boxy/boxy.py b/mne/io/boxy/boxy.py index ffce7135a2e..89e28d548b6 100644 --- a/mne/io/boxy/boxy.py +++ b/mne/io/boxy/boxy.py @@ -361,7 +361,7 @@ def _read_segment_file(self, data, idx, fi, start, stop, cals, mult): ###save our data based on data type### if data_types.index(i_data) == 0: raw_ac[index_loc,:] = raw_data[channel][time_points].to_numpy() - elif data_ty pes.index(i_data) == 1: + elif data_types.index(i_data) == 1: raw_dc[index_loc,:] = raw_data[channel][time_points].to_numpy() elif data_types.index(i_data) == 2: raw_ph[index_loc,:] = raw_data[channel][time_points].to_numpy() @@ -390,4 +390,4 @@ def _read_segment_file(self, data, idx, fi, start, stop, cals, mult): elif filetype == 'parsed': markers = digaux data = np.vstack((data, markers)) - return data \ No newline at end of file + return data From 10fc5e72838790d5487086f1ab69dcae389fb44f Mon Sep 17 00:00:00 2001 From: Kyle Mathewson Date: Sun, 3 May 2020 21:10:35 -0700 Subject: [PATCH 052/167] correct hash from osf --- mne/datasets/utils.py | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/mne/datasets/utils.py b/mne/datasets/utils.py index 9deb76285ce..841fac05a17 100644 --- a/mne/datasets/utils.py +++ b/mne/datasets/utils.py @@ -328,7 +328,7 @@ def _data_path(path=None, force_update=False, update_path=True, download=True, 
testing='1ef691944239411b869b3ed2f40a69fe', multimodal='26ec847ae9ab80f58f204d09e2c08367', fnirs_motor='c4935d19ddab35422a69f3326a01fef8', - boxy_example='6586c112d30402e584ceba25468cafef', + boxy_example='b3793334548b7ba04c1b767c66117414', opm='370ad1dcfd5c47e029e692c85358a374', visual_92_categories=['74f50bbeb65740903eadc229c9fa759f', '203410a98afc9df9ae8ba9f933370e20'], From 32adc376821b87b7696c44063e337b18aee63c82 Mon Sep 17 00:00:00 2001 From: Kyle Mathewson Date: Sun, 3 May 2020 21:24:45 -0700 Subject: [PATCH 053/167] working data load --- mne/utils/config.py | 1 + tutorials/preprocessing/plot_80_boxy_processing.py | 2 +- 2 files changed, 2 insertions(+), 1 deletion(-) diff --git a/mne/utils/config.py b/mne/utils/config.py index 1e0a7c7f5e9..1f03123c068 100644 --- a/mne/utils/config.py +++ b/mne/utils/config.py @@ -95,6 +95,7 @@ def set_memmap_min_size(memmap_min_size): 'MNE_DATASETS_SOMATO_PATH', 'MNE_DATASETS_MULTIMODAL_PATH', 'MNE_DATASETS_FNIRS_MOTOR_PATH', + 'MNE_DATASETS_BOXY_EXAMPLE_PATH', 'MNE_DATASETS_OPM_PATH', 'MNE_DATASETS_SPM_FACE_DATASETS_TESTS', 'MNE_DATASETS_SPM_FACE_PATH', diff --git a/tutorials/preprocessing/plot_80_boxy_processing.py b/tutorials/preprocessing/plot_80_boxy_processing.py index 6579f4ddc09..23a182fe0bf 100644 --- a/tutorials/preprocessing/plot_80_boxy_processing.py +++ b/tutorials/preprocessing/plot_80_boxy_processing.py @@ -25,7 +25,7 @@ boxy_data_folder = mne.datasets.boxy_example.data_path() -boxy_raw_dir = os.path.join(fnirs_data_folder, 'Participant-1') +boxy_raw_dir = os.path.join(boxy_data_folder, 'Participant-1') raw_intensity = mne.io.read_raw_boxy(boxy_raw_dir, verbose=True).load_data() From 26c3c763e68ad0286d3c2eee05005af75909ea7f Mon Sep 17 00:00:00 2001 From: Kyle Mathewson Date: Sun, 3 May 2020 22:04:17 -0700 Subject: [PATCH 054/167] debuging loading file --- mne/io/boxy/boxy.py | 83 ++++++++++++++++++++++++--------------------- 1 file changed, 45 insertions(+), 38 deletions(-) diff --git a/mne/io/boxy/boxy.py 
b/mne/io/boxy/boxy.py index 89e28d548b6..a8f1fd55852 100644 --- a/mne/io/boxy/boxy.py +++ b/mne/io/boxy/boxy.py @@ -5,6 +5,7 @@ from configparser import ConfigParser, RawConfigParser import glob as glob import re as re +import os.path as op import numpy as np @@ -14,7 +15,8 @@ from ...annotations import Annotations from ...transforms import apply_trans, _get_trans from ...utils import logger, verbose, fill_doc - +from ...channels.montage import make_dig_montage +from ...datasets import fetch_fsaverage @fill_doc def read_raw_boxy(fname, preload=False, verbose=None): @@ -55,6 +57,17 @@ def __init__(self, fname, preload=False, verbose=None): from ...coreg import get_mni_fiducials # avoid circular import prob logger.info('Loading %s' % fname) + # Check if required files exist and store names for later use + files = dict() + keys = ('mtg', 'elp', 'tol', '001') + for key in keys: + files[key] = glob.glob('%s/*%s' % (fname, key)) + if len(files[key]) != 1: + raise RuntimeError('Expect one %s file, got %d' % + (key, len(files[key]),)) + files[key] = files[key][0] + + print(files) # Read header file # Parse required header fields ###this keeps track of the line we're on### @@ -63,7 +76,9 @@ def __init__(self, fname, preload=False, verbose=None): ###load and read data to get some meta information### ###there is alot of information at the beginning of a file### ###but this only grabs some of it### - with open(boxy_file,'r') as data: + + + with open(files['001'],'r') as data: for i_line in data: line_num += 1 if '#DATA ENDS' in i_line: @@ -93,7 +108,7 @@ def __init__(self, fname, preload=False, verbose=None): chan_modulation = [] ###load and read each line of the .mtg file### - with open(mtg_file,'r') as data: + with open(files['mtg'],'r') as data: for i_ignore in range(2): next(data) for i_line in data: @@ -116,39 +131,30 @@ def __init__(self, fname, preload=False, verbose=None): all_labels = [] all_coords = [] fiducial_coords = [] - if coord_file[-3:].lower() == 
'elp'.lower(): - get_label = 0 - get_coords = 0 - ###load and read .elp file### - with open(coord_file,'r') as data: - for i_line in data: - ###first let's get our fiducial coordinates### - if '%F' in i_line: - fiducial_coords.append(i_line.split()[1:]) - ###check where sensor info starts### - if '//Sensor name' in i_line: - get_label = 1 - elif get_label == 1: - ###grab the part after '%N' for the label### - label = i_line.split()[1] - all_labels.append(label) - get_label = 0 - get_coords = 1 - elif get_coords == 1: - X, Y, Z = i_line.split() - all_coords.append([float(X),float(Y),float(Z)]) - get_coords = 0 - for i_index in range(3): - fiducial_coords[i_index] = np.asarray([float(x) for x in fiducial_coords[i_index]]) - elif coord_file[-3:] == 'tol': - ###load and read .tol file### - with open(coord_file,'r') as data: - for i_line in data: - label, X, Y, Z = i_line.split() + get_label = 0 + get_coords = 0 + ###load and read .elp file### + with open(files['elp'],'r') as data: + for i_line in data: + ###first let's get our fiducial coordinates### + if '%F' in i_line: + fiducial_coords.append(i_line.split()[1:]) + ###check where sensor info starts### + if '//Sensor name' in i_line: + get_label = 1 + elif get_label == 1: + ###grab the part after '%N' for the label### + label = i_line.split()[1] all_labels.append(label) - ###convert coordinates from mm to m## - all_coords.append([(float(X)*0.001),(float(Y)*0.001),(float(Z)*0.001)]) - + get_label = 0 + get_coords = 1 + elif get_coords == 1: + X, Y, Z = i_line.split() + all_coords.append([float(X),float(Y),float(Z)]) + get_coords = 0 + for i_index in range(3): + fiducial_coords[i_index] = np.asarray([float(x) for x in fiducial_coords[i_index]]) + ###get coordinates for sources### source_coords = [] for i_chan in source_label: @@ -207,7 +213,7 @@ def __init__(self, fname, preload=False, verbose=None): ###make our montage### - montage_orig = mne.channels.make_dig_montage(ch_pos=all_chan_dict,coord_frame='head', + 
montage_orig = make_dig_montage(ch_pos=all_chan_dict,coord_frame='head', nasion = fiducial_coords[0], lpa = fiducial_coords[1], rpa = fiducial_coords[2]) @@ -221,7 +227,7 @@ def __init__(self, fname, preload=False, verbose=None): ###add an extra channel for our triggers for later### boxy_labels.append('Markers') - info = mne.create_info(boxy_labels,srate,ch_types='fnirs_raw') + info = create_info(boxy_labels,srate,ch_types='fnirs_raw') info.update(dig=montage_orig.dig) # Set up digitization @@ -239,7 +245,7 @@ def __init__(self, fname, preload=False, verbose=None): fiducial_coords_trans = apply_trans(trans,fiducial_coords) ###make our montage### - montage_trans = mne.channels.make_dig_montage(ch_pos=all_chan_dict_trans,coord_frame='head', + montage_trans = make_dig_montage(ch_pos=all_chan_dict_trans,coord_frame='head', nasion = fiducial_coords_trans[0], lpa = fiducial_coords_trans[1], rpa = fiducial_coords_trans[2]) @@ -279,6 +285,7 @@ def __init__(self, fname, preload=False, verbose=None): def _read_segment_file(self, data, idx, fi, start, stop, cals, mult): """Read a segment of data from a file. 
""" + print(self) with open(boxy_file,'r') as data: for i_line in data: line_num += 1 From 9a02fc801bcf0dc2453afabe0b27c2dc31f1b7bf Mon Sep 17 00:00:00 2001 From: Kyle Mathewson Date: Sun, 3 May 2020 22:43:12 -0700 Subject: [PATCH 055/167] debugged import function, plotting locations kinda works, dists calculate, data flat --- mne/io/boxy/boxy.py | 36 ++++++++++--------- .../preprocessing/plot_80_boxy_processing.py | 30 ++++++++-------- 2 files changed, 35 insertions(+), 31 deletions(-) diff --git a/mne/io/boxy/boxy.py b/mne/io/boxy/boxy.py index a8f1fd55852..c34fea4117b 100644 --- a/mne/io/boxy/boxy.py +++ b/mne/io/boxy/boxy.py @@ -6,17 +6,18 @@ import glob as glob import re as re import os.path as op - +import pandas as pd import numpy as np +import mne + from ..base import BaseRaw from ..constants import FIFF -from ..meas_info import create_info, _format_dig_points +from ..meas_info import create_info, _format_dig_points, read_fiducials from ...annotations import Annotations from ...transforms import apply_trans, _get_trans from ...utils import logger, verbose, fill_doc from ...channels.montage import make_dig_montage -from ...datasets import fetch_fsaverage @fill_doc def read_raw_boxy(fname, preload=False, verbose=None): @@ -67,7 +68,6 @@ def __init__(self, fname, preload=False, verbose=None): (key, len(files[key]),)) files[key] = files[key][0] - print(files) # Read header file # Parse required header fields ###this keeps track of the line we're on### @@ -83,6 +83,7 @@ def __init__(self, fname, preload=False, verbose=None): line_num += 1 if '#DATA ENDS' in i_line: end_line = line_num - 1 + last_sample = end_line break if 'Detector Channels' in i_line: detect_num = int(i_line.rsplit(' ')[0]) @@ -127,7 +128,7 @@ def __init__(self, fname, preload=False, verbose=None): # Each source - detector pair produces a channel # Channels are defined as the midpoint between source and detector - ###check if we are given a .tol or .elp file### + ###check if we are given .elp 
file### all_labels = [] all_coords = [] fiducial_coords = [] @@ -234,10 +235,10 @@ def __init__(self, fname, preload=False, verbose=None): # These are all in MNI coordinates, so let's transform them to # the Neuromag head coordinate frame ###get our fiducials and transform matrix from fsaverage### - subjects_dir = op.dirname(fetch_fsaverage()) + subjects_dir = op.dirname(mne.datasets.fetch_fsaverage()) fid_path = op.join(subjects_dir, 'fsaverage', 'bem', 'fsaverage-fiducials.fif') fiducials = read_fiducials(fid_path) - trans = coregister_fiducials(info, fiducials[0], tol=0.02) + trans = mne.coreg.coregister_fiducials(info, fiducials[0], tol=0.02) ###remake montage using the transformed coordinates### all_coords_trans = apply_trans(trans,all_coords) @@ -254,6 +255,7 @@ def __init__(self, fname, preload=False, verbose=None): for i_chan in range(len(all_coords_trans)): montage_trans.dig[i_chan+3]['r'] = all_coords_trans[i_chan] montage_trans.ch_names[i_chan] = all_labels[i_chan] + req_ind = montage_trans.ch_names # Create mne structure ###create info structure### @@ -277,6 +279,10 @@ def __init__(self, fname, preload=False, verbose=None): info['chs'][i_chan]['loc'] = test = np.concatenate((temp_chn, temp_src, temp_det, temp_other),axis=0) info['chs'][-1]['loc'] = np.zeros((12,)) + raw_extras = {'source_num': source_num, + 'detect_num': detect_num, + 'start_line': start_line, + 'files': files} super(RawBOXY, self).__init__( info, preload, filenames=[fname], last_samps=[last_sample], @@ -285,15 +291,11 @@ def __init__(self, fname, preload=False, verbose=None): def _read_segment_file(self, data, idx, fi, start, stop, cals, mult): """Read a segment of data from a file. 
""" - print(self) - with open(boxy_file,'r') as data: - for i_line in data: - line_num += 1 - if '#DATA BEGINS' in i_line: - start_line = line_num - break + source_num = self._raw_extras[fi]['source_num'] + detect_num = self._raw_extras[fi]['detect_num'] + start_line = self._raw_extras[fi]['start_line'] - raw_data = pd.read_csv(boxy_file, skiprows=start_line, sep='\t') + raw_data = pd.read_csv(self._raw_extras[fi]['files']['001'], skiprows=start_line, sep='\t') ###detectors, sources, and data types### detectors = ['A', 'B', 'C', 'D', 'E', 'F', 'G', 'H', 'I', 'J', 'K', 'L', 'M', 'N', 'O', 'P', 'Q', 'R', 'S', 'T', 'U', 'V', 'W', 'X', @@ -301,7 +303,8 @@ def _read_segment_file(self, data, idx, fi, start, stop, cals, mult): data_types = ['AC','DC','Ph'] sources = np.arange(1,source_num+1,1) - ###since we can save boxy files in two different styles### + + ###since we can save boxy files in two different styles### ###this will check to see which style the data is saved### ###seems to also work with older boxy files### if 'exmux' in raw_data.columns: @@ -387,6 +390,7 @@ def _read_segment_file(self, data, idx, fi, start, stop, cals, mult): ###now combine our data types into a single array with the data### data = np.append(raw_ac, np.append(raw_dc, raw_ph, axis=0),axis=0) + # Read triggers from event file ###add our markers to the data array based on filetype### if filetype == 'non-parsed': diff --git a/tutorials/preprocessing/plot_80_boxy_processing.py b/tutorials/preprocessing/plot_80_boxy_processing.py index 23a182fe0bf..b84844e60b6 100644 --- a/tutorials/preprocessing/plot_80_boxy_processing.py +++ b/tutorials/preprocessing/plot_80_boxy_processing.py @@ -28,7 +28,6 @@ boxy_raw_dir = os.path.join(boxy_data_folder, 'Participant-1') raw_intensity = mne.io.read_raw_boxy(boxy_raw_dir, verbose=True).load_data() - # ############################################################################### # # View location of sensors over brain surface # # 
------------------------------------------- @@ -38,15 +37,15 @@ # # between the optodes, channels (the mid point of source-detector pairs) are # # shown as dots. -# subjects_dir = mne.datasets.sample.data_path() + '/subjects' +subjects_dir = mne.datasets.sample.data_path() + '/subjects' -# fig = mne.viz.create_3d_figure(size=(800, 600), bgcolor='white') -# fig = mne.viz.plot_alignment(raw_intensity.info, show_axes=True, -# subject='fsaverage', -# trans='fsaverage', surfaces=['brain'], -# fnirs=['channels', 'pairs'], -# subjects_dir=subjects_dir, fig=fig) -# mne.viz.set_3d_view(figure=fig, azimuth=20, elevation=55, distance=0.6) +fig = mne.viz.create_3d_figure(size=(800, 600), bgcolor='white') +fig = mne.viz.plot_alignment(raw_intensity.info, show_axes=True, + subject='fsaverage', + trans='fsaverage', surfaces=['brain'], + fnirs=['channels', 'pairs'], + subjects_dir=subjects_dir, fig=fig) +mne.viz.set_3d_view(figure=fig, azimuth=20, elevation=55, distance=0.6) # ############################################################################### @@ -58,12 +57,13 @@ # # These short channels can be seen in the figure above. # # To achieve this we pick all the channels that are not considered to be short. 
-# picks = mne.pick_types(raw_intensity.info, meg=False, fnirs=True) -# dists = mne.preprocessing.nirs.source_detector_distances( -# raw_intensity.info, picks=picks) -# raw_intensity.pick(picks[dists > 0.01]) -# raw_intensity.plot(n_channels=len(raw_intensity.ch_names), -# duration=500, show_scrollbars=False) +picks = mne.pick_types(raw_intensity.info, meg=False, fnirs=True) +dists = mne.preprocessing.nirs.source_detector_distances( + raw_intensity.info, picks=picks) +print(dists) +raw_intensity.pick(picks[dists > 0.01]) +raw_intensity.plot(n_channels=10, + duration=500, show_scrollbars=False) # ############################################################################### From 753d66d51137b187d2aba314c965ff18806bbe36 Mon Sep 17 00:00:00 2001 From: Kyle Mathewson Date: Mon, 4 May 2020 10:27:32 -0700 Subject: [PATCH 056/167] fixed name for return in utils.py --- mne/datasets/utils.py | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/mne/datasets/utils.py b/mne/datasets/utils.py index 841fac05a17..d171dea4413 100644 --- a/mne/datasets/utils.py +++ b/mne/datasets/utils.py @@ -527,7 +527,7 @@ def has_dataset(name): Returns ------- - : bool + has : bool True if the dataset is present. """ name = 'spm' if name == 'spm_face' else name From 5cef74dba7801546e709435bfe732c79567440d6 Mon Sep 17 00:00:00 2001 From: Kyle Mathewson Date: Mon, 4 May 2020 10:29:29 -0700 Subject: [PATCH 057/167] extra newline in boxy.py --- mne/io/boxy/boxy.py | 1 + 1 file changed, 1 insertion(+) diff --git a/mne/io/boxy/boxy.py b/mne/io/boxy/boxy.py index c34fea4117b..0fdab1c9f62 100644 --- a/mne/io/boxy/boxy.py +++ b/mne/io/boxy/boxy.py @@ -19,6 +19,7 @@ from ...utils import logger, verbose, fill_doc from ...channels.montage import make_dig_montage + @fill_doc def read_raw_boxy(fname, preload=False, verbose=None): """Reader for a BOXY optical imaging recording. 
From 0206d544d6c5fed7bf11eb1f22850e0b7d763bf8 Mon Sep 17 00:00:00 2001 From: Kyle Mathewson Date: Mon, 4 May 2020 10:33:49 -0700 Subject: [PATCH 058/167] fixing newlines --- mne/io/boxy/boxy.py | 1 + 1 file changed, 1 insertion(+) diff --git a/mne/io/boxy/boxy.py b/mne/io/boxy/boxy.py index 0fdab1c9f62..2fad16c73f6 100644 --- a/mne/io/boxy/boxy.py +++ b/mne/io/boxy/boxy.py @@ -39,6 +39,7 @@ def read_raw_boxy(fname, preload=False, verbose=None): """ return RawBOXY(fname, preload, verbose) + @fill_doc class RawBOXY(BaseRaw): """Raw object from a BOXY optical imaging file. From 10c161282dca407e036a36e51141b51783bf7e58 Mon Sep 17 00:00:00 2001 From: Kyle Mathewson Date: Mon, 4 May 2020 11:06:34 -0700 Subject: [PATCH 059/167] removed call to findfiducials --- mne/io/boxy/boxy.py | 13 +++++++------ 1 file changed, 7 insertions(+), 6 deletions(-) diff --git a/mne/io/boxy/boxy.py b/mne/io/boxy/boxy.py index 2fad16c73f6..187b2a4415b 100644 --- a/mne/io/boxy/boxy.py +++ b/mne/io/boxy/boxy.py @@ -237,8 +237,7 @@ def __init__(self, fname, preload=False, verbose=None): # These are all in MNI coordinates, so let's transform them to # the Neuromag head coordinate frame ###get our fiducials and transform matrix from fsaverage### - subjects_dir = op.dirname(mne.datasets.fetch_fsaverage()) - fid_path = op.join(subjects_dir, 'fsaverage', 'bem', 'fsaverage-fiducials.fif') + fid_path = op.join('mne', 'data', 'fsaverage', 'fsaverage-fiducials.fif') fiducials = read_fiducials(fid_path) trans = mne.coreg.coregister_fiducials(info, fiducials[0], tol=0.02) @@ -390,8 +389,7 @@ def _read_segment_file(self, data, idx, fi, start, stop, cals, mult): raw_ph[index_loc,:] = raw_data[channel].to_numpy() ###now combine our data types into a single array with the data### - data = np.append(raw_ac, np.append(raw_dc, raw_ph, axis=0),axis=0) - + data_ = np.append(raw_ac, np.append(raw_dc, raw_ph, axis=0),axis=0) # Read triggers from event file ###add our markers to the data array based on filetype### @@ 
-399,8 +397,11 @@ def _read_segment_file(self, data, idx, fi, start, stop, cals, mult): if type(digaux) is list and digaux != []: markers = digaux[np.arange(0,len(digaux),source_num)] else: - markers = np.zeros(np.size(data,axis=1)) + markers = np.zeros(np.size(data_,axis=1)) elif filetype == 'parsed': markers = digaux - data = np.vstack((data, markers)) + + # place our data into the data object in place + data[:] = np.vstack((data_, markers))[:, start:stop] + return data From 0be3184a65acf19be14f137e742b22bf701b93a4 Mon Sep 17 00:00:00 2001 From: Kyle Mathewson Date: Mon, 4 May 2020 11:15:34 -0700 Subject: [PATCH 060/167] removed mne calls --- mne/io/boxy/boxy.py | 7 +++---- 1 file changed, 3 insertions(+), 4 deletions(-) diff --git a/mne/io/boxy/boxy.py b/mne/io/boxy/boxy.py index 187b2a4415b..edef555a529 100644 --- a/mne/io/boxy/boxy.py +++ b/mne/io/boxy/boxy.py @@ -9,8 +9,6 @@ import pandas as pd import numpy as np -import mne - from ..base import BaseRaw from ..constants import FIFF from ..meas_info import create_info, _format_dig_points, read_fiducials @@ -18,6 +16,7 @@ from ...transforms import apply_trans, _get_trans from ...utils import logger, verbose, fill_doc from ...channels.montage import make_dig_montage +from ...coreg import coregister_fiducials @fill_doc @@ -239,7 +238,7 @@ def __init__(self, fname, preload=False, verbose=None): ###get our fiducials and transform matrix from fsaverage### fid_path = op.join('mne', 'data', 'fsaverage', 'fsaverage-fiducials.fif') fiducials = read_fiducials(fid_path) - trans = mne.coreg.coregister_fiducials(info, fiducials[0], tol=0.02) + trans = coregister_fiducials(info, fiducials[0], tol=0.02) ###remake montage using the transformed coordinates### all_coords_trans = apply_trans(trans,all_coords) @@ -260,7 +259,7 @@ def __init__(self, fname, preload=False, verbose=None): # Create mne structure ###create info structure### - info = mne.create_info(boxy_labels,srate,ch_types='fnirs_raw') + info = 
create_info(boxy_labels,srate,ch_types='fnirs_raw') ###add data type and channel wavelength to info### info.update(dig=montage_trans.dig, trans=trans) From a7e8c1128a5410f2c05583f813da9c91dfec9b0a Mon Sep 17 00:00:00 2001 From: Kyle Mathewson Date: Mon, 4 May 2020 11:24:28 -0700 Subject: [PATCH 061/167] revert to import mne to debug data --- mne/io/boxy/boxy.py | 6 ++++-- 1 file changed, 4 insertions(+), 2 deletions(-) diff --git a/mne/io/boxy/boxy.py b/mne/io/boxy/boxy.py index edef555a529..4c8cc210dc3 100644 --- a/mne/io/boxy/boxy.py +++ b/mne/io/boxy/boxy.py @@ -9,6 +9,8 @@ import pandas as pd import numpy as np +import mne + from ..base import BaseRaw from ..constants import FIFF from ..meas_info import create_info, _format_dig_points, read_fiducials @@ -16,7 +18,7 @@ from ...transforms import apply_trans, _get_trans from ...utils import logger, verbose, fill_doc from ...channels.montage import make_dig_montage -from ...coreg import coregister_fiducials +# from ...coreg import coregister_fiducials @fill_doc @@ -238,7 +240,7 @@ def __init__(self, fname, preload=False, verbose=None): ###get our fiducials and transform matrix from fsaverage### fid_path = op.join('mne', 'data', 'fsaverage', 'fsaverage-fiducials.fif') fiducials = read_fiducials(fid_path) - trans = coregister_fiducials(info, fiducials[0], tol=0.02) + trans = mne.coreg.coregister_fiducials(info, fiducials[0], tol=0.02) ###remake montage using the transformed coordinates### all_coords_trans = apply_trans(trans,all_coords) From c6742a34d427108139cbb69f329a15d587c0e4e5 Mon Sep 17 00:00:00 2001 From: Kyle Mathewson Date: Mon, 4 May 2020 12:44:27 -0700 Subject: [PATCH 062/167] working data load and plot, needed to divide expected data by 16 for each source --- mne/io/boxy/boxy.py | 20 +++++++++++++++--- .../preprocessing/plot_80_boxy_processing.py | 21 ++++++++++--------- 2 files changed, 28 insertions(+), 13 deletions(-) diff --git a/mne/io/boxy/boxy.py b/mne/io/boxy/boxy.py index 4c8cc210dc3..b683d306662 
100644 --- a/mne/io/boxy/boxy.py +++ b/mne/io/boxy/boxy.py @@ -86,7 +86,6 @@ def __init__(self, fname, preload=False, verbose=None): line_num += 1 if '#DATA ENDS' in i_line: end_line = line_num - 1 - last_sample = end_line break if 'Detector Channels' in i_line: detect_num = int(i_line.rsplit(' ')[0]) @@ -286,8 +285,20 @@ def __init__(self, fname, preload=False, verbose=None): 'start_line': start_line, 'files': files} + print('Start Line: ', start_line) + print('End Line: ', end_line) + print('Original Difference: ', end_line-start_line) + first_samps = start_line + print('New first_samps: ', first_samps) + diff = end_line-start_line + last_samps = start_line + int(diff/16)-1 + print('New last_samps: ', last_samps) + print('New Difference: ', last_samps-first_samps) + + super(RawBOXY, self).__init__( - info, preload, filenames=[fname], last_samps=[last_sample], + info, preload, filenames=[fname], first_samps=[first_samps], + last_samps=[last_samps], raw_extras=[raw_extras], verbose=verbose) def _read_segment_file(self, data, idx, fi, start, stop, cals, mult): @@ -402,7 +413,10 @@ def _read_segment_file(self, data, idx, fi, start, stop, cals, mult): elif filetype == 'parsed': markers = digaux + print('Blank Data shape: ', data.shape) + temp = np.vstack((data_, markers)) + print('Input Data shape: ',temp.shape) # place our data into the data object in place - data[:] = np.vstack((data_, markers))[:, start:stop] + data[:] = np.vstack((data_, markers)) return data diff --git a/tutorials/preprocessing/plot_80_boxy_processing.py b/tutorials/preprocessing/plot_80_boxy_processing.py index b84844e60b6..9ae9de83ba8 100644 --- a/tutorials/preprocessing/plot_80_boxy_processing.py +++ b/tutorials/preprocessing/plot_80_boxy_processing.py @@ -39,13 +39,13 @@ subjects_dir = mne.datasets.sample.data_path() + '/subjects' -fig = mne.viz.create_3d_figure(size=(800, 600), bgcolor='white') -fig = mne.viz.plot_alignment(raw_intensity.info, show_axes=True, - subject='fsaverage', - 
trans='fsaverage', surfaces=['brain'], - fnirs=['channels', 'pairs'], - subjects_dir=subjects_dir, fig=fig) -mne.viz.set_3d_view(figure=fig, azimuth=20, elevation=55, distance=0.6) +# fig = mne.viz.create_3d_figure(size=(800, 600), bgcolor='white') +# fig = mne.viz.plot_alignment(raw_intensity.info, show_axes=True, +# subject='fsaverage', +# trans='fsaverage', surfaces=['brain'], +# fnirs=['channels', 'pairs'], +# subjects_dir=subjects_dir, fig=fig) +# mne.viz.set_3d_view(figure=fig, azimuth=20, elevation=55, distance=0.6) # ############################################################################### @@ -60,10 +60,11 @@ picks = mne.pick_types(raw_intensity.info, meg=False, fnirs=True) dists = mne.preprocessing.nirs.source_detector_distances( raw_intensity.info, picks=picks) -print(dists) raw_intensity.pick(picks[dists > 0.01]) -raw_intensity.plot(n_channels=10, - duration=500, show_scrollbars=False) +print(raw_intensity.info) +scalings = dict(eeg=20e-100) +raw_intensity.plot(n_channels=1, + duration=100, scalings=scalings, show_scrollbars=False) # ############################################################################### From 840d78a22d456f50b59d74899507e783e6e055a9 Mon Sep 17 00:00:00 2001 From: Kyle Mathewson Date: Mon, 4 May 2020 12:47:31 -0700 Subject: [PATCH 063/167] pull sorce number from code --- mne/io/boxy/boxy.py | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/mne/io/boxy/boxy.py b/mne/io/boxy/boxy.py index b683d306662..7f78abf6975 100644 --- a/mne/io/boxy/boxy.py +++ b/mne/io/boxy/boxy.py @@ -291,7 +291,7 @@ def __init__(self, fname, preload=False, verbose=None): first_samps = start_line print('New first_samps: ', first_samps) diff = end_line-start_line - last_samps = start_line + int(diff/16)-1 + last_samps = start_line + int(diff/source_num)-1 print('New last_samps: ', last_samps) print('New Difference: ', last_samps-first_samps) From a4ede9fcc4846a81fd8e0601c0a73e9aa32aa40a Mon Sep 17 00:00:00 2001 From: Kyle Mathewson Date: 
Mon, 4 May 2020 13:10:49 -0700 Subject: [PATCH 064/167] working plotting with rescale --- mne/io/boxy/boxy.py | 4 ++-- mne/io/brainvision/foo.py | 2 ++ tutorials/preprocessing/plot_80_boxy_processing.py | 8 ++++---- 3 files changed, 8 insertions(+), 6 deletions(-) create mode 100644 mne/io/brainvision/foo.py diff --git a/mne/io/boxy/boxy.py b/mne/io/boxy/boxy.py index 7f78abf6975..b3d87179bc6 100644 --- a/mne/io/boxy/boxy.py +++ b/mne/io/boxy/boxy.py @@ -291,11 +291,11 @@ def __init__(self, fname, preload=False, verbose=None): first_samps = start_line print('New first_samps: ', first_samps) diff = end_line-start_line - last_samps = start_line + int(diff/source_num)-1 + #input file has rows for each source, output variable rearranges as columns and does not + last_samps = start_line + int(diff/source_num)-1 print('New last_samps: ', last_samps) print('New Difference: ', last_samps-first_samps) - super(RawBOXY, self).__init__( info, preload, filenames=[fname], first_samps=[first_samps], last_samps=[last_samps], diff --git a/mne/io/brainvision/foo.py b/mne/io/brainvision/foo.py new file mode 100644 index 00000000000..c15a9f85fc1 --- /dev/null +++ b/mne/io/brainvision/foo.py @@ -0,0 +1,2 @@ +from ...coreg import coregister_fiducials +print(coregister_fiducials) diff --git a/tutorials/preprocessing/plot_80_boxy_processing.py b/tutorials/preprocessing/plot_80_boxy_processing.py index 9ae9de83ba8..2930fdfe0a7 100644 --- a/tutorials/preprocessing/plot_80_boxy_processing.py +++ b/tutorials/preprocessing/plot_80_boxy_processing.py @@ -61,10 +61,10 @@ dists = mne.preprocessing.nirs.source_detector_distances( raw_intensity.info, picks=picks) raw_intensity.pick(picks[dists > 0.01]) -print(raw_intensity.info) -scalings = dict(eeg=20e-100) -raw_intensity.plot(n_channels=1, - duration=100, scalings=scalings, show_scrollbars=False) +print(mne.io.pick.channel_type(raw_intensity.info, 0)) +scalings = dict(fnirs_raw=1e2) +raw_intensity.plot(n_channels=10, + duration=1000, 
scalings=scalings, show_scrollbars=True) # ############################################################################### From 581a31c0055a5b74bb15ad891028315bcfb49899 Mon Sep 17 00:00:00 2001 From: Kyle Mathewson Date: Mon, 4 May 2020 13:29:38 -0700 Subject: [PATCH 065/167] fixed import of coreg function --- mne/io/boxy/boxy.py | 7 ++----- mne/io/brainvision/foo.py | 2 -- tutorials/preprocessing/plot_80_boxy_processing.py | 14 +++++++------- 3 files changed, 9 insertions(+), 14 deletions(-) delete mode 100644 mne/io/brainvision/foo.py diff --git a/mne/io/boxy/boxy.py b/mne/io/boxy/boxy.py index b3d87179bc6..64c5ca2843b 100644 --- a/mne/io/boxy/boxy.py +++ b/mne/io/boxy/boxy.py @@ -9,8 +9,6 @@ import pandas as pd import numpy as np -import mne - from ..base import BaseRaw from ..constants import FIFF from ..meas_info import create_info, _format_dig_points, read_fiducials @@ -18,7 +16,6 @@ from ...transforms import apply_trans, _get_trans from ...utils import logger, verbose, fill_doc from ...channels.montage import make_dig_montage -# from ...coreg import coregister_fiducials @fill_doc @@ -58,7 +55,7 @@ class RawBOXY(BaseRaw): @verbose def __init__(self, fname, preload=False, verbose=None): from ...externals.pymatreader import read_mat - from ...coreg import get_mni_fiducials # avoid circular import prob + from ...coreg import get_mni_fiducials, coregister_fiducials # avoid circular import prob logger.info('Loading %s' % fname) # Check if required files exist and store names for later use @@ -239,7 +236,7 @@ def __init__(self, fname, preload=False, verbose=None): ###get our fiducials and transform matrix from fsaverage### fid_path = op.join('mne', 'data', 'fsaverage', 'fsaverage-fiducials.fif') fiducials = read_fiducials(fid_path) - trans = mne.coreg.coregister_fiducials(info, fiducials[0], tol=0.02) + trans = coregister_fiducials(info, fiducials[0], tol=0.02) ###remake montage using the transformed coordinates### all_coords_trans = apply_trans(trans,all_coords) 
diff --git a/mne/io/brainvision/foo.py b/mne/io/brainvision/foo.py deleted file mode 100644 index c15a9f85fc1..00000000000 --- a/mne/io/brainvision/foo.py +++ /dev/null @@ -1,2 +0,0 @@ -from ...coreg import coregister_fiducials -print(coregister_fiducials) diff --git a/tutorials/preprocessing/plot_80_boxy_processing.py b/tutorials/preprocessing/plot_80_boxy_processing.py index 2930fdfe0a7..ace0246fa16 100644 --- a/tutorials/preprocessing/plot_80_boxy_processing.py +++ b/tutorials/preprocessing/plot_80_boxy_processing.py @@ -39,13 +39,13 @@ subjects_dir = mne.datasets.sample.data_path() + '/subjects' -# fig = mne.viz.create_3d_figure(size=(800, 600), bgcolor='white') -# fig = mne.viz.plot_alignment(raw_intensity.info, show_axes=True, -# subject='fsaverage', -# trans='fsaverage', surfaces=['brain'], -# fnirs=['channels', 'pairs'], -# subjects_dir=subjects_dir, fig=fig) -# mne.viz.set_3d_view(figure=fig, azimuth=20, elevation=55, distance=0.6) +fig = mne.viz.create_3d_figure(size=(800, 600), bgcolor='white') +fig = mne.viz.plot_alignment(raw_intensity.info, show_axes=True, + subject='fsaverage', + trans='fsaverage', surfaces=['brain'], + fnirs=['channels', 'pairs'], + subjects_dir=subjects_dir, fig=fig) +mne.viz.set_3d_view(figure=fig, azimuth=20, elevation=55, distance=0.6) # ############################################################################### From d9240de73a39e513a742ce4c0ba356d7b4e9825a Mon Sep 17 00:00:00 2001 From: Kyle Mathewson Date: Mon, 4 May 2020 15:00:40 -0700 Subject: [PATCH 066/167] removed transform into fsaverage since it already was there --- .../preprocessing/plot_80_boxy_processing.py | 25 ++++++++++++------- 1 file changed, 16 insertions(+), 9 deletions(-) diff --git a/tutorials/preprocessing/plot_80_boxy_processing.py b/tutorials/preprocessing/plot_80_boxy_processing.py index ace0246fa16..743479e6f44 100644 --- a/tutorials/preprocessing/plot_80_boxy_processing.py +++ b/tutorials/preprocessing/plot_80_boxy_processing.py @@ -42,7 +42,15 @@ 
fig = mne.viz.create_3d_figure(size=(800, 600), bgcolor='white') fig = mne.viz.plot_alignment(raw_intensity.info, show_axes=True, subject='fsaverage', - trans='fsaverage', surfaces=['brain'], + trans=None, surfaces=['brain'], + fnirs=['channels', 'pairs'], + subjects_dir=subjects_dir, fig=fig) +mne.viz.set_3d_view(figure=fig, azimuth=20, elevation=55, distance=0.6) + +fig = mne.viz.create_3d_figure(size=(800, 600), bgcolor='white') +fig = mne.viz.plot_alignment(raw_intensity.info, show_axes=True, + subject='fsaverage', + trans=None, surfaces=['head'], fnirs=['channels', 'pairs'], subjects_dir=subjects_dir, fig=fig) mne.viz.set_3d_view(figure=fig, azimuth=20, elevation=55, distance=0.6) @@ -57,14 +65,13 @@ # # These short channels can be seen in the figure above. # # To achieve this we pick all the channels that are not considered to be short. -picks = mne.pick_types(raw_intensity.info, meg=False, fnirs=True) -dists = mne.preprocessing.nirs.source_detector_distances( - raw_intensity.info, picks=picks) -raw_intensity.pick(picks[dists > 0.01]) -print(mne.io.pick.channel_type(raw_intensity.info, 0)) -scalings = dict(fnirs_raw=1e2) -raw_intensity.plot(n_channels=10, - duration=1000, scalings=scalings, show_scrollbars=True) +# picks = mne.pick_types(raw_intensity.info, meg=False, fnirs=True) +# dists = mne.preprocessing.nirs.source_detector_distances( +# raw_intensity.info, picks=picks) +# raw_intensity.pick(picks[dists < 0.06]) +# scalings = dict(fnirs_raw=1e2) +# raw_intensity.plot(n_channels=10, +# duration=1000, scalings=scalings, show_scrollbars=True) # ############################################################################### From 0fd200f96c3a0c01f8da8f33838679cca099a25c Mon Sep 17 00:00:00 2001 From: Kyle Mathewson Date: Mon, 4 May 2020 15:50:30 -0700 Subject: [PATCH 067/167] working through bug plotting dig --- mne/io/boxy/boxy.py | 44 +++++++++---------- .../preprocessing/plot_80_boxy_processing.py | 21 +++++---- 2 files changed, 32 insertions(+), 33 
deletions(-) diff --git a/mne/io/boxy/boxy.py b/mne/io/boxy/boxy.py index 64c5ca2843b..0e7088a454a 100644 --- a/mne/io/boxy/boxy.py +++ b/mne/io/boxy/boxy.py @@ -153,7 +153,7 @@ def __init__(self, fname, preload=False, verbose=None): all_coords.append([float(X),float(Y),float(Z)]) get_coords = 0 for i_index in range(3): - fiducial_coords[i_index] = np.asarray([float(x) for x in fiducial_coords[i_index]]) + fiducial_coords[i_index] = np.asarray([float(x) for x in fiducial_coords[i_index]]) ###get coordinates for sources### source_coords = [] @@ -237,29 +237,29 @@ def __init__(self, fname, preload=False, verbose=None): fid_path = op.join('mne', 'data', 'fsaverage', 'fsaverage-fiducials.fif') fiducials = read_fiducials(fid_path) trans = coregister_fiducials(info, fiducials[0], tol=0.02) - - ###remake montage using the transformed coordinates### - all_coords_trans = apply_trans(trans,all_coords) - all_chan_dict_trans = dict(zip(all_labels,all_coords_trans)) - fiducial_coords_trans = apply_trans(trans,fiducial_coords) + info.update(trans=trans) + + # ###remake montage using the transformed coordinates### + # all_coords_trans = apply_trans(trans,all_coords) + # all_chan_dict_trans = dict(zip(all_labels,all_coords_trans)) + # fiducial_coords_trans = apply_trans(trans,fiducial_coords) - ###make our montage### - montage_trans = make_dig_montage(ch_pos=all_chan_dict_trans,coord_frame='head', - nasion = fiducial_coords_trans[0], - lpa = fiducial_coords_trans[1], - rpa = fiducial_coords_trans[2]) + # ###make our montage### + # montage_trans = make_dig_montage(ch_pos=all_chan_dict,coord_frame='head', + # nasion = fiducial_coords[0], + # lpa = fiducial_coords[1], + # rpa = fiducial_coords[2]) - ###let's fix montage order ### - for i_chan in range(len(all_coords_trans)): - montage_trans.dig[i_chan+3]['r'] = all_coords_trans[i_chan] - montage_trans.ch_names[i_chan] = all_labels[i_chan] - req_ind = montage_trans.ch_names + # ###let's fix montage order ### + # for i_chan in 
range(len(all_coords)): + # montage_trans.dig[i_chan+3]['r'] = all_coords[i_chan] + # montage_trans.ch_names[i_chan] = all_labels[i_chan] # Create mne structure ###create info structure### - info = create_info(boxy_labels,srate,ch_types='fnirs_raw') - ###add data type and channel wavelength to info### - info.update(dig=montage_trans.dig, trans=trans) + # info = create_info(boxy_labels,srate,ch_types='fnirs_raw') + # ###add data type and channel wavelength to info### + # info.update(dig=montage_trans.dig, trans=trans) # Store channel, source, and detector locations # The channel location is stored in the first 3 entries of loc. @@ -270,9 +270,9 @@ def __init__(self, fname, preload=False, verbose=None): ###place our coordinates and wavelengths for each channel### for i_chan in range(len(boxy_labels)-1): - temp_chn = apply_trans(trans,boxy_coords[i_chan][0:3]) - temp_src = apply_trans(trans,boxy_coords[i_chan][3:6]) - temp_det = apply_trans(trans,boxy_coords[i_chan][6:9]) + temp_chn = boxy_coords[i_chan][0:3] + temp_src = boxy_coords[i_chan][3:6] + temp_det = boxy_coords[i_chan][6:9] temp_other = np.asarray(boxy_coords[i_chan][9:],dtype=np.float64) info['chs'][i_chan]['loc'] = test = np.concatenate((temp_chn, temp_src, temp_det, temp_other),axis=0) diff --git a/tutorials/preprocessing/plot_80_boxy_processing.py b/tutorials/preprocessing/plot_80_boxy_processing.py index 743479e6f44..83c59743b98 100644 --- a/tutorials/preprocessing/plot_80_boxy_processing.py +++ b/tutorials/preprocessing/plot_80_boxy_processing.py @@ -39,20 +39,19 @@ subjects_dir = mne.datasets.sample.data_path() + '/subjects' +print(raw_intensity.info['dig'][0:5]) +print(raw_intensity.info['chs'][0]['loc']) fig = mne.viz.create_3d_figure(size=(800, 600), bgcolor='white') -fig = mne.viz.plot_alignment(raw_intensity.info, show_axes=True, +fig = mne.viz.plot_alignment(raw_intensity.info, + show_axes=True, subject='fsaverage', - trans=None, surfaces=['brain'], + trans=raw_intensity.info['trans'], + 
surfaces=['head-dense', 'brain'], fnirs=['channels', 'pairs'], - subjects_dir=subjects_dir, fig=fig) -mne.viz.set_3d_view(figure=fig, azimuth=20, elevation=55, distance=0.6) - -fig = mne.viz.create_3d_figure(size=(800, 600), bgcolor='white') -fig = mne.viz.plot_alignment(raw_intensity.info, show_axes=True, - subject='fsaverage', - trans=None, surfaces=['head'], - fnirs=['channels', 'pairs'], - subjects_dir=subjects_dir, fig=fig) + mri_fiducials=True, + dig=True, + subjects_dir=subjects_dir, + fig=fig) mne.viz.set_3d_view(figure=fig, azimuth=20, elevation=55, distance=0.6) From e9af9cbdcfdfdf8fa62f8eab88977ce04fff33d1 Mon Sep 17 00:00:00 2001 From: Kyle Mathewson Date: Mon, 4 May 2020 17:22:26 -0700 Subject: [PATCH 068/167] working example --- mne/io/boxy/boxy.py | 60 +++++++++---------- .../preprocessing/plot_80_boxy_processing.py | 4 +- 2 files changed, 30 insertions(+), 34 deletions(-) diff --git a/mne/io/boxy/boxy.py b/mne/io/boxy/boxy.py index 0e7088a454a..133df642009 100644 --- a/mne/io/boxy/boxy.py +++ b/mne/io/boxy/boxy.py @@ -13,7 +13,7 @@ from ..constants import FIFF from ..meas_info import create_info, _format_dig_points, read_fiducials from ...annotations import Annotations -from ...transforms import apply_trans, _get_trans +from ...transforms import apply_trans, _get_trans, get_ras_to_neuromag_trans from ...utils import logger, verbose, fill_doc from ...channels.montage import make_dig_montage @@ -153,7 +153,7 @@ def __init__(self, fname, preload=False, verbose=None): all_coords.append([float(X),float(Y),float(Z)]) get_coords = 0 for i_index in range(3): - fiducial_coords[i_index] = np.asarray([float(x) for x in fiducial_coords[i_index]]) + fiducial_coords[i_index] = np.asarray([float(x) for x in fiducial_coords[i_index]]) ###get coordinates for sources### source_coords = [] @@ -218,11 +218,11 @@ def __init__(self, fname, preload=False, verbose=None): lpa = fiducial_coords[1], rpa = fiducial_coords[2]) - ###for some reason make_dig_montage put our 
channels in a different order than what we input### - ###let's fix that. should be fine to just change coords and ch_names### - for i_chan in range(len(all_coords)): - montage_orig.dig[i_chan+3]['r'] = all_coords[i_chan] - montage_orig.ch_names[i_chan] = all_labels[i_chan] + # ###for some reason make_dig_montage put our channels in a different order than what we input### + # ###let's fix that. should be fine to just change coords and ch_names### + # for i_chan in range(len(all_coords)): + # montage_orig.dig[i_chan+3]['r'] = all_coords[i_chan] + # montage_orig.ch_names[i_chan] = all_labels[i_chan] ###add an extra channel for our triggers for later### boxy_labels.append('Markers') @@ -231,35 +231,33 @@ def __init__(self, fname, preload=False, verbose=None): info.update(dig=montage_orig.dig) # Set up digitization - # These are all in MNI coordinates, so let's transform them to + # These are all in actual 3d individual coordinates, so let's transform them to # the Neuromag head coordinate frame - ###get our fiducials and transform matrix from fsaverage### - fid_path = op.join('mne', 'data', 'fsaverage', 'fsaverage-fiducials.fif') - fiducials = read_fiducials(fid_path) - trans = coregister_fiducials(info, fiducials[0], tol=0.02) - info.update(trans=trans) - - # ###remake montage using the transformed coordinates### - # all_coords_trans = apply_trans(trans,all_coords) - # all_chan_dict_trans = dict(zip(all_labels,all_coords_trans)) - # fiducial_coords_trans = apply_trans(trans,fiducial_coords) + trans = get_ras_to_neuromag_trans(fiducial_coords[0], + fiducial_coords[1], + fiducial_coords[2]) + + ###remake montage using the transformed coordinates### + all_coords_trans = apply_trans(trans,all_coords) + all_chan_dict_trans = dict(zip(all_labels,all_coords_trans)) + fiducial_coords_trans = apply_trans(trans,fiducial_coords) - # ###make our montage### - # montage_trans = make_dig_montage(ch_pos=all_chan_dict,coord_frame='head', - # nasion = fiducial_coords[0], - # lpa = 
fiducial_coords[1], - # rpa = fiducial_coords[2]) + ###make our montage### + montage_trans = make_dig_montage(ch_pos=all_chan_dict_trans,coord_frame='head', + nasion = fiducial_coords_trans[0], + lpa = fiducial_coords_trans[1], + rpa = fiducial_coords_trans[2]) # ###let's fix montage order ### - # for i_chan in range(len(all_coords)): - # montage_trans.dig[i_chan+3]['r'] = all_coords[i_chan] + # for i_chan in range(len(all_coords_trans)): + # montage_trans.dig[i_chan+3]['r'] = all_coords_trans[i_chan] # montage_trans.ch_names[i_chan] = all_labels[i_chan] # Create mne structure ###create info structure### - # info = create_info(boxy_labels,srate,ch_types='fnirs_raw') - # ###add data type and channel wavelength to info### - # info.update(dig=montage_trans.dig, trans=trans) + info = create_info(boxy_labels,srate,ch_types='fnirs_raw') + ###add data type and channel wavelength to info### + info.update(dig=montage_trans.dig, trans=trans) # Store channel, source, and detector locations # The channel location is stored in the first 3 entries of loc. 
@@ -270,9 +268,9 @@ def __init__(self, fname, preload=False, verbose=None): ###place our coordinates and wavelengths for each channel### for i_chan in range(len(boxy_labels)-1): - temp_chn = boxy_coords[i_chan][0:3] - temp_src = boxy_coords[i_chan][3:6] - temp_det = boxy_coords[i_chan][6:9] + temp_chn = apply_trans(trans,boxy_coords[i_chan][0:3]) + temp_src = apply_trans(trans,boxy_coords[i_chan][3:6]) + temp_det = apply_trans(trans,boxy_coords[i_chan][6:9]) temp_other = np.asarray(boxy_coords[i_chan][9:],dtype=np.float64) info['chs'][i_chan]['loc'] = test = np.concatenate((temp_chn, temp_src, temp_det, temp_other),axis=0) diff --git a/tutorials/preprocessing/plot_80_boxy_processing.py b/tutorials/preprocessing/plot_80_boxy_processing.py index 83c59743b98..93aeee767bd 100644 --- a/tutorials/preprocessing/plot_80_boxy_processing.py +++ b/tutorials/preprocessing/plot_80_boxy_processing.py @@ -39,13 +39,11 @@ subjects_dir = mne.datasets.sample.data_path() + '/subjects' -print(raw_intensity.info['dig'][0:5]) -print(raw_intensity.info['chs'][0]['loc']) fig = mne.viz.create_3d_figure(size=(800, 600), bgcolor='white') fig = mne.viz.plot_alignment(raw_intensity.info, show_axes=True, subject='fsaverage', - trans=raw_intensity.info['trans'], + trans='fsaverage', surfaces=['head-dense', 'brain'], fnirs=['channels', 'pairs'], mri_fiducials=True, From 27edac2f198611bdf6760b09111a560b47095d74 Mon Sep 17 00:00:00 2001 From: Kyle Mathewson Date: Mon, 4 May 2020 18:05:47 -0700 Subject: [PATCH 069/167] working version that loads in coordiantes and transforms correctly --- mne/io/boxy/boxy.py | 69 ++++++------------- .../preprocessing/plot_80_boxy_processing.py | 14 ++-- 2 files changed, 29 insertions(+), 54 deletions(-) diff --git a/mne/io/boxy/boxy.py b/mne/io/boxy/boxy.py index 133df642009..fa5df41a01e 100644 --- a/mne/io/boxy/boxy.py +++ b/mne/io/boxy/boxy.py @@ -200,6 +200,9 @@ def __init__(self, fname, preload=False, verbose=None): + '_D' + 
str(unique_detect_labels.index(detect_label[i_coord])+1) + ' ' + chan_wavelength[i_coord] + ' ' + i_type) + + # add extra column for triggers + boxy_labels.append('Markers') ###montage only wants channel coords, so need to grab those, convert to### ###array, then make a dict with labels### @@ -211,62 +214,32 @@ def __init__(self, fname, preload=False, verbose=None): all_chan_dict = dict(zip(all_labels,all_coords)) - - ###make our montage### - montage_orig = make_dig_montage(ch_pos=all_chan_dict,coord_frame='head', - nasion = fiducial_coords[0], - lpa = fiducial_coords[1], - rpa = fiducial_coords[2]) + ###make our montage### + my_dig_montage = make_dig_montage(ch_pos=all_chan_dict, + coord_frame='unknown', + nasion = fiducial_coords[0], + lpa = fiducial_coords[1], + rpa = fiducial_coords[2]) - # ###for some reason make_dig_montage put our channels in a different order than what we input### - # ###let's fix that. should be fine to just change coords and ch_names### - # for i_chan in range(len(all_coords)): - # montage_orig.dig[i_chan+3]['r'] = all_coords[i_chan] - # montage_orig.ch_names[i_chan] = all_labels[i_chan] - - ###add an extra channel for our triggers for later### - boxy_labels.append('Markers') - - info = create_info(boxy_labels,srate,ch_types='fnirs_raw') - info.update(dig=montage_orig.dig) - - # Set up digitization - # These are all in actual 3d individual coordinates, so let's transform them to - # the Neuromag head coordinate frame - trans = get_ras_to_neuromag_trans(fiducial_coords[0], - fiducial_coords[1], - fiducial_coords[2]) - - ###remake montage using the transformed coordinates### - all_coords_trans = apply_trans(trans,all_coords) - all_chan_dict_trans = dict(zip(all_labels,all_coords_trans)) - fiducial_coords_trans = apply_trans(trans,fiducial_coords) - - ###make our montage### - montage_trans = make_dig_montage(ch_pos=all_chan_dict_trans,coord_frame='head', - nasion = fiducial_coords_trans[0], - lpa = fiducial_coords_trans[1], - rpa = 
fiducial_coords_trans[2]) - - # ###let's fix montage order ### - # for i_chan in range(len(all_coords_trans)): - # montage_trans.dig[i_chan+3]['r'] = all_coords_trans[i_chan] - # montage_trans.ch_names[i_chan] = all_labels[i_chan] - - # Create mne structure ###create info structure### - info = create_info(boxy_labels,srate,ch_types='fnirs_raw') - ###add data type and channel wavelength to info### - info.update(dig=montage_trans.dig, trans=trans) + info = create_info(boxy_labels, srate, ch_types='fnirs_raw') + ###add dig info### + ## this also applies a transform to the data into neuromag space based on fiducials + info.set_montage(my_dig_montage) # Store channel, source, and detector locations # The channel location is stored in the first 3 entries of loc. # The source location is stored in the second 3 entries of loc. # The detector location is stored in the third 3 entries of loc. - # NIRx NIRSite uses MNI coordinates. # Also encode the light frequency in the structure. - + ###place our coordinates and wavelengths for each channel### + # # These are all in actual 3d individual coordinates, so let's transform them to + # # the Neuromag head coordinate frame + trans = get_ras_to_neuromag_trans(fiducial_coords[0], + fiducial_coords[1], + fiducial_coords[2]) + for i_chan in range(len(boxy_labels)-1): temp_chn = apply_trans(trans,boxy_coords[i_chan][0:3]) temp_src = apply_trans(trans,boxy_coords[i_chan][3:6]) @@ -274,7 +247,9 @@ def __init__(self, fname, preload=False, verbose=None): temp_other = np.asarray(boxy_coords[i_chan][9:],dtype=np.float64) info['chs'][i_chan]['loc'] = test = np.concatenate((temp_chn, temp_src, temp_det, temp_other),axis=0) + info['chs'][-1]['loc'] = np.zeros((12,)) + raw_extras = {'source_num': source_num, 'detect_num': detect_num, 'start_line': start_line, diff --git a/tutorials/preprocessing/plot_80_boxy_processing.py b/tutorials/preprocessing/plot_80_boxy_processing.py index 93aeee767bd..7d87a69adc8 100644 --- 
a/tutorials/preprocessing/plot_80_boxy_processing.py +++ b/tutorials/preprocessing/plot_80_boxy_processing.py @@ -62,13 +62,13 @@ # # These short channels can be seen in the figure above. # # To achieve this we pick all the channels that are not considered to be short. -# picks = mne.pick_types(raw_intensity.info, meg=False, fnirs=True) -# dists = mne.preprocessing.nirs.source_detector_distances( -# raw_intensity.info, picks=picks) -# raw_intensity.pick(picks[dists < 0.06]) -# scalings = dict(fnirs_raw=1e2) -# raw_intensity.plot(n_channels=10, -# duration=1000, scalings=scalings, show_scrollbars=True) +picks = mne.pick_types(raw_intensity.info, meg=False, fnirs=True) +dists = mne.preprocessing.nirs.source_detector_distances( + raw_intensity.info, picks=picks) +raw_intensity.pick(picks[dists < 0.06]) +scalings = dict(fnirs_raw=1e2) +raw_intensity.plot(n_channels=10, + duration=1000, scalings=scalings, show_scrollbars=True) # ############################################################################### From 74d6ebde622490549fffdbf9f551daa474a3f69d Mon Sep 17 00:00:00 2001 From: Kyle Mathewson Date: Mon, 4 May 2020 23:25:33 -0700 Subject: [PATCH 070/167] fixed review comments --- mne/io/boxy/boxy.py | 27 ++++++++++----------------- 1 file changed, 10 insertions(+), 17 deletions(-) diff --git a/mne/io/boxy/boxy.py b/mne/io/boxy/boxy.py index fa5df41a01e..bf3031651c3 100644 --- a/mne/io/boxy/boxy.py +++ b/mne/io/boxy/boxy.py @@ -203,18 +203,15 @@ def __init__(self, fname, preload=False, verbose=None): # add extra column for triggers boxy_labels.append('Markers') + # convert to floats + boxy_coords = np.array(boxy_coords, float) + all_coords = np.array(all_coords, float) + ###make our montage### ###montage only wants channel coords, so need to grab those, convert to### ###array, then make a dict with labels### - for i_chan in range(len(boxy_coords)): - boxy_coords[i_chan] = np.asarray(boxy_coords[i_chan],dtype=np.float64) - - for i_chan in range(len(all_coords)): - 
all_coords[i_chan] = np.asarray(all_coords[i_chan],dtype=np.float64) - all_chan_dict = dict(zip(all_labels,all_coords)) - ###make our montage### my_dig_montage = make_dig_montage(ch_pos=all_chan_dict, coord_frame='unknown', nasion = fiducial_coords[0], @@ -236,19 +233,15 @@ def __init__(self, fname, preload=False, verbose=None): ###place our coordinates and wavelengths for each channel### # # These are all in actual 3d individual coordinates, so let's transform them to # # the Neuromag head coordinate frame - trans = get_ras_to_neuromag_trans(fiducial_coords[0], + native_head_t = get_ras_to_neuromag_trans(fiducial_coords[0], fiducial_coords[1], fiducial_coords[2]) for i_chan in range(len(boxy_labels)-1): - temp_chn = apply_trans(trans,boxy_coords[i_chan][0:3]) - temp_src = apply_trans(trans,boxy_coords[i_chan][3:6]) - temp_det = apply_trans(trans,boxy_coords[i_chan][6:9]) - temp_other = np.asarray(boxy_coords[i_chan][9:],dtype=np.float64) - info['chs'][i_chan]['loc'] = test = np.concatenate((temp_chn, temp_src, - temp_det, temp_other),axis=0) - - info['chs'][-1]['loc'] = np.zeros((12,)) + temp_ch_src_det = apply_trans(native_head_t, boxy_coords[i_chan][:9].reshape(3, 3)).ravel() + temp_other = np.asarray(boxy_coords[i_chan][9:], dtype=np.float64) # add wavelength and placeholders + info['chs'][i_chan]['loc'] = np.concatenate((temp_ch_src_det, temp_other), axis=0) + info['chs'][-1]['loc'] = np.zeros((12,)) #remove last line? 
raw_extras = {'source_num': source_num, 'detect_num': detect_num, @@ -262,7 +255,7 @@ def __init__(self, fname, preload=False, verbose=None): print('New first_samps: ', first_samps) diff = end_line-start_line #input file has rows for each source, output variable rearranges as columns and does not - last_samps = start_line + int(diff/source_num)-1 + last_samps = start_line + diff // source_num -1 print('New last_samps: ', last_samps) print('New Difference: ', last_samps-first_samps) From 4ee0ffcfb704b324bd95e2632f7bded7e1039678 Mon Sep 17 00:00:00 2001 From: Kyle Mathewson Date: Thu, 7 May 2020 11:00:20 -0700 Subject: [PATCH 071/167] rebase from import to fix pandas use --- mne/io/boxy/boxy.py | 178 +++++++++++++++++++++----------------------- 1 file changed, 85 insertions(+), 93 deletions(-) diff --git a/mne/io/boxy/boxy.py b/mne/io/boxy/boxy.py index bf3031651c3..71192aa6be2 100644 --- a/mne/io/boxy/boxy.py +++ b/mne/io/boxy/boxy.py @@ -6,7 +6,6 @@ import glob as glob import re as re import os.path as op -import pandas as pd import numpy as np from ..base import BaseRaw @@ -72,15 +71,11 @@ def __init__(self, fname, preload=False, verbose=None): # Parse required header fields ###this keeps track of the line we're on### ###mostly to know the start and stop of data (probably an easier way)### - line_num = 0 ###load and read data to get some meta information### ###there is alot of information at the beginning of a file### ###but this only grabs some of it### - - with open(files['001'],'r') as data: - for i_line in data: - line_num += 1 + for line_num,i_line in enumerate(data,1): if '#DATA ENDS' in i_line: end_line = line_num - 1 break @@ -98,7 +93,7 @@ def __init__(self, fname, preload=False, verbose=None): srate = float(i_line.rsplit(' ')[0]) elif '#DATA BEGINS' in i_line: start_line = line_num - + # Extract source-detectors ###set up some variables### chan_num = [] @@ -169,7 +164,6 @@ def __init__(self, fname, preload=False, verbose=None): chan_index = 
all_labels.index(i_chan) detect_coords.append(all_coords[chan_index]) - # Generate meaningful channel names ###need to rename labels to make other functions happy### ###get our unique labels for sources and detectors### @@ -246,7 +240,8 @@ def __init__(self, fname, preload=False, verbose=None): raw_extras = {'source_num': source_num, 'detect_num': detect_num, 'start_line': start_line, - 'files': files} + 'end_line': end_line, + 'files': files,} print('Start Line: ', start_line) print('End Line: ', end_line) @@ -270,8 +265,16 @@ def _read_segment_file(self, data, idx, fi, start, stop, cals, mult): source_num = self._raw_extras[fi]['source_num'] detect_num = self._raw_extras[fi]['detect_num'] start_line = self._raw_extras[fi]['start_line'] + end_line = self._raw_extras[fi]['end_line'] + boxy_file = self._raw_extras[fi]['files']['001'] + + ###load our data### + boxy_data = [] + with open(boxy_file,'r') as data_file: + for line_num,i_line in enumerate(data_file,1): + if line_num > start_line and line_num <= end_line: + boxy_data.append(i_line.rsplit(' ')) - raw_data = pd.read_csv(self._raw_extras[fi]['files']['001'], skiprows=start_line, sep='\t') ###detectors, sources, and data types### detectors = ['A', 'B', 'C', 'D', 'E', 'F', 'G', 'H', 'I', 'J', 'K', 'L', 'M', 'N', 'O', 'P', 'Q', 'R', 'S', 'T', 'U', 'V', 'W', 'X', @@ -279,107 +282,96 @@ def _read_segment_file(self, data, idx, fi, start, stop, cals, mult): data_types = ['AC','DC','Ph'] sources = np.arange(1,source_num+1,1) - - ###since we can save boxy files in two different styles### - ###this will check to see which style the data is saved### - ###seems to also work with older boxy files### - if 'exmux' in raw_data.columns: - filetype = 'non-parsed' - - ###drop the last line as this is just '#DATA ENDS'### - raw_data = raw_data.drop([len(raw_data)-1]) - - ###store some extra info### - record = raw_data['record'].to_numpy() - exmux = raw_data['exmux'].to_numpy() - - ###make some empty variables to store our data### 
- raw_ac = np.zeros((detect_num*source_num,int(len(raw_data)/source_num))) - raw_dc = np.zeros((detect_num*source_num,int(len(raw_data)/source_num))) - raw_ph = np.zeros((detect_num*source_num,int(len(raw_data)/source_num))) - else: - filetype = 'parsed' + ###get column names from the first row of our boxy data### + col_names = np.asarray(re.findall('\w+\-\w+|\w+\-\d+|\w+',boxy_data[0][0])) + del boxy_data[0] + + ###sometimes there is an empty line before our data starts### + ###this should remove them### + while re.findall('[-+]?\d*\.?\d+',boxy_data[0][0]) == []: + del boxy_data[0] - ###drop the last line as this is just '#DATA ENDS'### - ###also drop the first line since this is empty### - raw_data = raw_data.drop([0,len(raw_data)-1]) + ###grba the individual data points for each column### + boxy_data = [re.findall('[-+]?\d*\.?\d+',i_row[0]) for i_row in boxy_data] - ###make some empty variables to store our data### - raw_ac = np.zeros(((detect_num*source_num),len(raw_data))) - raw_dc = np.zeros(((detect_num*source_num),len(raw_data))) - raw_ph = np.zeros(((detect_num*source_num),len(raw_data))) - - ###store some extra data, might not need these though### - time = raw_data['time'].to_numpy() if 'time' in raw_data.columns else [] - time = raw_data['time'].to_numpy() if 'time' in raw_data.columns else [] - group = raw_data['group'].to_numpy() if 'group' in raw_data.columns else [] - step = raw_data['step'].to_numpy() if 'step' in raw_data.columns else [] - mark = raw_data['mark'].to_numpy() if 'mark' in raw_data.columns else [] - flag = raw_data['flag'].to_numpy() if 'flag' in raw_data.columns else [] - aux1 = raw_data['aux-1'].to_numpy() if 'aux-1' in raw_data.columns else [] - digaux = raw_data['digaux'].to_numpy() if 'digaux' in raw_data.columns else [] - bias = np.zeros((detect_num,len(raw_data))) - - ###loop through detectors### + ###make variable to store our data as an array rather than list of strings### + boxy_length = len(boxy_data[0]) + boxy_array = 
np.full((len(boxy_data),boxy_length),np.nan) + for ii, i_data in enumerate(boxy_data): + ###need to make sure our rows are the same length### + ###this is done by padding the shorter ones### + padding = boxy_length - len(i_data) + boxy_array[ii] = np.pad(np.asarray(i_data, dtype=float), (0,padding), mode='empty') + + ###grab data from the other columns that don't pertain to AC, DC, or Ph### + meta_data = dict() + keys = ['time','record','group','exmux','step','mark','flag','aux1','digaux'] for i_detect in detectors[0:detect_num]: - - ###older boxy files don't seem to keep track of detector bias### - ###probably due to specific boxy settings actually### - if 'bias-A' in raw_data.columns: - bias[detectors.index(i_detect),:] = raw_data['bias-' + i_detect].to_numpy() - - ###loop through data types### - for i_data in data_types: + keys.append('bias-' + i_detect) + + ###data that isn't in our boxy file will be an empty list### + for key in keys: + meta_data[key] = (boxy_array[:,np.where(col_names == key)[0][0]] if + key in col_names else []) + + ###determine what kind of boxy file we have### + filetype = 'non-parsed' if type(meta_data['exmux']) is not list else 'parsed' + + ###make some empty variables to store our data### + if filetype == 'non-parsed': + data_ = np.zeros(((((detect_num*source_num)*3)+1), + int(len(boxy_data)/source_num))) + elif filetype == 'parsed': + data_ = np.zeros(((((detect_num*source_num)*3)+1), + int(len(boxy_data)))) + + ###loop through data types### + for i_data in data_types: + + ###loop through detectors### + for i_detect in detectors[0:detect_num]: + ###loop through sources### - for i_source in sources: - ###where to store our data### - index_loc = detectors.index(i_detect)*source_num + (i_source-1) + for i_source in sources: + + ###determine where to store our data### + index_loc = (detectors.index(i_detect)*source_num + + (i_source-1) + (data_types.index(i_data)*(source_num*detect_num))) + ###need to treat our filetypes differently### if 
filetype == 'non-parsed': - ###filetype saves timepoints in groups### + ###non-parsed saves timepoints in groups### ###this should account for that### - time_points = np.arange(i_source-1,int(record[-1])*source_num,source_num) + time_points = np.arange(i_source-1,int(meta_data['record'][-1])*source_num,source_num) - ###determine which channel to look for### - channel = i_detect + '-' + i_data + ###determine which channel to look for in boxy_array### + channel = np.where(col_names == i_detect + '-' + i_data)[0][0] ###save our data based on data type### - if data_types.index(i_data) == 0: - raw_ac[index_loc,:] = raw_data[channel][time_points].to_numpy() - elif data_types.index(i_data) == 1: - raw_dc[index_loc,:] = raw_data[channel][time_points].to_numpy() - elif data_types.index(i_data) == 2: - raw_ph[index_loc,:] = raw_data[channel][time_points].to_numpy() - elif filetype == 'parsed': - ###determine which channel to look for### - channel = i_detect + '-' + i_data + str(i_source) + data_[index_loc,:] = boxy_array[time_points,channel] + + elif filetype == 'parsed': + + ###determine which channel to look for in boxy_array### + channel = np.where(col_names == i_detect + '-' + + i_data + str(i_source))[0][0] ###save our data based on data type### - if data_types.index(i_data) == 0: - raw_ac[index_loc,:] = raw_data[channel].to_numpy() - elif data_types.index(i_data) == 1: - raw_dc[index_loc,:] = raw_data[channel].to_numpy() - elif data_types.index(i_data) == 2: - raw_ph[index_loc,:] = raw_data[channel].to_numpy() - - ###now combine our data types into a single array with the data### - data_ = np.append(raw_ac, np.append(raw_dc, raw_ph, axis=0),axis=0) + data_[index_loc,:] = boxy_array[:,channel] # Read triggers from event file ###add our markers to the data array based on filetype### - if filetype == 'non-parsed': - if type(digaux) is list and digaux != []: - markers = digaux[np.arange(0,len(digaux),source_num)] - else: - markers = np.zeros(np.size(data_,axis=1)) - elif 
filetype == 'parsed': - markers = digaux + if type(meta_data['digaux']) is not list: + if filetype == 'non-parsed': + markers = meta_data['digaux'][np.arange(0,len(meta_data['digaux']),source_num)] + elif filetype == 'parsed': + markers = meta_data['digaux'] + data_[-1,:] = markers print('Blank Data shape: ', data.shape) - temp = np.vstack((data_, markers)) - print('Input Data shape: ',temp.shape) + print('Input Data shape: ', data_.shape) # place our data into the data object in place - data[:] = np.vstack((data_, markers)) + data[:] = data_ return data From 788e06ac780949a0977039ebfbecbde34ba078ce Mon Sep 17 00:00:00 2001 From: Jonathan Kuziek Date: Thu, 7 May 2020 13:29:53 -0600 Subject: [PATCH 072/167] filetype now determined in RawBOXY and passed to _read_segment_file. Changed subjects_dir to use fetch_fsaverage as I was getting errors about surfaces or fiducial files missing --- mne/io/boxy/boxy.py | 9 ++++++--- tutorials/preprocessing/plot_80_boxy_processing.py | 2 +- 2 files changed, 7 insertions(+), 4 deletions(-) diff --git a/mne/io/boxy/boxy.py b/mne/io/boxy/boxy.py index 71192aa6be2..16ddce66763 100644 --- a/mne/io/boxy/boxy.py +++ b/mne/io/boxy/boxy.py @@ -7,6 +7,7 @@ import re as re import os.path as op import numpy as np +import pdb from ..base import BaseRaw from ..constants import FIFF @@ -74,6 +75,7 @@ def __init__(self, fname, preload=False, verbose=None): ###load and read data to get some meta information### ###there is alot of information at the beginning of a file### ###but this only grabs some of it### + filetype = 'parsed' with open(files['001'],'r') as data: for line_num,i_line in enumerate(data,1): if '#DATA ENDS' in i_line: @@ -93,6 +95,8 @@ def __init__(self, fname, preload=False, verbose=None): srate = float(i_line.rsplit(' ')[0]) elif '#DATA BEGINS' in i_line: start_line = line_num + elif 'exmux' in i_line: + filetype = 'non-parsed' # Extract source-detectors ###set up some variables### @@ -241,6 +245,7 @@ def __init__(self, fname, 
preload=False, verbose=None): 'detect_num': detect_num, 'start_line': start_line, 'end_line': end_line, + 'filetype': filetype, 'files': files,} print('Start Line: ', start_line) @@ -266,6 +271,7 @@ def _read_segment_file(self, data, idx, fi, start, stop, cals, mult): detect_num = self._raw_extras[fi]['detect_num'] start_line = self._raw_extras[fi]['start_line'] end_line = self._raw_extras[fi]['end_line'] + filetype = self._raw_extras[fi]['filetype'] boxy_file = self._raw_extras[fi]['files']['001'] ###load our data### @@ -313,9 +319,6 @@ def _read_segment_file(self, data, idx, fi, start, stop, cals, mult): for key in keys: meta_data[key] = (boxy_array[:,np.where(col_names == key)[0][0]] if key in col_names else []) - - ###determine what kind of boxy file we have### - filetype = 'non-parsed' if type(meta_data['exmux']) is not list else 'parsed' ###make some empty variables to store our data### if filetype == 'non-parsed': diff --git a/tutorials/preprocessing/plot_80_boxy_processing.py b/tutorials/preprocessing/plot_80_boxy_processing.py index 7d87a69adc8..35a346538fc 100644 --- a/tutorials/preprocessing/plot_80_boxy_processing.py +++ b/tutorials/preprocessing/plot_80_boxy_processing.py @@ -37,7 +37,7 @@ # # between the optodes, channels (the mid point of source-detector pairs) are # # shown as dots. 
-subjects_dir = mne.datasets.sample.data_path() + '/subjects' +subjects_dir = os.path.dirname(mne.datasets.fetch_fsaverage()) fig = mne.viz.create_3d_figure(size=(800, 600), bgcolor='white') fig = mne.viz.plot_alignment(raw_intensity.info, From a37a2e739dd9055812a279e5ede0cf2c645e6819 Mon Sep 17 00:00:00 2001 From: Kyle Mathewson Date: Thu, 7 May 2020 15:00:40 -0700 Subject: [PATCH 073/167] removed pdb --- mne/io/boxy/boxy.py | 1 - 1 file changed, 1 deletion(-) diff --git a/mne/io/boxy/boxy.py b/mne/io/boxy/boxy.py index 16ddce66763..203249b136f 100644 --- a/mne/io/boxy/boxy.py +++ b/mne/io/boxy/boxy.py @@ -7,7 +7,6 @@ import re as re import os.path as op import numpy as np -import pdb from ..base import BaseRaw from ..constants import FIFF From 8589a6c1e9749714c69e56991b0860202f51e025 Mon Sep 17 00:00:00 2001 From: Kyle Mathewson Date: Thu, 7 May 2020 15:24:57 -0700 Subject: [PATCH 074/167] new data file with more files --- mne/datasets/utils.py | 4 ++-- 1 file changed, 2 insertions(+), 2 deletions(-) diff --git a/mne/datasets/utils.py b/mne/datasets/utils.py index d171dea4413..93623a52c30 100644 --- a/mne/datasets/utils.py +++ b/mne/datasets/utils.py @@ -263,7 +263,7 @@ def _data_path(path=None, force_update=False, update_path=True, download=True, testing='https://codeload.github.com/mne-tools/mne-testing-data/' 'tar.gz/%s' % releases['testing'], multimodal='https://ndownloader.figshare.com/files/5999598', - fnirs_motor='https://osf.io/dj3eh/download?version=1', + fnirs_motor='https://osf.io/dj3eh/download?version=3', boxy_example='https://osf.io/hksme/download?version=1', opm='https://osf.io/p6ae7/download?version=2', visual_92_categories=[ @@ -328,7 +328,7 @@ def _data_path(path=None, force_update=False, update_path=True, download=True, testing='1ef691944239411b869b3ed2f40a69fe', multimodal='26ec847ae9ab80f58f204d09e2c08367', fnirs_motor='c4935d19ddab35422a69f3326a01fef8', - boxy_example='b3793334548b7ba04c1b767c66117414', + 
boxy_example='0d2a6525a3fc00d010dcb9488bbefa17', opm='370ad1dcfd5c47e029e692c85358a374', visual_92_categories=['74f50bbeb65740903eadc229c9fa759f', '203410a98afc9df9ae8ba9f933370e20'], From 85d348254a4ffd3a2d5e469ca412188e49111df0 Mon Sep 17 00:00:00 2001 From: Kyle Mathewson Date: Fri, 8 May 2020 00:44:10 -0700 Subject: [PATCH 075/167] tested new dataset, added four new keys for four montages, and testing loading in each one in place of 001 --- mne/datasets/utils.py | 4 ++-- mne/io/boxy/boxy.py | 7 ++++--- 2 files changed, 6 insertions(+), 5 deletions(-) diff --git a/mne/datasets/utils.py b/mne/datasets/utils.py index 93623a52c30..ef25fd07bb8 100644 --- a/mne/datasets/utils.py +++ b/mne/datasets/utils.py @@ -263,8 +263,8 @@ def _data_path(path=None, force_update=False, update_path=True, download=True, testing='https://codeload.github.com/mne-tools/mne-testing-data/' 'tar.gz/%s' % releases['testing'], multimodal='https://ndownloader.figshare.com/files/5999598', - fnirs_motor='https://osf.io/dj3eh/download?version=3', - boxy_example='https://osf.io/hksme/download?version=1', + fnirs_motor='https://osf.io/dj3eh/download?version=1', + boxy_example='https://osf.io/hksme/download?version=3', opm='https://osf.io/p6ae7/download?version=2', visual_92_categories=[ 'https://osf.io/8ejrs/download?version=1', diff --git a/mne/io/boxy/boxy.py b/mne/io/boxy/boxy.py index 203249b136f..aeb9411c7f5 100644 --- a/mne/io/boxy/boxy.py +++ b/mne/io/boxy/boxy.py @@ -59,7 +59,8 @@ def __init__(self, fname, preload=False, verbose=None): # Check if required files exist and store names for later use files = dict() - keys = ('mtg', 'elp', 'tol', '001') + keys = ('mtg', 'elp', 'tol', 'a.001', 'b.001', 'c.001', 'd.001') + print(fname) for key in keys: files[key] = glob.glob('%s/*%s' % (fname, key)) if len(files[key]) != 1: @@ -75,7 +76,7 @@ def __init__(self, fname, preload=False, verbose=None): ###there is alot of information at the beginning of a file### ###but this only grabs some of it### 
filetype = 'parsed' - with open(files['001'],'r') as data: + with open(files['d.001'],'r') as data: for line_num,i_line in enumerate(data,1): if '#DATA ENDS' in i_line: end_line = line_num - 1 @@ -271,7 +272,7 @@ def _read_segment_file(self, data, idx, fi, start, stop, cals, mult): start_line = self._raw_extras[fi]['start_line'] end_line = self._raw_extras[fi]['end_line'] filetype = self._raw_extras[fi]['filetype'] - boxy_file = self._raw_extras[fi]['files']['001'] + boxy_file = self._raw_extras[fi]['files']['d.001'] ###load our data### boxy_data = [] From bef2e3f3adaa41eb4c92afcd5d1015ac7e31bfab Mon Sep 17 00:00:00 2001 From: Kyle Mathewson Date: Fri, 8 May 2020 00:50:01 -0700 Subject: [PATCH 076/167] pulling in source detector plotting temporarily --- mne/defaults.py | 7 ++++--- mne/viz/_3d.py | 15 +++++++++++---- 2 files changed, 15 insertions(+), 7 deletions(-) diff --git a/mne/defaults.py b/mne/defaults.py index c43f8166609..f23f046ad62 100644 --- a/mne/defaults.py +++ b/mne/defaults.py @@ -57,6 +57,8 @@ ecog_scale=5e-3, seeg_scale=5e-3, fnirs_scale=5e-3, + source_scale=5e-3, + detector_scale=5e-3, hpi_scale=15e-3, head_color=(0.988, 0.89, 0.74), @@ -66,6 +68,8 @@ ecog_color=(1., 1., 1.), seeg_color=(1., 1., .3), fnirs_color=(1., .4, .3), + source_color=(1., .2, .3), + detector_color=(.2, 0., 0.), lpa_color=(1., 0., 0.), nasion_color=(0., 1., 0.), rpa_color=(0., 0., 1.), @@ -81,11 +85,8 @@ def _handle_default(k, v=None): """Avoid dicts as default keyword arguments. - Use this function instead to resolve default dict values. 
Example usage:: - scalings = _handle_default('scalings', scalings) - """ this_mapping = deepcopy(DEFAULTS[k]) if v is not None: diff --git a/mne/viz/_3d.py b/mne/viz/_3d.py index ba912190217..479dcacbc69 100644 --- a/mne/viz/_3d.py +++ b/mne/viz/_3d.py @@ -735,6 +735,7 @@ def plot_alignment(info=None, trans=None, subject=None, subjects_dir=None, other_keys = sorted(other_bools.keys()) other_picks = {key: pick_types(info, meg=False, ref_meg=False, **{key: True}) for key in other_keys} + if trans == 'auto': # let's try to do this in MRI coordinates so they're easy to plot subjects_dir = get_subjects_dir(subjects_dir, raise_error=True) @@ -1038,10 +1039,16 @@ def plot_alignment(info=None, trans=None, subject=None, subjects_dir=None, del dig for key, picks in other_picks.items(): if other_bools[key] and len(picks): - other_loc[key] = np.array([info['chs'][pick]['loc'][:3] - for pick in picks]) - logger.info('Plotting %d %s location%s' - % (len(other_loc[key]), key, _pl(other_loc[key]))) + if key == 'fnirs': + # other_loc[key] = np.array([info['chs'][pick]['loc'][:3] + # for pick in picks]) + other_loc['source'] = np.array([info['chs'][pick]['loc'][3:6] + for pick in picks]) + other_loc['detector'] = np.array([info['chs'][pick]['loc'][6:9] + for pick in picks]) + other_keys = sorted(other_loc.keys()) + logger.info('Plotting %d %s location%s' + % (len(other_loc[key]), key, _pl(other_loc[key]))) # initialize figure renderer = _get_renderer(fig, bgcolor=(0.5, 0.5, 0.5), size=(800, 800)) From c6b14af15c64b37f456d20e0f36dbf93fbec4ebc Mon Sep 17 00:00:00 2001 From: Jonathan Kuziek Date: Fri, 8 May 2020 16:56:06 -0600 Subject: [PATCH 077/167] WIP trying to load multiple boxy files --- mne/io/boxy/boxy.py | 170 +++++++++++++++++++++++++++++--------------- 1 file changed, 112 insertions(+), 58 deletions(-) diff --git a/mne/io/boxy/boxy.py b/mne/io/boxy/boxy.py index aeb9411c7f5..24a1764c53b 100644 --- a/mne/io/boxy/boxy.py +++ b/mne/io/boxy/boxy.py @@ -50,24 +50,37 @@ class 
RawBOXY(BaseRaw): -------- mne.io.Raw : Documentation of attribute and methods. """ - + @verbose def __init__(self, fname, preload=False, verbose=None): from ...externals.pymatreader import read_mat from ...coreg import get_mni_fiducials, coregister_fiducials # avoid circular import prob logger.info('Loading %s' % fname) - + import pdb # Check if required files exist and store names for later use files = dict() - keys = ('mtg', 'elp', 'tol', 'a.001', 'b.001', 'c.001', 'd.001') + keys = ('mtg', 'elp', 'tol', '*.[000-999]*') print(fname) for key in keys: - files[key] = glob.glob('%s/*%s' % (fname, key)) + if key == '*.[000-999]*': + files[key] = [glob.glob('%s/*%s' % (fname, key))] + else: + files[key] = glob.glob('%s/*%s' % (fname, key)) if len(files[key]) != 1: raise RuntimeError('Expect one %s file, got %d' % (key, len(files[key]),)) files[key] = files[key][0] - + + ###determine how many blocks we have per montage### + blk_names = [] + mtg_names = [] + mtgs = re.findall('\w\.\d+',str(files['*.[000-999]*'])) + [mtg_names.append(i_mtg[0]) for i_mtg in mtgs if i_mtg[0] not in mtg_names] + for i_mtg in mtg_names: + temp = [] + [temp.append(ii_mtg[2:]) for ii_mtg in mtgs if ii_mtg[0] == i_mtg] + blk_names.append(temp) + # Read header file # Parse required header fields ###this keeps track of the line we're on### @@ -75,32 +88,41 @@ def __init__(self, fname, preload=False, verbose=None): ###load and read data to get some meta information### ###there is alot of information at the beginning of a file### ###but this only grabs some of it### - filetype = 'parsed' - with open(files['d.001'],'r') as data: - for line_num,i_line in enumerate(data,1): - if '#DATA ENDS' in i_line: - end_line = line_num - 1 - break - if 'Detector Channels' in i_line: - detect_num = int(i_line.rsplit(' ')[0]) - elif 'External MUX Channels' in i_line: - source_num = int(i_line.rsplit(' ')[0]) - elif 'Auxiliary Channels' in i_line: - aux_num = int(i_line.rsplit(' ')[0]) - elif 'Waveform (CCF) 
Frequency (Hz)' in i_line: - ccf_ha = float(i_line.rsplit(' ')[0]) - elif 'Update Rate (Hz)' in i_line: - srate = float(i_line.rsplit(' ')[0]) - elif 'Updata Rate (Hz)' in i_line: - srate = float(i_line.rsplit(' ')[0]) - elif '#DATA BEGINS' in i_line: - start_line = line_num - elif 'exmux' in i_line: - filetype = 'non-parsed' + detect_num = [] + source_num = [] + aux_num = [] + ccf_ha = [] + srate = [] + start_line = [] + end_line = [] + filetype = ['parsed' for i_file in files['*.[000-999]*']] + for file_num,i_file in enumerate(files['*.[000-999]*'],0): + with open(i_file,'r') as data: + for line_num,i_line in enumerate(data,1): + if '#DATA ENDS' in i_line: + end_line.append(line_num - 1) + break + if 'Detector Channels' in i_line: + detect_num.append(int(i_line.rsplit(' ')[0])) + elif 'External MUX Channels' in i_line: + source_num.append(int(i_line.rsplit(' ')[0])) + elif 'Auxiliary Channels' in i_line: + aux_num.append(int(i_line.rsplit(' ')[0])) + elif 'Waveform (CCF) Frequency (Hz)' in i_line: + ccf_ha.append(float(i_line.rsplit(' ')[0])) + elif 'Update Rate (Hz)' in i_line: + srate.append(float(i_line.rsplit(' ')[0])) + elif 'Updata Rate (Hz)' in i_line: + srate.append(float(i_line.rsplit(' ')[0])) + elif '#DATA BEGINS' in i_line: + start_line.append(line_num) + elif 'exmux' in i_line: + filetype[file_num] = 'non-parsed' # Extract source-detectors ###set up some variables### - chan_num = [] + chan_num_1 = [] + chan_num_2 = [] source_label = [] detect_label = [] chan_wavelength = [] @@ -108,15 +130,17 @@ def __init__(self, fname, preload=False, verbose=None): ###load and read each line of the .mtg file### with open(files['mtg'],'r') as data: - for i_ignore in range(2): - next(data) - for i_line in data: - chan1, chan2, source, detector, wavelength, modulation = i_line.split() - chan_num.append(chan1) - source_label.append(source) - detect_label.append(detector) - chan_wavelength.append(wavelength) - chan_modulation.append(modulation) + for line_num, i_line in 
enumerate(data,1): + if line_num == 2: + mtg_chan_num = [int(num) for num in i_line.split()] + elif line_num > 2: + chan1, chan2, source, detector, wavelength, modulation = i_line.split() + chan_num_1.append(chan1) + chan_num_2.append(chan2) + source_label.append(source) + detect_label.append(detector) + chan_wavelength.append(wavelength) + chan_modulation.append(modulation) # Read information about probe/montage/optodes # A word on terminology used here: @@ -154,28 +178,34 @@ def __init__(self, fname, preload=False, verbose=None): for i_index in range(3): fiducial_coords[i_index] = np.asarray([float(x) for x in fiducial_coords[i_index]]) - ###get coordinates for sources### + ###get coordinates for sources in .mtg file from .elp file### source_coords = [] for i_chan in source_label: if i_chan in all_labels: chan_index = all_labels.index(i_chan) source_coords.append(all_coords[chan_index]) - ###get coordinates for detectors### + ###get coordinates for detectors in .mtg file from .elp file### detect_coords = [] for i_chan in detect_label: if i_chan in all_labels: chan_index = all_labels.index(i_chan) detect_coords.append(all_coords[chan_index]) - # Generate meaningful channel names - ###need to rename labels to make other functions happy### - ###get our unique labels for sources and detectors### + # Generate meaningful channel names for each montage + ###get our unique labels for sources and detectors for each montage### unique_source_labels = [] unique_detect_labels = [] - [unique_source_labels.append(label) for label in source_label if label not in unique_source_labels] - [unique_detect_labels.append(label) for label in detect_label if label not in unique_detect_labels] - + for mtg_num, i_mtg in enumerate(mtg_chan_num,0): + mtg_source_labels = [] + mtg_detect_labels = [] + start = int(np.sum(mtg_chan_num[:mtg_num])) + end = int(np.sum(mtg_chan_num[:mtg_num+1])) + [mtg_source_labels.append(label) for label in source_label[start:end] if label not in 
mtg_source_labels] + [mtg_detect_labels.append(label) for label in detect_label[start:end] if label not in mtg_detect_labels] + unique_source_labels.append(mtg_source_labels) + unique_detect_labels.append(mtg_detect_labels) + ###now let's label each channel in our data### ###data is channels X timepoint where the first source_num rows correspond to### ###the first detector, and each row within that group is a different source### @@ -186,18 +216,41 @@ def __init__(self, fname, preload=False, verbose=None): boxy_coords = [] boxy_labels = [] data_types = ['AC','DC','Ph'] - total_chans = detect_num*source_num - for i_type in data_types: - for i_coord in range(len(source_coords[0:total_chans])): - boxy_coords.append(np.mean( - np.vstack((source_coords[i_coord], detect_coords[i_coord])), - axis=0).tolist() + source_coords[i_coord] + - detect_coords[i_coord] + [chan_wavelength[i_coord]] + [0] + [0]) - boxy_labels.append('S' + - str(unique_source_labels.index(source_label[i_coord])+1) - + '_D' + - str(unique_detect_labels.index(detect_label[i_coord])+1) - + ' ' + chan_wavelength[i_coord] + ' ' + i_type) + temp_boxy_coords = [] + temp_boxy_labels = [] + blk_num = [len(blk) for blk in blk_names] + for mtg_num, i_mtg in enumerate(mtg_chan_num,0): + temp_coords = [] + temp_labels = [] + start = int(np.sum(mtg_chan_num[:mtg_num])) + end = int(np.sum(mtg_chan_num[:mtg_num+1])) + start_blk = int(np.sum(blk_num[:mtg_num])) + total_chans = detect_num[start_blk]*source_num[start_blk] + for i_type in data_types: + for i_coord in range(start,end): + temp_coords.append(np.mean( + np.vstack((source_coords[i_coord], detect_coords[i_coord])), + axis=0).tolist() + source_coords[i_coord] + + detect_coords[i_coord] + [chan_wavelength[i_coord]] + [0] + [0]) + temp_labels.append('S' + + str(unique_source_labels[mtg_num].index(source_label[i_coord])+1) + + '_D' + + str(unique_detect_labels[mtg_num].index(detect_label[i_coord])+1) + + ' ' + chan_wavelength[i_coord] + ' ' + i_type) + 
boxy_coords.append(temp_coords[-1]) + boxy_labels.append(temp_labels[-1] + + mtg_names[mtg_num]) + # ###add a channel for markers### + ###this makes separate lists for each montage### + ###maybe we don't want that### + ###maybe we want one big list, like with lines 240-242### + ###if you uncomment lines 248-252, comment out line 256 and 258### + # temp_coords.append(np.zeros((12,)).tolist()) + # temp_coords = np.array(temp_coords, float) + # temp_labels.append('Markers') + # boxy_coords.append(temp_coords) + # boxy_labels.append(temp_labels) + # pdb.set_trace() # add extra column for triggers boxy_labels.append('Markers') @@ -217,7 +270,7 @@ def __init__(self, fname, preload=False, verbose=None): rpa = fiducial_coords[2]) ###create info structure### - info = create_info(boxy_labels, srate, ch_types='fnirs_raw') + info = create_info(boxy_labels, srate[0], ch_types='fnirs_raw') ###add dig info### ## this also applies a transform to the data into neuromag space based on fiducials info.set_montage(my_dig_montage) @@ -240,6 +293,7 @@ def __init__(self, fname, preload=False, verbose=None): temp_other = np.asarray(boxy_coords[i_chan][9:], dtype=np.float64) # add wavelength and placeholders info['chs'][i_chan]['loc'] = np.concatenate((temp_ch_src_det, temp_other), axis=0) info['chs'][-1]['loc'] = np.zeros((12,)) #remove last line? 
+ # pdb.set_trace() raw_extras = {'source_num': source_num, 'detect_num': detect_num, From 007dfbae398aceef177df57036feaa17adf9a6bc Mon Sep 17 00:00:00 2001 From: Jonathan Kuziek Date: Mon, 11 May 2020 17:00:05 -0600 Subject: [PATCH 078/167] seems to be working...will find multiple files and combine all the data into a large array, with marker info added to the end from each file --- mne/io/boxy/boxy.py | 299 ++++++++++++++++++++++++-------------------- 1 file changed, 161 insertions(+), 138 deletions(-) diff --git a/mne/io/boxy/boxy.py b/mne/io/boxy/boxy.py index 24a1764c53b..5a1112b3219 100644 --- a/mne/io/boxy/boxy.py +++ b/mne/io/boxy/boxy.py @@ -56,7 +56,7 @@ def __init__(self, fname, preload=False, verbose=None): from ...externals.pymatreader import read_mat from ...coreg import get_mni_fiducials, coregister_fiducials # avoid circular import prob logger.info('Loading %s' % fname) - import pdb + # import pdb # Check if required files exist and store names for later use files = dict() keys = ('mtg', 'elp', 'tol', '*.[000-999]*') @@ -79,7 +79,7 @@ def __init__(self, fname, preload=False, verbose=None): for i_mtg in mtg_names: temp = [] [temp.append(ii_mtg[2:]) for ii_mtg in mtgs if ii_mtg[0] == i_mtg] - blk_names.append(temp) + blk_names.append(temp) # Read header file # Parse required header fields @@ -118,7 +118,7 @@ def __init__(self, fname, preload=False, verbose=None): start_line.append(line_num) elif 'exmux' in i_line: filetype[file_num] = 'non-parsed' - + # Extract source-detectors ###set up some variables### chan_num_1 = [] @@ -141,7 +141,7 @@ def __init__(self, fname, preload=False, verbose=None): detect_label.append(detector) chan_wavelength.append(wavelength) chan_modulation.append(modulation) - + # Read information about probe/montage/optodes # A word on terminology used here: # Sources produce light @@ -215,45 +215,46 @@ def __init__(self, fname, preload=False, verbose=None): ###will label them based on ac, dc, and ph data### boxy_coords = [] 
boxy_labels = [] + mrk_coords = [] + mrk_labels = [] data_types = ['AC','DC','Ph'] - temp_boxy_coords = [] - temp_boxy_labels = [] + mtg_start = [] + mtg_end = [] + mtg_src_num = [] + mtg_det_num = [] blk_num = [len(blk) for blk in blk_names] for mtg_num, i_mtg in enumerate(mtg_chan_num,0): - temp_coords = [] - temp_labels = [] start = int(np.sum(mtg_chan_num[:mtg_num])) end = int(np.sum(mtg_chan_num[:mtg_num+1])) + ###we will also organise some data for each montage### start_blk = int(np.sum(blk_num[:mtg_num])) - total_chans = detect_num[start_blk]*source_num[start_blk] - for i_type in data_types: - for i_coord in range(start,end): - temp_coords.append(np.mean( - np.vstack((source_coords[i_coord], detect_coords[i_coord])), - axis=0).tolist() + source_coords[i_coord] + - detect_coords[i_coord] + [chan_wavelength[i_coord]] + [0] + [0]) - temp_labels.append('S' + - str(unique_source_labels[mtg_num].index(source_label[i_coord])+1) - + '_D' + - str(unique_detect_labels[mtg_num].index(detect_label[i_coord])+1) - + ' ' + chan_wavelength[i_coord] + ' ' + i_type) - boxy_coords.append(temp_coords[-1]) - boxy_labels.append(temp_labels[-1] + - mtg_names[mtg_num]) - # ###add a channel for markers### - ###this makes separate lists for each montage### - ###maybe we don't want that### - ###maybe we want one big list, like with lines 240-242### - ###if you uncomment lines 248-252, comment out line 256 and 258### - # temp_coords.append(np.zeros((12,)).tolist()) - # temp_coords = np.array(temp_coords, float) - # temp_labels.append('Markers') - # boxy_coords.append(temp_coords) - # boxy_labels.append(temp_labels) - # pdb.set_trace() + ###get stop and stop lines for each montage### + mtg_start.append(start_line[start_blk]) + mtg_end.append(end_line[start_blk]) + ###get source and detector numbers for each montage### + mtg_src_num.append(source_num[start_blk]) + mtg_det_num.append(detect_num[start_blk]) + for i_blk in blk_names[mtg_num]: + for i_type in data_types: + for i_coord in 
range(start,end): + boxy_coords.append(np.mean( + np.vstack((source_coords[i_coord], detect_coords[i_coord])), + axis=0).tolist() + source_coords[i_coord] + + detect_coords[i_coord] + [chan_wavelength[i_coord]] + [0] + [0]) + boxy_labels.append('S' + + str(unique_source_labels[mtg_num].index(source_label[i_coord])+1) + + '_D' + + str(unique_detect_labels[mtg_num].index(detect_label[i_coord])+1) + + ' ' + chan_wavelength[i_coord] + i_type[0] + mtg_names[mtg_num] + i_blk[1:]) + + # add extra column for triggers + mrk_labels.append('Markers' + ' ' + mtg_names[mtg_num] + i_blk[1:]) + mrk_coords.append(np.zeros((12,))) + + ###add triggers to the end of our data### + boxy_labels.extend(mrk_labels) + boxy_coords.extend(mrk_coords) - # add extra column for triggers - boxy_labels.append('Markers') # convert to floats boxy_coords = np.array(boxy_coords, float) all_coords = np.array(all_coords, float) @@ -288,31 +289,37 @@ def __init__(self, fname, preload=False, verbose=None): fiducial_coords[1], fiducial_coords[2]) - for i_chan in range(len(boxy_labels)-1): + for i_chan in range(len(boxy_labels)): temp_ch_src_det = apply_trans(native_head_t, boxy_coords[i_chan][:9].reshape(3, 3)).ravel() temp_other = np.asarray(boxy_coords[i_chan][9:], dtype=np.float64) # add wavelength and placeholders - info['chs'][i_chan]['loc'] = np.concatenate((temp_ch_src_det, temp_other), axis=0) - info['chs'][-1]['loc'] = np.zeros((12,)) #remove last line? 
- # pdb.set_trace() - + info['chs'][i_chan]['loc'] = np.concatenate((temp_ch_src_det, temp_other), axis=0) + raw_extras = {'source_num': source_num, 'detect_num': detect_num, 'start_line': start_line, 'end_line': end_line, 'filetype': filetype, 'files': files,} - - print('Start Line: ', start_line) - print('End Line: ', end_line) - print('Original Difference: ', end_line-start_line) - first_samps = start_line + + print('Start Line: ', start_line[0]) + print('End Line: ', end_line[0]) + # print('Original Difference: ', end_line-start_line) + first_samps = start_line[0] print('New first_samps: ', first_samps) - diff = end_line-start_line + diff = end_line[0] - start_line[0] + # diff = [end_line[i_line] - start_line[i_line] for i_line in range(len(end_line))] #input file has rows for each source, output variable rearranges as columns and does not - last_samps = start_line + diff // source_num -1 + if filetype[0] == 'non-parsed': + last_samps = ((diff-2) // (source_num[0])) + start_line[0] - 1 + elif filetype =='parsed': + last_samps = (start_line[0] + diff) + # last_samps = [((start_line[i_line] + diff[i_line]) // (source_num[i_line] -1)) + # for i_line in range(len(start_line))] print('New last_samps: ', last_samps) print('New Difference: ', last_samps-first_samps) - + # print('New Difference: ', [str(last_samps[i_line]-first_samps[i_line]) + # for i_line in range(len(last_samps))]) + # pdb.set_trace() super(RawBOXY, self).__init__( info, preload, filenames=[fname], first_samps=[first_samps], last_samps=[last_samps], @@ -321,114 +328,130 @@ def __init__(self, fname, preload=False, verbose=None): def _read_segment_file(self, data, idx, fi, start, stop, cals, mult): """Read a segment of data from a file. 
""" + # import pdb + # pdb.set_trace() source_num = self._raw_extras[fi]['source_num'] detect_num = self._raw_extras[fi]['detect_num'] start_line = self._raw_extras[fi]['start_line'] end_line = self._raw_extras[fi]['end_line'] filetype = self._raw_extras[fi]['filetype'] - boxy_file = self._raw_extras[fi]['files']['d.001'] + boxy_files = self._raw_extras[fi]['files']['*.[000-999]*'] - ###load our data### - boxy_data = [] - with open(boxy_file,'r') as data_file: - for line_num,i_line in enumerate(data_file,1): - if line_num > start_line and line_num <= end_line: - boxy_data.append(i_line.rsplit(' ')) - ###detectors, sources, and data types### detectors = ['A', 'B', 'C', 'D', 'E', 'F', 'G', 'H', 'I', 'J', 'K', 'L', 'M', 'N', 'O', 'P', 'Q', 'R', 'S', 'T', 'U', 'V', 'W', 'X', 'Y', 'Z'] data_types = ['AC','DC','Ph'] - sources = np.arange(1,source_num+1,1) - - ###get column names from the first row of our boxy data### - col_names = np.asarray(re.findall('\w+\-\w+|\w+\-\d+|\w+',boxy_data[0][0])) - del boxy_data[0] - ###sometimes there is an empty line before our data starts### - ###this should remove them### - while re.findall('[-+]?\d*\.?\d+',boxy_data[0][0]) == []: + ###load our data### + all_data = [] + markers = [] + for file_num, boxy_file in enumerate(boxy_files): + boxy_data = [] + with open(boxy_file,'r') as data_file: + for line_num, i_line in enumerate(data_file,1): + if line_num > start_line[file_num] and line_num <= end_line[file_num]: + boxy_data.append(i_line.rsplit(' ')) + + sources = np.arange(1,source_num[file_num]+1,1) + + ###get column names from the first row of our boxy data### + col_names = np.asarray(re.findall('\w+\-\w+|\w+\-\d+|\w+',boxy_data[0][0])) del boxy_data[0] - ###grba the individual data points for each column### - boxy_data = [re.findall('[-+]?\d*\.?\d+',i_row[0]) for i_row in boxy_data] + ###sometimes there is an empty line before our data starts### + ###this should remove them### + while re.findall('[-+]?\d*\.?\d+',boxy_data[0][0]) == 
[]: + del boxy_data[0] + + ###grab the individual data points for each column### + boxy_data = [re.findall('[-+]?\d*\.?\d+',i_row[0]) for i_row in boxy_data] + + ###make variable to store our data as an array rather than list of strings### + boxy_length = len(col_names) + boxy_array = np.full((len(boxy_data),boxy_length),np.nan) + for ii, i_data in enumerate(boxy_data): + ###need to make sure our rows are the same length### + ###this is done by padding the shorter ones### + padding = boxy_length - len(i_data) + boxy_array[ii] = np.pad(np.asarray(i_data, dtype=float), (0,padding), mode='empty') + + ###grab data from the other columns that don't pertain to AC, DC, or Ph### + meta_data = dict() + keys = ['time','record','group','exmux','step','mark','flag','aux1','digaux'] + for i_detect in detectors[0:detect_num[file_num]]: + keys.append('bias-' + i_detect) - ###make variable to store our data as an array rather than list of strings### - boxy_length = len(boxy_data[0]) - boxy_array = np.full((len(boxy_data),boxy_length),np.nan) - for ii, i_data in enumerate(boxy_data): - ###need to make sure our rows are the same length### - ###this is done by padding the shorter ones### - padding = boxy_length - len(i_data) - boxy_array[ii] = np.pad(np.asarray(i_data, dtype=float), (0,padding), mode='empty') - - ###grab data from the other columns that don't pertain to AC, DC, or Ph### - meta_data = dict() - keys = ['time','record','group','exmux','step','mark','flag','aux1','digaux'] - for i_detect in detectors[0:detect_num]: - keys.append('bias-' + i_detect) - - ###data that isn't in our boxy file will be an empty list### - for key in keys: - meta_data[key] = (boxy_array[:,np.where(col_names == key)[0][0]] if - key in col_names else []) - - ###make some empty variables to store our data### - if filetype == 'non-parsed': - data_ = np.zeros(((((detect_num*source_num)*3)+1), - int(len(boxy_data)/source_num))) - elif filetype == 'parsed': - data_ = 
np.zeros(((((detect_num*source_num)*3)+1), - int(len(boxy_data)))) - - ###loop through data types### - for i_data in data_types: - - ###loop through detectors### - for i_detect in detectors[0:detect_num]: - - ###loop through sources### - for i_source in sources: - - ###determine where to store our data### - index_loc = (detectors.index(i_detect)*source_num + - (i_source-1) + (data_types.index(i_data)*(source_num*detect_num))) - - ###need to treat our filetypes differently### - if filetype == 'non-parsed': - - ###non-parsed saves timepoints in groups### - ###this should account for that### - time_points = np.arange(i_source-1,int(meta_data['record'][-1])*source_num,source_num) - - ###determine which channel to look for in boxy_array### - channel = np.where(col_names == i_detect + '-' + i_data)[0][0] - - ###save our data based on data type### - data_[index_loc,:] = boxy_array[time_points,channel] - - elif filetype == 'parsed': + ###data that isn't in our boxy file will be an empty list### + for key in keys: + meta_data[key] = (boxy_array[:,np.where(col_names == key)[0][0]] if + key in col_names else []) + + ###make some empty variables to store our data### + if filetype[file_num] == 'non-parsed': + data_ = np.zeros(((((detect_num[file_num]*source_num[file_num])*3)), + int(len(boxy_data)/source_num[file_num]))) + elif filetype[file_num] == 'parsed': + data_ = np.zeros(((((detect_num[file_num]*source_num[file_num])*3)), + int(len(boxy_data)))) + + ###loop through data types### + for i_data in data_types: + + ###loop through detectors### + for i_detect in detectors[0:detect_num[file_num]]: + + ###loop through sources### + for i_source in sources: - ###determine which channel to look for in boxy_array### - channel = np.where(col_names == i_detect + '-' + - i_data + str(i_source))[0][0] + ###determine where to store our data### + index_loc = (detectors.index(i_detect)*source_num[file_num] + + (i_source-1) + 
(data_types.index(i_data)*(source_num[file_num]*detect_num[file_num]))) - ###save our data based on data type### - data_[index_loc,:] = boxy_array[:,channel] - - # Read triggers from event file - ###add our markers to the data array based on filetype### - if type(meta_data['digaux']) is not list: - if filetype == 'non-parsed': - markers = meta_data['digaux'][np.arange(0,len(meta_data['digaux']),source_num)] - elif filetype == 'parsed': - markers = meta_data['digaux'] - data_[-1,:] = markers + ###need to treat our filetypes differently### + if filetype[file_num] == 'non-parsed': + + ###non-parsed saves timepoints in groups### + ###this should account for that### + time_points = np.arange(i_source-1, + int(meta_data['record'][-1]) + *source_num[file_num], + source_num[file_num]) + + ###determine which channel to look for in boxy_array### + channel = np.where(col_names == i_detect + '-' + i_data)[0][0] + + ###save our data based on data type### + data_[index_loc,:] = boxy_array[time_points,channel] + + elif filetype[file_num] == 'parsed': + + ###determine which channel to look for in boxy_array### + channel = np.where(col_names == i_detect + '-' + + i_data + str(i_source))[0][0] + + ###save our data based on data type### + data_[index_loc,:] = boxy_array[:,channel] + + # Read triggers from event file + ###add our markers to the data array based on filetype### + if type(meta_data['digaux']) is not list: + if filetype[file_num] == 'non-parsed': + markers.append(meta_data['digaux'][np.arange(0,len(meta_data['digaux']),source_num[file_num])]) + elif filetype[file_num] == 'parsed': + markers.append(meta_data['digaux']) + else: + markers.append(np.zeros((len(data_[0,:]),))) + + all_data.extend(data_) + + ###add markers to our data### + all_data.extend(markers) + all_data = np.asarray(all_data) print('Blank Data shape: ', data.shape) - print('Input Data shape: ', data_.shape) + print('Input Data shape: ', all_data.shape) # place our data into the data object in place - 
data[:] = data_ + data[:] = all_data return data From a9b24a4d75cc56966a5de6e1151355c08680947f Mon Sep 17 00:00:00 2001 From: Jonathan Kuziek Date: Tue, 12 May 2020 11:27:47 -0600 Subject: [PATCH 079/167] boxy.py now takes a datatype arguments to return only AC, DC, or Ph data --- mne/io/boxy/boxy.py | 51 ++++++++++--------- .../preprocessing/plot_80_boxy_processing.py | 2 +- 2 files changed, 28 insertions(+), 25 deletions(-) diff --git a/mne/io/boxy/boxy.py b/mne/io/boxy/boxy.py index 5a1112b3219..886c099908f 100644 --- a/mne/io/boxy/boxy.py +++ b/mne/io/boxy/boxy.py @@ -18,7 +18,7 @@ @fill_doc -def read_raw_boxy(fname, preload=False, verbose=None): +def read_raw_boxy(fname, datatype='all', preload=False, verbose=None): """Reader for a BOXY optical imaging recording. Parameters ---------- @@ -34,7 +34,7 @@ def read_raw_boxy(fname, preload=False, verbose=None): -------- mne.io.Raw : Documentation of attribute and methods. """ - return RawBOXY(fname, preload, verbose) + return RawBOXY(fname, datatype, preload, verbose) @fill_doc @@ -52,11 +52,11 @@ class RawBOXY(BaseRaw): """ @verbose - def __init__(self, fname, preload=False, verbose=None): + def __init__(self, fname, datatype='all', preload=False, verbose=None): from ...externals.pymatreader import read_mat from ...coreg import get_mni_fiducials, coregister_fiducials # avoid circular import prob logger.info('Loading %s' % fname) - # import pdb + # Check if required files exist and store names for later use files = dict() keys = ('mtg', 'elp', 'tol', '*.[000-999]*') @@ -71,6 +71,12 @@ def __init__(self, fname, preload=False, verbose=None): (key, len(files[key]),)) files[key] = files[key][0] + ###determine which data type to return### + if datatype in ['AC','DC','Ph']: + data_types = [datatype] + else: + raise RuntimeError('Expect AC, DC, or Ph, got %s' %datatype) + ###determine how many blocks we have per montage### blk_names = [] mtg_names = [] @@ -217,7 +223,6 @@ def __init__(self, fname, preload=False, 
verbose=None): boxy_labels = [] mrk_coords = [] mrk_labels = [] - data_types = ['AC','DC','Ph'] mtg_start = [] mtg_end = [] mtg_src_num = [] @@ -241,11 +246,12 @@ def __init__(self, fname, preload=False, verbose=None): np.vstack((source_coords[i_coord], detect_coords[i_coord])), axis=0).tolist() + source_coords[i_coord] + detect_coords[i_coord] + [chan_wavelength[i_coord]] + [0] + [0]) - boxy_labels.append('S' + - str(unique_source_labels[mtg_num].index(source_label[i_coord])+1) - + '_D' + - str(unique_detect_labels[mtg_num].index(detect_label[i_coord])+1) - + ' ' + chan_wavelength[i_coord] + i_type[0] + mtg_names[mtg_num] + i_blk[1:]) + boxy_labels.append('S' + str(unique_source_labels[mtg_num].index( + source_label[i_coord])+1) + '_D' + + str(unique_detect_labels[mtg_num].index( + detect_label[i_coord])+1) + ' ' + + chan_wavelength[i_coord] + ' ' + + mtg_names[mtg_num] + i_blk[1:]) # add extra column for triggers mrk_labels.append('Markers' + ' ' + mtg_names[mtg_num] + i_blk[1:]) @@ -299,27 +305,25 @@ def __init__(self, fname, preload=False, verbose=None): 'start_line': start_line, 'end_line': end_line, 'filetype': filetype, - 'files': files,} + 'files': files, + 'data_types': data_types,} print('Start Line: ', start_line[0]) print('End Line: ', end_line[0]) - # print('Original Difference: ', end_line-start_line) + print('Original Difference: ', end_line[0]-start_line[0]) first_samps = start_line[0] print('New first_samps: ', first_samps) diff = end_line[0] - start_line[0] - # diff = [end_line[i_line] - start_line[i_line] for i_line in range(len(end_line))] + #input file has rows for each source, output variable rearranges as columns and does not if filetype[0] == 'non-parsed': last_samps = ((diff-2) // (source_num[0])) + start_line[0] - 1 elif filetype =='parsed': last_samps = (start_line[0] + diff) - # last_samps = [((start_line[i_line] + diff[i_line]) // (source_num[i_line] -1)) - # for i_line in range(len(start_line))] + print('New last_samps: ', last_samps) 
print('New Difference: ', last_samps-first_samps) - # print('New Difference: ', [str(last_samps[i_line]-first_samps[i_line]) - # for i_line in range(len(last_samps))]) - # pdb.set_trace() + super(RawBOXY, self).__init__( info, preload, filenames=[fname], first_samps=[first_samps], last_samps=[last_samps], @@ -328,20 +332,19 @@ def __init__(self, fname, preload=False, verbose=None): def _read_segment_file(self, data, idx, fi, start, stop, cals, mult): """Read a segment of data from a file. """ - # import pdb - # pdb.set_trace() + source_num = self._raw_extras[fi]['source_num'] detect_num = self._raw_extras[fi]['detect_num'] start_line = self._raw_extras[fi]['start_line'] end_line = self._raw_extras[fi]['end_line'] filetype = self._raw_extras[fi]['filetype'] + data_types = self._raw_extras[fi]['data_types'] boxy_files = self._raw_extras[fi]['files']['*.[000-999]*'] ###detectors, sources, and data types### detectors = ['A', 'B', 'C', 'D', 'E', 'F', 'G', 'H', 'I', 'J', 'K', 'L', 'M', 'N', 'O', 'P', 'Q', 'R', 'S', 'T', 'U', 'V', 'W', 'X', 'Y', 'Z'] - data_types = ['AC','DC','Ph'] ###load our data### all_data = [] @@ -389,10 +392,10 @@ def _read_segment_file(self, data, idx, fi, start, stop, cals, mult): ###make some empty variables to store our data### if filetype[file_num] == 'non-parsed': - data_ = np.zeros(((((detect_num[file_num]*source_num[file_num])*3)), + data_ = np.zeros(((((detect_num[file_num]*source_num[file_num])*len(data_types))), int(len(boxy_data)/source_num[file_num]))) elif filetype[file_num] == 'parsed': - data_ = np.zeros(((((detect_num[file_num]*source_num[file_num])*3)), + data_ = np.zeros(((((detect_num[file_num]*source_num[file_num])*len(data_types))), int(len(boxy_data)))) ###loop through data types### @@ -406,7 +409,7 @@ def _read_segment_file(self, data, idx, fi, start, stop, cals, mult): ###determine where to store our data### index_loc = (detectors.index(i_detect)*source_num[file_num] + - (i_source-1) + 
(data_types.index(i_data)*(source_num[file_num]*detect_num[file_num]))) + (i_source-1) + (data_types.index(i_data)*(source_num[file_num]*detect_num[file_num]))) ###need to treat our filetypes differently### if filetype[file_num] == 'non-parsed': diff --git a/tutorials/preprocessing/plot_80_boxy_processing.py b/tutorials/preprocessing/plot_80_boxy_processing.py index 35a346538fc..448454f74bd 100644 --- a/tutorials/preprocessing/plot_80_boxy_processing.py +++ b/tutorials/preprocessing/plot_80_boxy_processing.py @@ -26,7 +26,7 @@ boxy_data_folder = mne.datasets.boxy_example.data_path() boxy_raw_dir = os.path.join(boxy_data_folder, 'Participant-1') -raw_intensity = mne.io.read_raw_boxy(boxy_raw_dir, verbose=True).load_data() +raw_intensity = mne.io.read_raw_boxy(boxy_raw_dir, 'DC', verbose=True).load_data() # ############################################################################### # # View location of sensors over brain surface From 532ce23fcb91a1e3f4e024d724fecc719268018f Mon Sep 17 00:00:00 2001 From: Jonathan Kuziek Date: Tue, 12 May 2020 11:43:17 -0600 Subject: [PATCH 080/167] changed tutorial to plot montage a and b --- mne/io/boxy/boxy.py | 4 +- .../preprocessing/plot_80_boxy_processing.py | 38 ++++++++++++++++++- 2 files changed, 39 insertions(+), 3 deletions(-) diff --git a/mne/io/boxy/boxy.py b/mne/io/boxy/boxy.py index 886c099908f..00eef6b7f48 100644 --- a/mne/io/boxy/boxy.py +++ b/mne/io/boxy/boxy.py @@ -18,7 +18,7 @@ @fill_doc -def read_raw_boxy(fname, datatype='all', preload=False, verbose=None): +def read_raw_boxy(fname, datatype=None, preload=False, verbose=None): """Reader for a BOXY optical imaging recording. 
Parameters ---------- @@ -52,7 +52,7 @@ class RawBOXY(BaseRaw): """ @verbose - def __init__(self, fname, datatype='all', preload=False, verbose=None): + def __init__(self, fname, datatype=None, preload=False, verbose=None): from ...externals.pymatreader import read_mat from ...coreg import get_mni_fiducials, coregister_fiducials # avoid circular import prob logger.info('Loading %s' % fname) diff --git a/tutorials/preprocessing/plot_80_boxy_processing.py b/tutorials/preprocessing/plot_80_boxy_processing.py index 448454f74bd..96fb5857988 100644 --- a/tutorials/preprocessing/plot_80_boxy_processing.py +++ b/tutorials/preprocessing/plot_80_boxy_processing.py @@ -20,6 +20,7 @@ import numpy as np import matplotlib.pyplot as plt from itertools import compress +import copy import mne @@ -28,6 +29,15 @@ boxy_raw_dir = os.path.join(boxy_data_folder, 'Participant-1') raw_intensity = mne.io.read_raw_boxy(boxy_raw_dir, 'DC', verbose=True).load_data() +###separate data based on montages### +mtg_a_indices = [i_index for i_index,i_label in enumerate(raw_intensity.info['ch_names']) if 'a01' in i_label] +mtg_b_indices = [i_index for i_index,i_label in enumerate(raw_intensity.info['ch_names']) if 'b01' in i_label] +mtg_a_data = copy.deepcopy(raw_intensity) +mtg_b_data = copy.deepcopy(raw_intensity) + +mtg_a_data.pick(mtg_a_indices) +mtg_b_data.pick(mtg_b_indices) + # ############################################################################### # # View location of sensors over brain surface # # ------------------------------------------- @@ -45,7 +55,33 @@ subject='fsaverage', trans='fsaverage', surfaces=['head-dense', 'brain'], - fnirs=['channels', 'pairs'], + fnirs=['sources','detectors', 'pairs'], + mri_fiducials=True, + dig=True, + subjects_dir=subjects_dir, + fig=fig) +mne.viz.set_3d_view(figure=fig, azimuth=20, elevation=55, distance=0.6) + +fig = mne.viz.create_3d_figure(size=(800, 600), bgcolor='white') +fig = mne.viz.plot_alignment(mtg_a_data.info, + show_axes=True, + 
subject='fsaverage', + trans='fsaverage', + surfaces=['head-dense', 'brain'], + fnirs=['sources','detectors', 'pairs'], + mri_fiducials=True, + dig=True, + subjects_dir=subjects_dir, + fig=fig) +mne.viz.set_3d_view(figure=fig, azimuth=20, elevation=55, distance=0.6) + +fig = mne.viz.create_3d_figure(size=(800, 600), bgcolor='white') +fig = mne.viz.plot_alignment(mtg_b_data.info, + show_axes=True, + subject='fsaverage', + trans='fsaverage', + surfaces=['head-dense', 'brain'], + fnirs=['sources','detectors', 'pairs'], mri_fiducials=True, dig=True, subjects_dir=subjects_dir, From c5585061c1581183fd801d870edcc165e613f438 Mon Sep 17 00:00:00 2001 From: Kyle Mathewson Date: Tue, 12 May 2020 11:59:01 -0700 Subject: [PATCH 081/167] updated dataset P1 with arr --- mne/datasets/utils.py | 4 ++-- 1 file changed, 2 insertions(+), 2 deletions(-) diff --git a/mne/datasets/utils.py b/mne/datasets/utils.py index ef7e0915e1b..9ac62249942 100644 --- a/mne/datasets/utils.py +++ b/mne/datasets/utils.py @@ -264,7 +264,7 @@ def _data_path(path=None, force_update=False, update_path=True, download=True, 'tar.gz/%s' % releases['testing'], multimodal='https://ndownloader.figshare.com/files/5999598', fnirs_motor='https://osf.io/dj3eh/download?version=1', - boxy_example='https://osf.io/hksme/download?version=3', + boxy_example='https://osf.io/hksme/download?version=4', opm='https://osf.io/p6ae7/download?version=2', visual_92_categories=[ 'https://osf.io/8ejrs/download?version=1', @@ -328,7 +328,7 @@ def _data_path(path=None, force_update=False, update_path=True, download=True, testing='1ef691944239411b869b3ed2f40a69fe', multimodal='26ec847ae9ab80f58f204d09e2c08367', fnirs_motor='c4935d19ddab35422a69f3326a01fef8', - boxy_example='0d2a6525a3fc00d010dcb9488bbefa17', + boxy_example='9e3c09cf0a581f0ac102d1b11b4f8303', opm='370ad1dcfd5c47e029e692c85358a374', visual_92_categories=['74f50bbeb65740903eadc229c9fa759f', '203410a98afc9df9ae8ba9f933370e20'], From a59fc00a9db97052d5c7e8104a7a3669cd4d58c8 
Mon Sep 17 00:00:00 2001 From: Kyle Mathewson Date: Tue, 12 May 2020 12:18:27 -0700 Subject: [PATCH 082/167] added new dataset with better variable names, removed look for .tol file --- mne/datasets/utils.py | 4 ++-- mne/io/boxy/boxy.py | 2 +- tutorials/preprocessing/plot_80_boxy_processing.py | 2 +- 3 files changed, 4 insertions(+), 4 deletions(-) diff --git a/mne/datasets/utils.py b/mne/datasets/utils.py index 9ac62249942..44975afb4fd 100644 --- a/mne/datasets/utils.py +++ b/mne/datasets/utils.py @@ -264,7 +264,7 @@ def _data_path(path=None, force_update=False, update_path=True, download=True, 'tar.gz/%s' % releases['testing'], multimodal='https://ndownloader.figshare.com/files/5999598', fnirs_motor='https://osf.io/dj3eh/download?version=1', - boxy_example='https://osf.io/hksme/download?version=4', + boxy_example='https://osf.io/hksme/download?version=5', opm='https://osf.io/p6ae7/download?version=2', visual_92_categories=[ 'https://osf.io/8ejrs/download?version=1', @@ -328,7 +328,7 @@ def _data_path(path=None, force_update=False, update_path=True, download=True, testing='1ef691944239411b869b3ed2f40a69fe', multimodal='26ec847ae9ab80f58f204d09e2c08367', fnirs_motor='c4935d19ddab35422a69f3326a01fef8', - boxy_example='9e3c09cf0a581f0ac102d1b11b4f8303', + boxy_example='cfd625fedc27e5ba3ce3e3f6a4ee0a3e', opm='370ad1dcfd5c47e029e692c85358a374', visual_92_categories=['74f50bbeb65740903eadc229c9fa759f', '203410a98afc9df9ae8ba9f933370e20'], diff --git a/mne/io/boxy/boxy.py b/mne/io/boxy/boxy.py index 00eef6b7f48..b594b6baa8a 100644 --- a/mne/io/boxy/boxy.py +++ b/mne/io/boxy/boxy.py @@ -59,7 +59,7 @@ def __init__(self, fname, datatype=None, preload=False, verbose=None): # Check if required files exist and store names for later use files = dict() - keys = ('mtg', 'elp', 'tol', '*.[000-999]*') + keys = ('mtg', 'elp', '*.[000-999]*') print(fname) for key in keys: if key == '*.[000-999]*': diff --git a/tutorials/preprocessing/plot_80_boxy_processing.py 
b/tutorials/preprocessing/plot_80_boxy_processing.py index 96fb5857988..2747b3aa54e 100644 --- a/tutorials/preprocessing/plot_80_boxy_processing.py +++ b/tutorials/preprocessing/plot_80_boxy_processing.py @@ -27,7 +27,7 @@ boxy_data_folder = mne.datasets.boxy_example.data_path() boxy_raw_dir = os.path.join(boxy_data_folder, 'Participant-1') -raw_intensity = mne.io.read_raw_boxy(boxy_raw_dir, 'DC', verbose=True).load_data() +raw_intensity = mne.io.read_raw_boxy(boxy_raw_dir, 'Ph', verbose=True).load_data() ###separate data based on montages### mtg_a_indices = [i_index for i_index,i_label in enumerate(raw_intensity.info['ch_names']) if 'a01' in i_label] From 33134d32309c8364f3378dd9bbb623700fb3e474 Mon Sep 17 00:00:00 2001 From: Jonathan Kuziek Date: Tue, 12 May 2020 16:54:07 -0600 Subject: [PATCH 083/167] made some changes based on comments. re-organised channels so lower wavelength is first, to keep scalp_coupling_index happy --- mne/io/boxy/boxy.py | 20 +++++++++++++++---- .../preprocessing/plot_80_boxy_processing.py | 5 +++-- 2 files changed, 19 insertions(+), 6 deletions(-) diff --git a/mne/io/boxy/boxy.py b/mne/io/boxy/boxy.py index b594b6baa8a..face1c9acb5 100644 --- a/mne/io/boxy/boxy.py +++ b/mne/io/boxy/boxy.py @@ -18,12 +18,14 @@ @fill_doc -def read_raw_boxy(fname, datatype=None, preload=False, verbose=None): +def read_raw_boxy(fname, datatype='AC', preload=False, verbose=None): """Reader for a BOXY optical imaging recording. Parameters ---------- fname : str Path to the BOXY data folder. + datatype : str + Type of data to return (AC, DC, or Ph) %(preload)s %(verbose)s Returns @@ -44,6 +46,8 @@ class RawBOXY(BaseRaw): ---------- fname : str Path to the BOXY data folder. 
+ datatype : str + Type of data to return (AC, DC, or Ph) %(preload)s %(verbose)s See Also @@ -52,7 +56,7 @@ class RawBOXY(BaseRaw): """ @verbose - def __init__(self, fname, datatype=None, preload=False, verbose=None): + def __init__(self, fname, datatype='AC', preload=False, verbose=None): from ...externals.pymatreader import read_mat from ...coreg import get_mni_fiducials, coregister_fiducials # avoid circular import prob logger.info('Loading %s' % fname) @@ -212,6 +216,11 @@ def __init__(self, fname, datatype=None, preload=False, verbose=None): unique_source_labels.append(mtg_source_labels) unique_detect_labels.append(mtg_detect_labels) + ###swap order to have lower wavelength first### + for i_chan in range(0,len(chan_wavelength),2): + chan_wavelength[i_chan], chan_wavelength[i_chan+1] = ( + chan_wavelength[i_chan+1],chan_wavelength[i_chan]) + ###now let's label each channel in our data### ###data is channels X timepoint where the first source_num rows correspond to### ###the first detector, and each row within that group is a different source### @@ -256,7 +265,7 @@ def __init__(self, fname, datatype=None, preload=False, verbose=None): # add extra column for triggers mrk_labels.append('Markers' + ' ' + mtg_names[mtg_num] + i_blk[1:]) mrk_coords.append(np.zeros((12,))) - + ###add triggers to the end of our data### boxy_labels.extend(mrk_labels) boxy_coords.extend(mrk_coords) @@ -332,7 +341,6 @@ def __init__(self, fname, datatype=None, preload=False, verbose=None): def _read_segment_file(self, data, idx, fi, start, stop, cals, mult): """Read a segment of data from a file. 
""" - source_num = self._raw_extras[fi]['source_num'] detect_num = self._raw_extras[fi]['detect_num'] start_line = self._raw_extras[fi]['start_line'] @@ -436,6 +444,10 @@ def _read_segment_file(self, data, idx, fi, start, stop, cals, mult): ###save our data based on data type### data_[index_loc,:] = boxy_array[:,channel] + ###swap channels to match new wavelength order### + for i_chan in range(0,len(data_),2): + data_[[i_chan,i_chan+1]] = data_[[i_chan+1,i_chan]] + # Read triggers from event file ###add our markers to the data array based on filetype### if type(meta_data['digaux']) is not list: diff --git a/tutorials/preprocessing/plot_80_boxy_processing.py b/tutorials/preprocessing/plot_80_boxy_processing.py index 2747b3aa54e..c00d1e57004 100644 --- a/tutorials/preprocessing/plot_80_boxy_processing.py +++ b/tutorials/preprocessing/plot_80_boxy_processing.py @@ -32,8 +32,9 @@ ###separate data based on montages### mtg_a_indices = [i_index for i_index,i_label in enumerate(raw_intensity.info['ch_names']) if 'a01' in i_label] mtg_b_indices = [i_index for i_index,i_label in enumerate(raw_intensity.info['ch_names']) if 'b01' in i_label] -mtg_a_data = copy.deepcopy(raw_intensity) -mtg_b_data = copy.deepcopy(raw_intensity) + +mtg_a_data = raw_intensity.copy() +mtg_b_data = raw_intensity.copy() mtg_a_data.pick(mtg_a_indices) mtg_b_data.pick(mtg_b_indices) From 10ca706326c9bbf88c17893595b7563fe0deeade Mon Sep 17 00:00:00 2001 From: Jonathan Kuziek Date: Tue, 12 May 2020 17:07:48 -0600 Subject: [PATCH 084/167] pushed the wrong tutorial file, this should fix that --- .../preprocessing/plot_80_boxy_processing.py | 58 +++++++++++++++---- 1 file changed, 48 insertions(+), 10 deletions(-) diff --git a/tutorials/preprocessing/plot_80_boxy_processing.py b/tutorials/preprocessing/plot_80_boxy_processing.py index c00d1e57004..4bf64039c73 100644 --- a/tutorials/preprocessing/plot_80_boxy_processing.py +++ b/tutorials/preprocessing/plot_80_boxy_processing.py @@ -30,14 +30,19 @@ 
raw_intensity = mne.io.read_raw_boxy(boxy_raw_dir, 'Ph', verbose=True).load_data() ###separate data based on montages### -mtg_a_indices = [i_index for i_index,i_label in enumerate(raw_intensity.info['ch_names']) if 'a01' in i_label] -mtg_b_indices = [i_index for i_index,i_label in enumerate(raw_intensity.info['ch_names']) if 'b01' in i_label] +no_mrk_indices = [i_index for i_index,i_label in enumerate(raw_intensity.info['ch_names']) + if 'Markers' not in i_label] +mtg_a_indices = [i_index for i_index,i_label in enumerate(raw_intensity.info['ch_names']) + if ' a' in i_label and 'Markers' not in i_label] +mtg_b_indices = [i_index for i_index,i_label in enumerate(raw_intensity.info['ch_names']) + if ' b' in i_label and 'Markers' not in i_label] mtg_a_data = raw_intensity.copy() mtg_b_data = raw_intensity.copy() mtg_a_data.pick(mtg_a_indices) mtg_b_data.pick(mtg_b_indices) +raw_intensity.pick(no_mrk_indices) # ############################################################################### # # View location of sensors over brain surface @@ -100,12 +105,27 @@ # # To achieve this we pick all the channels that are not considered to be short. 
picks = mne.pick_types(raw_intensity.info, meg=False, fnirs=True) +picks_a = mne.pick_types(mtg_a_data.info, meg=False, fnirs=True) +picks_b = mne.pick_types(mtg_b_data.info, meg=False, fnirs=True) + dists = mne.preprocessing.nirs.source_detector_distances( raw_intensity.info, picks=picks) -raw_intensity.pick(picks[dists < 0.06]) +dists_a = mne.preprocessing.nirs.source_detector_distances( + raw_intensity.info, picks=picks_a) +dists_b = mne.preprocessing.nirs.source_detector_distances( + raw_intensity.info, picks=picks_b) + +raw_intensity.pick(picks[dists < 0.08]) +mtg_a_data.pick(picks_a[dists_a < 0.08]) +mtg_b_data.pick(picks_b[dists_b < 0.08]) + scalings = dict(fnirs_raw=1e2) raw_intensity.plot(n_channels=10, duration=1000, scalings=scalings, show_scrollbars=True) +mtg_a_data.plot(n_channels=10, + duration=1000, scalings=scalings, show_scrollbars=True) +mtg_b_data.plot(n_channels=10, + duration=1000, scalings=scalings, show_scrollbars=True) # ############################################################################### @@ -114,9 +134,16 @@ # # # # The raw intensity values are then converted to optical density. -# raw_od = mne.preprocessing.nirs.optical_density(raw_intensity) -# raw_od.plot(n_channels=len(raw_od.ch_names), -# duration=500, show_scrollbars=False) +raw_od = mne.preprocessing.nirs.optical_density(raw_intensity) +raw_od_a = mne.preprocessing.nirs.optical_density(mtg_a_data) +raw_od_b = mne.preprocessing.nirs.optical_density(mtg_b_data) + +raw_od.plot(n_channels=len(raw_od.ch_names), + duration=500, show_scrollbars=False) +raw_od_a.plot(n_channels=len(raw_od_a.ch_names), + duration=500, show_scrollbars=False) +raw_od_b.plot(n_channels=len(raw_od_b.ch_names), + duration=500, show_scrollbars=False) # ############################################################################### @@ -132,10 +159,21 @@ # # channels, so we will not mark any channels as bad based on the scalp # # coupling index. 
-# sci = mne.preprocessing.nirs.scalp_coupling_index(raw_od) -# fig, ax = plt.subplots() -# ax.hist(sci) -# ax.set(xlabel='Scalp Coupling Index', ylabel='Count', xlim=[0, 1]) +sci = mne.preprocessing.nirs.scalp_coupling_index(raw_od) +sci_a = mne.preprocessing.nirs.scalp_coupling_index(raw_od_a) +sci_b = mne.preprocessing.nirs.scalp_coupling_index(raw_od_b) + +fig, ax = plt.subplots() +ax.hist(sci) +ax.set(xlabel='Scalp Coupling Index', ylabel='Count', xlim=[0, 1]) + +fig, ax = plt.subplots() +ax.hist(sci_a) +ax.set(xlabel='Scalp Coupling Index-A', ylabel='Count', xlim=[0, 1]) + +fig, ax = plt.subplots() +ax.hist(sci_b) +ax.set(xlabel='Scalp Coupling Index-B', ylabel='Count', xlim=[0, 1]) # ############################################################################### From 3a617fb3f782668c68aeffc7d87beaf9e3900487 Mon Sep 17 00:00:00 2001 From: Kyle Mathewson Date: Wed, 13 May 2020 00:12:34 -0700 Subject: [PATCH 085/167] style changes for flake8 on boxy.py --- mne/io/boxy/boxy.py | 461 ++++++++++++++++++++++++-------------------- 1 file changed, 248 insertions(+), 213 deletions(-) diff --git a/mne/io/boxy/boxy.py b/mne/io/boxy/boxy.py index face1c9acb5..8a339c47255 100644 --- a/mne/io/boxy/boxy.py +++ b/mne/io/boxy/boxy.py @@ -2,17 +2,13 @@ # # License: BSD (3-clause) -from configparser import ConfigParser, RawConfigParser import glob as glob import re as re -import os.path as op import numpy as np from ..base import BaseRaw -from ..constants import FIFF -from ..meas_info import create_info, _format_dig_points, read_fiducials -from ...annotations import Annotations -from ...transforms import apply_trans, _get_trans, get_ras_to_neuromag_trans +from ..meas_info import create_info +from ...transforms import apply_trans, get_ras_to_neuromag_trans from ...utils import logger, verbose, fill_doc from ...channels.montage import make_dig_montage @@ -54,11 +50,9 @@ class RawBOXY(BaseRaw): -------- mne.io.Raw : Documentation of attribute and methods. 
""" - + @verbose def __init__(self, fname, datatype='AC', preload=False, verbose=None): - from ...externals.pymatreader import read_mat - from ...coreg import get_mni_fiducials, coregister_fiducials # avoid circular import prob logger.info('Loading %s' % fname) # Check if required files exist and store names for later use @@ -74,30 +68,32 @@ def __init__(self, fname, datatype='AC', preload=False, verbose=None): raise RuntimeError('Expect one %s file, got %d' % (key, len(files[key]),)) files[key] = files[key][0] - - ###determine which data type to return### - if datatype in ['AC','DC','Ph']: + + # determine which data type to return### + if datatype in ['AC', 'DC', 'Ph']: data_types = [datatype] else: - raise RuntimeError('Expect AC, DC, or Ph, got %s' %datatype) - - ###determine how many blocks we have per montage### + raise RuntimeError('Expect AC, DC, or Ph, got %s' % datatype) + + # determine how many blocks we have per montage blk_names = [] mtg_names = [] - mtgs = re.findall('\w\.\d+',str(files['*.[000-999]*'])) - [mtg_names.append(i_mtg[0]) for i_mtg in mtgs if i_mtg[0] not in mtg_names] + mtgs = re.findall('\w\.\d+', str(files['*.[000-999]*'])) + [mtg_names.append(i_mtg[0]) for i_mtg in mtgs + if i_mtg[0] not in mtg_names] for i_mtg in mtg_names: temp = [] [temp.append(ii_mtg[2:]) for ii_mtg in mtgs if ii_mtg[0] == i_mtg] - blk_names.append(temp) - + blk_names.append(temp) + # Read header file # Parse required header fields - ###this keeps track of the line we're on### - ###mostly to know the start and stop of data (probably an easier way)### - ###load and read data to get some meta information### - ###there is alot of information at the beginning of a file### - ###but this only grabs some of it### + # this keeps track of the line we're on + # mostly to know the start and stop of data (probably an easier way) + # load and read data to get some meta information + # there is alot of information at the beginning of a file + # but this only grabs some of it + 
detect_num = [] source_num = [] aux_num = [] @@ -106,9 +102,9 @@ def __init__(self, fname, datatype='AC', preload=False, verbose=None): start_line = [] end_line = [] filetype = ['parsed' for i_file in files['*.[000-999]*']] - for file_num,i_file in enumerate(files['*.[000-999]*'],0): - with open(i_file,'r') as data: - for line_num,i_line in enumerate(data,1): + for file_num, i_file in enumerate(files['*.[000-999]*'], 0): + with open(i_file, 'r') as data: + for line_num, i_line in enumerate(data, 1): if '#DATA ENDS' in i_line: end_line.append(line_num - 1) break @@ -128,9 +124,9 @@ def __init__(self, fname, datatype='AC', preload=False, verbose=None): start_line.append(line_num) elif 'exmux' in i_line: filetype[file_num] = 'non-parsed' - + # Extract source-detectors - ###set up some variables### + # set up some variables chan_num_1 = [] chan_num_2 = [] source_label = [] @@ -138,9 +134,9 @@ def __init__(self, fname, datatype='AC', preload=False, verbose=None): chan_wavelength = [] chan_modulation = [] - ###load and read each line of the .mtg file### - with open(files['mtg'],'r') as data: - for line_num, i_line in enumerate(data,1): + # load and read each line of the .mtg file + with open(files['mtg'], 'r') as data: + for line_num, i_line in enumerate(data, 1): if line_num == 2: mtg_chan_num = [int(num) for num in i_line.split()] elif line_num > 2: @@ -151,83 +147,92 @@ def __init__(self, fname, datatype='AC', preload=False, verbose=None): detect_label.append(detector) chan_wavelength.append(wavelength) chan_modulation.append(modulation) - - # Read information about probe/montage/optodes + + # Read information about probe/montage/optodes # A word on terminology used here: - # Sources produce light - # Detectors measure light - # Sources and detectors are both called optodes - # Each source - detector pair produces a channel - # Channels are defined as the midpoint between source and detector + # Sources produce light + # Detectors measure light + # Sources and 
detectors are both called optodes + # Each source - detector pair produces a channel + # Channels are defined as the midpoint between source and detector - ###check if we are given .elp file### + # check if we are given .elp file all_labels = [] all_coords = [] fiducial_coords = [] get_label = 0 get_coords = 0 - ###load and read .elp file### - with open(files['elp'],'r') as data: + + # load and read .elp file + with open(files['elp'], 'r') as data: for i_line in data: - ###first let's get our fiducial coordinates### + # first let's get our fiducial coordinates if '%F' in i_line: fiducial_coords.append(i_line.split()[1:]) - ###check where sensor info starts### + # check where sensor info starts if '//Sensor name' in i_line: get_label = 1 elif get_label == 1: - ###grab the part after '%N' for the label### + # grab the part after '%N' for the label label = i_line.split()[1] all_labels.append(label) get_label = 0 get_coords = 1 elif get_coords == 1: X, Y, Z = i_line.split() - all_coords.append([float(X),float(Y),float(Z)]) + all_coords.append([float(X), float(Y), float(Z)]) get_coords = 0 for i_index in range(3): - fiducial_coords[i_index] = np.asarray([float(x) for x in fiducial_coords[i_index]]) + fiducial_coords[i_index] = np.asarray([float(x) + for x in + fiducial_coords[i_index]]) - ###get coordinates for sources in .mtg file from .elp file### + # get coordinates for sources in .mtg file from .elp file source_coords = [] for i_chan in source_label: if i_chan in all_labels: chan_index = all_labels.index(i_chan) source_coords.append(all_coords[chan_index]) - - ###get coordinates for detectors in .mtg file from .elp file### + + # get coordinates for detectors in .mtg file from .elp file detect_coords = [] for i_chan in detect_label: if i_chan in all_labels: chan_index = all_labels.index(i_chan) detect_coords.append(all_coords[chan_index]) - + # Generate meaningful channel names for each montage - ###get our unique labels for sources and detectors for each montage### 
+ # get our unique labels for sources and detectors for each montage unique_source_labels = [] unique_detect_labels = [] - for mtg_num, i_mtg in enumerate(mtg_chan_num,0): + for mtg_num, i_mtg in enumerate(mtg_chan_num, 0): mtg_source_labels = [] mtg_detect_labels = [] start = int(np.sum(mtg_chan_num[:mtg_num])) - end = int(np.sum(mtg_chan_num[:mtg_num+1])) - [mtg_source_labels.append(label) for label in source_label[start:end] if label not in mtg_source_labels] - [mtg_detect_labels.append(label) for label in detect_label[start:end] if label not in mtg_detect_labels] + end = int(np.sum(mtg_chan_num[:mtg_num + 1])) + [mtg_source_labels.append(label) + for label in source_label[start:end] + if label not in mtg_source_labels] + [mtg_detect_labels.append(label) + for label in detect_label[start:end] + if label not in mtg_detect_labels] unique_source_labels.append(mtg_source_labels) unique_detect_labels.append(mtg_detect_labels) - - ###swap order to have lower wavelength first### - for i_chan in range(0,len(chan_wavelength),2): - chan_wavelength[i_chan], chan_wavelength[i_chan+1] = ( - chan_wavelength[i_chan+1],chan_wavelength[i_chan]) - - ###now let's label each channel in our data### - ###data is channels X timepoint where the first source_num rows correspond to### - ###the first detector, and each row within that group is a different source### - ###should note that current .mtg files contain channels for multiple data files### - ###going to move to have a single .mtg file per participant, condition, and montage### - ###combine coordinates and label our channels### - ###will label them based on ac, dc, and ph data### + + # swap order to have lower wavelength first + for i_chan in range(0, len(chan_wavelength), 2): + chan_wavelength[i_chan], chan_wavelength[i_chan + 1] = ( + chan_wavelength[i_chan + 1], chan_wavelength[i_chan]) + + # now let's label each channel in our data + # data is channels X timepoint where the first source_num rows + # correspond to the first 
detector, and each row within that + # group is a different source should note that + # current .mtg files contain channels for multiple + # data files going to move to have a single .mtg file + # per participant, condition, and montage + # combine coordinates and label our channels + # will label them based on ac, dc, and ph data boxy_coords = [] boxy_labels = [] mrk_coords = [] @@ -237,58 +242,65 @@ def __init__(self, fname, datatype='AC', preload=False, verbose=None): mtg_src_num = [] mtg_det_num = [] blk_num = [len(blk) for blk in blk_names] - for mtg_num, i_mtg in enumerate(mtg_chan_num,0): + for mtg_num, i_mtg in enumerate(mtg_chan_num, 0): start = int(np.sum(mtg_chan_num[:mtg_num])) - end = int(np.sum(mtg_chan_num[:mtg_num+1])) - ###we will also organise some data for each montage### + end = int(np.sum(mtg_chan_num[:mtg_num + 1])) + # we will also organise some data for each montage start_blk = int(np.sum(blk_num[:mtg_num])) - ###get stop and stop lines for each montage### + # get stop and stop lines for each montage mtg_start.append(start_line[start_blk]) mtg_end.append(end_line[start_blk]) - ###get source and detector numbers for each montage### + # get source and detector numbers for each montage mtg_src_num.append(source_num[start_blk]) mtg_det_num.append(detect_num[start_blk]) for i_blk in blk_names[mtg_num]: for i_type in data_types: - for i_coord in range(start,end): + for i_coord in range(start, end): boxy_coords.append(np.mean( - np.vstack((source_coords[i_coord], detect_coords[i_coord])), - axis=0).tolist() + source_coords[i_coord] + - detect_coords[i_coord] + [chan_wavelength[i_coord]] + [0] + [0]) - boxy_labels.append('S' + str(unique_source_labels[mtg_num].index( - source_label[i_coord])+1) + '_D' + - str(unique_detect_labels[mtg_num].index( - detect_label[i_coord])+1) + ' ' + - chan_wavelength[i_coord] + ' ' + - mtg_names[mtg_num] + i_blk[1:]) - + np.vstack((source_coords[i_coord], + detect_coords[i_coord])), + axis=0).tolist() + 
source_coords[i_coord] + + detect_coords[i_coord] + + [chan_wavelength[i_coord]] + + [0] + [0]) + boxy_labels.append('S' + str( + unique_source_labels[mtg_num].index( + source_label[i_coord]) + 1) + '_D' + + str(unique_detect_labels[mtg_num].index( + detect_label[i_coord]) + 1) + + ' ' + chan_wavelength[i_coord] + ' ' + + mtg_names[mtg_num] + i_blk[1:]) + # add extra column for triggers - mrk_labels.append('Markers' + ' ' + mtg_names[mtg_num] + i_blk[1:]) + mrk_labels.append('Markers' + ' ' + + mtg_names[mtg_num] + i_blk[1:]) mrk_coords.append(np.zeros((12,))) - - ###add triggers to the end of our data### + + # add triggers to the end of our data boxy_labels.extend(mrk_labels) boxy_coords.extend(mrk_coords) - + # convert to floats boxy_coords = np.array(boxy_coords, float) all_coords = np.array(all_coords, float) - ###make our montage### - ###montage only wants channel coords, so need to grab those, convert to### - ###array, then make a dict with labels### - all_chan_dict = dict(zip(all_labels,all_coords)) + # make our montage + # montage only wants channel coords, so need to grab those, convert to + # array, then make a dict with labels + all_chan_dict = dict(zip(all_labels, all_coords)) my_dig_montage = make_dig_montage(ch_pos=all_chan_dict, - coord_frame='unknown', - nasion = fiducial_coords[0], - lpa = fiducial_coords[1], - rpa = fiducial_coords[2]) - - ###create info structure### + coord_frame='unknown', + nasion=fiducial_coords[0], + lpa=fiducial_coords[1], + rpa=fiducial_coords[2]) + + # create info structure info = create_info(boxy_labels, srate[0], ch_types='fnirs_raw') - ###add dig info### - ## this also applies a transform to the data into neuromag space based on fiducials + # add dig info + + # this also applies a transform to the data into neuromag space + # based on fiducials info.set_montage(my_dig_montage) # Store channel, source, and detector locations @@ -296,48 +308,53 @@ def __init__(self, fname, datatype='AC', preload=False, verbose=None): # 
The source location is stored in the second 3 entries of loc. # The detector location is stored in the third 3 entries of loc. # Also encode the light frequency in the structure. - - ###place our coordinates and wavelengths for each channel### - # # These are all in actual 3d individual coordinates, so let's transform them to - # # the Neuromag head coordinate frame - native_head_t = get_ras_to_neuromag_trans(fiducial_coords[0], - fiducial_coords[1], - fiducial_coords[2]) - + + # place our coordinates and wavelengths for each channel + # These are all in actual 3d individual coordinates, + # so let's transform them to the Neuromag head coordinate frame + native_head_t = get_ras_to_neuromag_trans(fiducial_coords[0], + fiducial_coords[1], + fiducial_coords[2]) + for i_chan in range(len(boxy_labels)): - temp_ch_src_det = apply_trans(native_head_t, boxy_coords[i_chan][:9].reshape(3, 3)).ravel() - temp_other = np.asarray(boxy_coords[i_chan][9:], dtype=np.float64) # add wavelength and placeholders - info['chs'][i_chan]['loc'] = np.concatenate((temp_ch_src_det, temp_other), axis=0) - + temp_ch_src_det = apply_trans(native_head_t, + boxy_coords[i_chan][:9].reshape(3, 3) + ).ravel() + # add wavelength and placeholders + temp_other = np.asarray(boxy_coords[i_chan][9:], dtype=np.float64) + info['chs'][i_chan]['loc'] = np.concatenate((temp_ch_src_det, + temp_other), axis=0) + raw_extras = {'source_num': source_num, - 'detect_num': detect_num, - 'start_line': start_line, - 'end_line': end_line, - 'filetype': filetype, - 'files': files, - 'data_types': data_types,} - + 'detect_num': detect_num, + 'start_line': start_line, + 'end_line': end_line, + 'filetype': filetype, + 'files': files, + 'data_types': data_types} + print('Start Line: ', start_line[0]) print('End Line: ', end_line[0]) - print('Original Difference: ', end_line[0]-start_line[0]) + print('Original Difference: ', end_line[0] - start_line[0]) first_samps = start_line[0] print('New first_samps: ', first_samps) diff = 
end_line[0] - start_line[0] - #input file has rows for each source, output variable rearranges as columns and does not + # input file has rows for each source, + # output variable rearranges as columns and does not if filetype[0] == 'non-parsed': - last_samps = ((diff-2) // (source_num[0])) + start_line[0] - 1 - elif filetype =='parsed': + last_samps = ((diff - 2) // (source_num[0])) + start_line[0] - 1 + elif filetype == 'parsed': last_samps = (start_line[0] + diff) print('New last_samps: ', last_samps) - print('New Difference: ', last_samps-first_samps) + print('New Difference: ', last_samps - first_samps) super(RawBOXY, self).__init__( - info, preload, filenames=[fname], first_samps=[first_samps], + info, preload, filenames=[fname], first_samps=[first_samps], last_samps=[last_samps], raw_extras=[raw_extras], verbose=verbose) - + def _read_segment_file(self, data, idx, fi, start, stop, cals, mult): """Read a segment of data from a file. """ @@ -348,125 +365,143 @@ def _read_segment_file(self, data, idx, fi, start, stop, cals, mult): filetype = self._raw_extras[fi]['filetype'] data_types = self._raw_extras[fi]['data_types'] boxy_files = self._raw_extras[fi]['files']['*.[000-999]*'] - - ###detectors, sources, and data types### - detectors = ['A', 'B', 'C', 'D', 'E', 'F', 'G', 'H', 'I', 'J', 'K', 'L', - 'M', 'N', 'O', 'P', 'Q', 'R', 'S', 'T', 'U', 'V', 'W', 'X', - 'Y', 'Z'] - - ###load our data### + + # detectors, sources, and data types + detectors = ['A', 'B', 'C', 'D', 'E', 'F', 'G', 'H', 'I', 'J', 'K', + 'L', 'M', 'N', 'O', 'P', 'Q', 'R', 'S', 'T', 'U', 'V', + 'W', 'X', 'Y', 'Z'] + + # load our data all_data = [] markers = [] for file_num, boxy_file in enumerate(boxy_files): boxy_data = [] - with open(boxy_file,'r') as data_file: - for line_num, i_line in enumerate(data_file,1): + with open(boxy_file, 'r') as data_file: + for line_num, i_line in enumerate(data_file, 1): if line_num > start_line[file_num] and line_num <= end_line[file_num]: 
boxy_data.append(i_line.rsplit(' ')) - - sources = np.arange(1,source_num[file_num]+1,1) - - ###get column names from the first row of our boxy data### - col_names = np.asarray(re.findall('\w+\-\w+|\w+\-\d+|\w+',boxy_data[0][0])) + + sources = np.arange(1, source_num[file_num] + 1, 1) + + # get column names from the first row of our boxy data + col_names = np.asarray(re.findall('\w+\-\w+|\w+\-\d+|\w+', + boxy_data[0][0])) del boxy_data[0] - - ###sometimes there is an empty line before our data starts### - ###this should remove them### - while re.findall('[-+]?\d*\.?\d+',boxy_data[0][0]) == []: + + # sometimes there is an empty line before our data starts + # this should remove them + while re.findall('[-+]?\d*\.?\d+', boxy_data[0][0]) == []: del boxy_data[0] - - ###grab the individual data points for each column### - boxy_data = [re.findall('[-+]?\d*\.?\d+',i_row[0]) for i_row in boxy_data] - - ###make variable to store our data as an array rather than list of strings### + + # grab the individual data points for each column + boxy_data = [re.findall('[-+]?\d*\.?\d+', i_row[0]) + for i_row in boxy_data] + + # make variable to store our data as an array + # rather than list of strings boxy_length = len(col_names) - boxy_array = np.full((len(boxy_data),boxy_length),np.nan) + boxy_array = np.full((len(boxy_data), boxy_length), np.nan) for ii, i_data in enumerate(boxy_data): - ###need to make sure our rows are the same length### - ###this is done by padding the shorter ones### + # need to make sure our rows are the same length + # this is done by padding the shorter ones padding = boxy_length - len(i_data) - boxy_array[ii] = np.pad(np.asarray(i_data, dtype=float), (0,padding), mode='empty') - - ###grab data from the other columns that don't pertain to AC, DC, or Ph### + boxy_array[ii] = np.pad(np.asarray(i_data, dtype=float), + (0, padding), mode='empty') + + # grab data from the other columns + # that don't pertain to AC, DC, or Ph meta_data = dict() - keys = 
['time','record','group','exmux','step','mark','flag','aux1','digaux'] + keys = ['time', 'record', 'group', 'exmux', 'step', 'mark', + 'flag', 'aux1', 'digaux'] for i_detect in detectors[0:detect_num[file_num]]: keys.append('bias-' + i_detect) - - ###data that isn't in our boxy file will be an empty list### + + # data that isn't in our boxy file will be an empty list for key in keys: - meta_data[key] = (boxy_array[:,np.where(col_names == key)[0][0]] if - key in col_names else []) - - ###make some empty variables to store our data### + meta_data[key] = (boxy_array[:, + np.where(col_names == key)[0][0]] if + key in col_names else []) + + # make some empty variables to store our data if filetype[file_num] == 'non-parsed': - data_ = np.zeros(((((detect_num[file_num]*source_num[file_num])*len(data_types))), - int(len(boxy_data)/source_num[file_num]))) + data_ = np.zeros(((((detect_num[file_num] * + source_num[file_num]) * len(data_types))), + int(len(boxy_data) / source_num[file_num]))) elif filetype[file_num] == 'parsed': - data_ = np.zeros(((((detect_num[file_num]*source_num[file_num])*len(data_types))), - int(len(boxy_data)))) - - ###loop through data types### + data_ = np.zeros(((((detect_num[file_num] * + source_num[file_num]) * len(data_types))), + int(len(boxy_data)))) + + # loop through data types for i_data in data_types: - - ###loop through detectors### + + # loop through detectors for i_detect in detectors[0:detect_num[file_num]]: - - ###loop through sources### - for i_source in sources: - - ###determine where to store our data### - index_loc = (detectors.index(i_detect)*source_num[file_num] + - (i_source-1) + (data_types.index(i_data)*(source_num[file_num]*detect_num[file_num]))) - - ###need to treat our filetypes differently### + + # loop through sources + for i_source in sources: + + # determine where to store our data + index_loc = (detectors.index(i_detect) * + source_num[file_num] + + (i_source - 1) + + (data_types.index(i_data) * + 
(source_num[file_num] * + detect_num[file_num]))) + + # need to treat our filetypes differently if filetype[file_num] == 'non-parsed': - - ###non-parsed saves timepoints in groups### - ###this should account for that### - time_points = np.arange(i_source-1, - int(meta_data['record'][-1]) - *source_num[file_num], + + # non-parsed saves timepoints in groups + # this should account for that + time_points = np.arange(i_source - 1, + int( + meta_data['record'][-1] + ) * source_num[file_num], source_num[file_num]) - - ###determine which channel to look for in boxy_array### - channel = np.where(col_names == i_detect + '-' + i_data)[0][0] - - ###save our data based on data type### - data_[index_loc,:] = boxy_array[time_points,channel] - - elif filetype[file_num] == 'parsed': - - ###determine which channel to look for in boxy_array### - channel = np.where(col_names == i_detect + '-' + + + # determine which channel to look for in boxy_array + channel = np.where(col_names == i_detect + + '-' + i_data)[0][0] + + # save our data based on data type + data_[index_loc, :] = boxy_array[time_points, + channel] + + elif filetype[file_num] == 'parsed': + + # determine which channel to look for in boxy_array + channel = np.where(col_names == i_detect + '-' + i_data + str(i_source))[0][0] - - ###save our data based on data type### - data_[index_loc,:] = boxy_array[:,channel] - - ###swap channels to match new wavelength order### - for i_chan in range(0,len(data_),2): - data_[[i_chan,i_chan+1]] = data_[[i_chan+1,i_chan]] - + + # save our data based on data type + data_[index_loc, :] = boxy_array[:, channel] + + # swap channels to match new wavelength order + for i_chan in range(0, len(data_), 2): + data_[[i_chan, i_chan + 1]] = data_[[i_chan + 1, i_chan]] + # Read triggers from event file - ###add our markers to the data array based on filetype### + # add our markers to the data array based on filetype### if type(meta_data['digaux']) is not list: if filetype[file_num] == 'non-parsed': - 
markers.append(meta_data['digaux'][np.arange(0,len(meta_data['digaux']),source_num[file_num])]) + markers.append(meta_data['digaux'][np.arange(0, + len(meta_data['digaux']), + source_num[file_num])]) elif filetype[file_num] == 'parsed': markers.append(meta_data['digaux']) else: - markers.append(np.zeros((len(data_[0,:]),))) - + markers.append(np.zeros((len(data_[0, :]),))) + all_data.extend(data_) - - ###add markers to our data### + + # add markers to our data all_data.extend(markers) all_data = np.asarray(all_data) - + print('Blank Data shape: ', data.shape) print('Input Data shape: ', all_data.shape) # place our data into the data object in place data[:] = all_data - + return data From dc62a0314c94679f4921e4a71564ca3b9f3166bd Mon Sep 17 00:00:00 2001 From: Jonathan Kuziek Date: Wed, 13 May 2020 12:16:41 -0600 Subject: [PATCH 086/167] tested parsed data set, channel numbers considered across all montages, added messages indicating data size for all blocks and montages --- mne/io/boxy/boxy.py | 51 ++++++++++++++++++++++++++++++--------------- 1 file changed, 34 insertions(+), 17 deletions(-) diff --git a/mne/io/boxy/boxy.py b/mne/io/boxy/boxy.py index 8a339c47255..4ebdc3c840c 100644 --- a/mne/io/boxy/boxy.py +++ b/mne/io/boxy/boxy.py @@ -85,7 +85,7 @@ def __init__(self, fname, datatype='AC', preload=False, verbose=None): temp = [] [temp.append(ii_mtg[2:]) for ii_mtg in mtgs if ii_mtg[0] == i_mtg] blk_names.append(temp) - + # Read header file # Parse required header fields # this keeps track of the line we're on @@ -193,6 +193,8 @@ def __init__(self, fname, datatype='AC', preload=False, verbose=None): if i_chan in all_labels: chan_index = all_labels.index(i_chan) source_coords.append(all_coords[chan_index]) + else: + print(i_chan) # get coordinates for detectors in .mtg file from .elp file detect_coords = [] @@ -202,22 +204,18 @@ def __init__(self, fname, datatype='AC', preload=False, verbose=None): detect_coords.append(all_coords[chan_index]) # Generate meaningful 
channel names for each montage - # get our unique labels for sources and detectors for each montage + # get our unique labels for sources and detectors for each montage unique_source_labels = [] unique_detect_labels = [] for mtg_num, i_mtg in enumerate(mtg_chan_num, 0): - mtg_source_labels = [] - mtg_detect_labels = [] start = int(np.sum(mtg_chan_num[:mtg_num])) end = int(np.sum(mtg_chan_num[:mtg_num + 1])) - [mtg_source_labels.append(label) + [unique_source_labels.append(label) for label in source_label[start:end] - if label not in mtg_source_labels] - [mtg_detect_labels.append(label) + if label not in unique_source_labels] + [unique_detect_labels.append(label) for label in detect_label[start:end] - if label not in mtg_detect_labels] - unique_source_labels.append(mtg_source_labels) - unique_detect_labels.append(mtg_detect_labels) + if label not in unique_detect_labels] # swap order to have lower wavelength first for i_chan in range(0, len(chan_wavelength), 2): @@ -264,9 +262,9 @@ def __init__(self, fname, datatype='AC', preload=False, verbose=None): [chan_wavelength[i_coord]] + [0] + [0]) boxy_labels.append('S' + str( - unique_source_labels[mtg_num].index( + unique_source_labels.index( source_label[i_coord]) + 1) + '_D' + - str(unique_detect_labels[mtg_num].index( + str(unique_detect_labels.index( detect_label[i_coord]) + 1) + ' ' + chan_wavelength[i_coord] + ' ' + mtg_names[mtg_num] + i_blk[1:]) @@ -275,7 +273,7 @@ def __init__(self, fname, datatype='AC', preload=False, verbose=None): mrk_labels.append('Markers' + ' ' + mtg_names[mtg_num] + i_blk[1:]) mrk_coords.append(np.zeros((12,))) - + # add triggers to the end of our data boxy_labels.extend(mrk_labels) boxy_coords.extend(mrk_coords) @@ -332,7 +330,26 @@ def __init__(self, fname, datatype='AC', preload=False, verbose=None): 'filetype': filetype, 'files': files, 'data_types': data_types} - + + ###check to make sure data is the same length for each file + ###boxy can be set to only record so many sample points 
per recording + ###so start and stop lines may differ between files for a given + ###participant/experiment, but amount of data should be the same + ###check start lines + (print('Start lines the same!') if len(set(start_line)) == 1 else + print('Start lines different!')) + + ###check end lines + (print('End lines the same!') if len(set(end_line)) == 1 else + print('End lines different!')) + + ###now make sure data lengths are the same + data_length = ([end_line[i_line] - start_line[i_line] for i_line, + line_num in enumerate(start_line)]) + + (print('Data sizes are the same!') if len(set(data_length)) == 1 else + print('Data sizes are different!')) + print('Start Line: ', start_line[0]) print('End Line: ', end_line[0]) print('Original Difference: ', end_line[0] - start_line[0]) @@ -344,9 +361,9 @@ def __init__(self, fname, datatype='AC', preload=False, verbose=None): # output variable rearranges as columns and does not if filetype[0] == 'non-parsed': last_samps = ((diff - 2) // (source_num[0])) + start_line[0] - 1 - elif filetype == 'parsed': - last_samps = (start_line[0] + diff) - + elif filetype[0] == 'parsed': + last_samps = (start_line[0] + diff - 3) + print('New last_samps: ', last_samps) print('New Difference: ', last_samps - first_samps) From 7fe90ff4a85b998e35e00ba7e4ade8c339bddfe2 Mon Sep 17 00:00:00 2001 From: Jonathan Kuziek Date: Wed, 13 May 2020 16:28:18 -0600 Subject: [PATCH 087/167] blocks are now appended together, so data is now n channels by (m timepoints x b blocks). 
removed block and montage indicators from channel labels, and change plot_80 tutorial to take the label changes into account --- mne/io/boxy/boxy.py | 291 +++++++++--------- .../preprocessing/plot_80_boxy_processing.py | 6 +- 2 files changed, 154 insertions(+), 143 deletions(-) diff --git a/mne/io/boxy/boxy.py b/mne/io/boxy/boxy.py index 4ebdc3c840c..1e94447df2f 100644 --- a/mne/io/boxy/boxy.py +++ b/mne/io/boxy/boxy.py @@ -251,27 +251,25 @@ def __init__(self, fname, datatype='AC', preload=False, verbose=None): # get source and detector numbers for each montage mtg_src_num.append(source_num[start_blk]) mtg_det_num.append(detect_num[start_blk]) - for i_blk in blk_names[mtg_num]: - for i_type in data_types: - for i_coord in range(start, end): - boxy_coords.append(np.mean( - np.vstack((source_coords[i_coord], - detect_coords[i_coord])), - axis=0).tolist() + source_coords[i_coord] + - detect_coords[i_coord] + - [chan_wavelength[i_coord]] + - [0] + [0]) - boxy_labels.append('S' + str( - unique_source_labels.index( - source_label[i_coord]) + 1) + '_D' + - str(unique_detect_labels.index( - detect_label[i_coord]) + 1) + - ' ' + chan_wavelength[i_coord] + ' ' + - mtg_names[mtg_num] + i_blk[1:]) + for i_type in data_types: + for i_coord in range(start, end): + boxy_coords.append(np.mean( + np.vstack((source_coords[i_coord], + detect_coords[i_coord])), + axis=0).tolist() + source_coords[i_coord] + + detect_coords[i_coord] + + [chan_wavelength[i_coord]] + + [0] + [0]) + boxy_labels.append('S' + str( + unique_source_labels.index( + source_label[i_coord]) + 1) + '_D' + + str(unique_detect_labels.index( + detect_label[i_coord]) + 1) + + ' ' + chan_wavelength[i_coord]) # add extra column for triggers mrk_labels.append('Markers' + ' ' + - mtg_names[mtg_num] + i_blk[1:]) + mtg_names[mtg_num]) mrk_coords.append(np.zeros((12,))) # add triggers to the end of our data @@ -329,7 +327,9 @@ def __init__(self, fname, datatype='AC', preload=False, verbose=None): 'end_line': end_line, 
'filetype': filetype, 'files': files, - 'data_types': data_types} + 'montages': mtg_names, + 'blocks': blk_names, + 'data_types': data_types,} ###check to make sure data is the same length for each file ###boxy can be set to only record so many sample points per recording @@ -360,13 +360,14 @@ def __init__(self, fname, datatype='AC', preload=False, verbose=None): # input file has rows for each source, # output variable rearranges as columns and does not if filetype[0] == 'non-parsed': - last_samps = ((diff - 2) // (source_num[0])) + start_line[0] - 1 + last_samps = ((((diff - 2)*len(blk_names)) // (source_num[0])) + + start_line[0] - 1) elif filetype[0] == 'parsed': - last_samps = (start_line[0] + diff - 3) + last_samps = (start_line[0] + ((diff - 3)*len(blk_names))) print('New last_samps: ', last_samps) print('New Difference: ', last_samps - first_samps) - + super(RawBOXY, self).__init__( info, preload, filenames=[fname], first_samps=[first_samps], last_samps=[last_samps], @@ -381,6 +382,8 @@ def _read_segment_file(self, data, idx, fi, start, stop, cals, mult): end_line = self._raw_extras[fi]['end_line'] filetype = self._raw_extras[fi]['filetype'] data_types = self._raw_extras[fi]['data_types'] + montages = self._raw_extras[fi]['montages'] + blocks = self._raw_extras[fi]['blocks'] boxy_files = self._raw_extras[fi]['files']['*.[000-999]*'] # detectors, sources, and data types @@ -390,130 +393,138 @@ def _read_segment_file(self, data, idx, fi, start, stop, cals, mult): # load our data all_data = [] - markers = [] - for file_num, boxy_file in enumerate(boxy_files): - boxy_data = [] - with open(boxy_file, 'r') as data_file: - for line_num, i_line in enumerate(data_file, 1): - if line_num > start_line[file_num] and line_num <= end_line[file_num]: - boxy_data.append(i_line.rsplit(' ')) - - sources = np.arange(1, source_num[file_num] + 1, 1) - - # get column names from the first row of our boxy data - col_names = np.asarray(re.findall('\w+\-\w+|\w+\-\d+|\w+', - 
boxy_data[0][0])) - del boxy_data[0] - - # sometimes there is an empty line before our data starts - # this should remove them - while re.findall('[-+]?\d*\.?\d+', boxy_data[0][0]) == []: + all_markers = [] + for i_mtg, mtg_name in enumerate(montages): + all_blocks = [] + block_markers = [] + for i_blk, blk_name in enumerate(blocks[i_mtg]): + file_num = i_blk + (i_mtg*len(blocks[i_mtg])) + boxy_file = boxy_files[file_num] + boxy_data = [] + with open(boxy_file, 'r') as data_file: + for line_num, i_line in enumerate(data_file, 1): + if line_num > start_line[file_num] and line_num <= end_line[file_num]: + boxy_data.append(i_line.rsplit(' ')) + + sources = np.arange(1, source_num[file_num] + 1, 1) + + # get column names from the first row of our boxy data + col_names = np.asarray(re.findall('\w+\-\w+|\w+\-\d+|\w+', + boxy_data[0][0])) del boxy_data[0] - - # grab the individual data points for each column - boxy_data = [re.findall('[-+]?\d*\.?\d+', i_row[0]) - for i_row in boxy_data] - - # make variable to store our data as an array - # rather than list of strings - boxy_length = len(col_names) - boxy_array = np.full((len(boxy_data), boxy_length), np.nan) - for ii, i_data in enumerate(boxy_data): - # need to make sure our rows are the same length - # this is done by padding the shorter ones - padding = boxy_length - len(i_data) - boxy_array[ii] = np.pad(np.asarray(i_data, dtype=float), - (0, padding), mode='empty') - - # grab data from the other columns - # that don't pertain to AC, DC, or Ph - meta_data = dict() - keys = ['time', 'record', 'group', 'exmux', 'step', 'mark', - 'flag', 'aux1', 'digaux'] - for i_detect in detectors[0:detect_num[file_num]]: - keys.append('bias-' + i_detect) - - # data that isn't in our boxy file will be an empty list - for key in keys: - meta_data[key] = (boxy_array[:, - np.where(col_names == key)[0][0]] if - key in col_names else []) - - # make some empty variables to store our data - if filetype[file_num] == 'non-parsed': - data_ = 
np.zeros(((((detect_num[file_num] * - source_num[file_num]) * len(data_types))), - int(len(boxy_data) / source_num[file_num]))) - elif filetype[file_num] == 'parsed': - data_ = np.zeros(((((detect_num[file_num] * - source_num[file_num]) * len(data_types))), - int(len(boxy_data)))) - - # loop through data types - for i_data in data_types: - - # loop through detectors + + # sometimes there is an empty line before our data starts + # this should remove them + while re.findall('[-+]?\d*\.?\d+', boxy_data[0][0]) == []: + del boxy_data[0] + + # grab the individual data points for each column + boxy_data = [re.findall('[-+]?\d*\.?\d+', i_row[0]) + for i_row in boxy_data] + + # make variable to store our data as an array + # rather than list of strings + boxy_length = len(col_names) + boxy_array = np.full((len(boxy_data), boxy_length), np.nan) + for ii, i_data in enumerate(boxy_data): + # need to make sure our rows are the same length + # this is done by padding the shorter ones + padding = boxy_length - len(i_data) + boxy_array[ii] = np.pad(np.asarray(i_data, dtype=float), + (0, padding), mode='empty') + + # grab data from the other columns + # that don't pertain to AC, DC, or Ph + meta_data = dict() + keys = ['time', 'record', 'group', 'exmux', 'step', 'mark', + 'flag', 'aux1', 'digaux'] for i_detect in detectors[0:detect_num[file_num]]: - - # loop through sources - for i_source in sources: - - # determine where to store our data - index_loc = (detectors.index(i_detect) * - source_num[file_num] + - (i_source - 1) + - (data_types.index(i_data) * - (source_num[file_num] * - detect_num[file_num]))) - - # need to treat our filetypes differently - if filetype[file_num] == 'non-parsed': - - # non-parsed saves timepoints in groups - # this should account for that - time_points = np.arange(i_source - 1, - int( - meta_data['record'][-1] - ) * source_num[file_num], - source_num[file_num]) - - # determine which channel to look for in boxy_array - channel = np.where(col_names == 
i_detect + - '-' + i_data)[0][0] - - # save our data based on data type - data_[index_loc, :] = boxy_array[time_points, - channel] - - elif filetype[file_num] == 'parsed': - - # determine which channel to look for in boxy_array - channel = np.where(col_names == i_detect + '-' + - i_data + str(i_source))[0][0] - - # save our data based on data type - data_[index_loc, :] = boxy_array[:, channel] - - # swap channels to match new wavelength order - for i_chan in range(0, len(data_), 2): - data_[[i_chan, i_chan + 1]] = data_[[i_chan + 1, i_chan]] - - # Read triggers from event file - # add our markers to the data array based on filetype### - if type(meta_data['digaux']) is not list: + keys.append('bias-' + i_detect) + + # data that isn't in our boxy file will be an empty list + for key in keys: + meta_data[key] = (boxy_array[:, + np.where(col_names == key)[0][0]] if + key in col_names else []) + + # make some empty variables to store our data if filetype[file_num] == 'non-parsed': - markers.append(meta_data['digaux'][np.arange(0, - len(meta_data['digaux']), - source_num[file_num])]) + data_ = np.zeros(((((detect_num[file_num] * + source_num[file_num]) * len(data_types))), + int(len(boxy_data) / source_num[file_num]))) elif filetype[file_num] == 'parsed': - markers.append(meta_data['digaux']) - else: - markers.append(np.zeros((len(data_[0, :]),))) - - all_data.extend(data_) + data_ = np.zeros(((((detect_num[file_num] * + source_num[file_num]) * len(data_types))), + int(len(boxy_data)))) + + # loop through data types + for i_data in data_types: + + # loop through detectors + for i_detect in detectors[0:detect_num[file_num]]: + + # loop through sources + for i_source in sources: + + # determine where to store our data + index_loc = (detectors.index(i_detect) * + source_num[file_num] + + (i_source - 1) + + (data_types.index(i_data) * + (source_num[file_num] * + detect_num[file_num]))) + + # need to treat our filetypes differently + if filetype[file_num] == 'non-parsed': + + 
# non-parsed saves timepoints in groups + # this should account for that + time_points = np.arange(i_source - 1, + int( + meta_data['record'][-1] + ) * source_num[file_num], + source_num[file_num]) + + # determine which channel to look for in boxy_array + channel = np.where(col_names == i_detect + + '-' + i_data)[0][0] + + # save our data based on data type + data_[index_loc, :] = boxy_array[time_points, + channel] + + elif filetype[file_num] == 'parsed': + + # determine which channel to look for in boxy_array + channel = np.where(col_names == i_detect + '-' + + i_data + str(i_source))[0][0] + + # save our data based on data type + data_[index_loc, :] = boxy_array[:, channel] + + # swap channels to match new wavelength order + for i_chan in range(0, len(data_), 2): + data_[[i_chan, i_chan + 1]] = data_[[i_chan + 1, i_chan]] + + # Read triggers from event file + # add our markers to the data array based on filetype### + if type(meta_data['digaux']) is not list: + if filetype[file_num] == 'non-parsed': + block_markers.append(meta_data['digaux'][np.arange(0, + len(meta_data['digaux']), + source_num[file_num])]) + elif filetype[file_num] == 'parsed': + block_markers.append(meta_data['digaux']) + else: + block_markers.append(np.zeros((len(data_[0, :]),))) + + all_blocks.append(data_) + + all_data.extend(np.hstack(all_blocks)) + all_markers.append(np.hstack(block_markers)) # add markers to our data - all_data.extend(markers) + all_data.extend(all_markers) all_data = np.asarray(all_data) print('Blank Data shape: ', data.shape) diff --git a/tutorials/preprocessing/plot_80_boxy_processing.py b/tutorials/preprocessing/plot_80_boxy_processing.py index 4bf64039c73..79108607884 100644 --- a/tutorials/preprocessing/plot_80_boxy_processing.py +++ b/tutorials/preprocessing/plot_80_boxy_processing.py @@ -20,7 +20,7 @@ import numpy as np import matplotlib.pyplot as plt from itertools import compress -import copy +import re as re import mne @@ -33,9 +33,9 @@ no_mrk_indices = [i_index 
for i_index,i_label in enumerate(raw_intensity.info['ch_names']) if 'Markers' not in i_label] mtg_a_indices = [i_index for i_index,i_label in enumerate(raw_intensity.info['ch_names']) - if ' a' in i_label and 'Markers' not in i_label] + if re.search(r'S[1-5]_', i_label)] mtg_b_indices = [i_index for i_index,i_label in enumerate(raw_intensity.info['ch_names']) - if ' b' in i_label and 'Markers' not in i_label] + if re.search(r'S([6-9]|10)_', i_label)] mtg_a_data = raw_intensity.copy() mtg_b_data = raw_intensity.copy() From 9199c2a522b4a7c1329338185b10b4251440e7c7 Mon Sep 17 00:00:00 2001 From: Jonathan Kuziek Date: Thu, 14 May 2020 13:25:43 -0600 Subject: [PATCH 088/167] added markers to indicate the end of each block. will also check for event files and place those in the marker channels, if the files exist --- mne/io/boxy/boxy.py | 65 ++++++++++++++----- .../preprocessing/plot_80_boxy_processing.py | 14 ++-- 2 files changed, 57 insertions(+), 22 deletions(-) diff --git a/mne/io/boxy/boxy.py b/mne/io/boxy/boxy.py index 1e94447df2f..39c45040306 100644 --- a/mne/io/boxy/boxy.py +++ b/mne/io/boxy/boxy.py @@ -5,6 +5,8 @@ import glob as glob import re as re import numpy as np +import scipy.io +import os from ..base import BaseRaw from ..meas_info import create_info @@ -193,8 +195,6 @@ def __init__(self, fname, datatype='AC', preload=False, verbose=None): if i_chan in all_labels: chan_index = all_labels.index(i_chan) source_coords.append(all_coords[chan_index]) - else: - print(i_chan) # get coordinates for detectors in .mtg file from .elp file detect_coords = [] @@ -360,10 +360,10 @@ def __init__(self, fname, datatype='AC', preload=False, verbose=None): # input file has rows for each source, # output variable rearranges as columns and does not if filetype[0] == 'non-parsed': - last_samps = ((((diff - 2)*len(blk_names)) // (source_num[0])) + + last_samps = ((((diff - 2)*len(blk_names[0])) // (source_num[0])) + start_line[0] - 1) elif filetype[0] == 'parsed': - last_samps 
= (start_line[0] + ((diff - 3)*len(blk_names))) + last_samps = (start_line[0] + ((diff - 3)*len(blk_names[0]))) print('New last_samps: ', last_samps) print('New Difference: ', last_samps - first_samps) @@ -385,6 +385,27 @@ def _read_segment_file(self, data, idx, fi, start, stop, cals, mult): montages = self._raw_extras[fi]['montages'] blocks = self._raw_extras[fi]['blocks'] boxy_files = self._raw_extras[fi]['files']['*.[000-999]*'] + event_fname = os.path.join(self._filenames[fi], 'evt') + + # Check if event files are available + # mostly for older boxy files since we'll be using the digaux channel + # for markers in further recordings + try: + event_files = dict() + key = ('*.[000-999]*') + print(event_fname) + event_files[key] = [glob.glob('%s/*%s' % (event_fname, key))] + event_files[key] = event_files[key][0] + event_data = [] + + for file_num, i_file in enumerate(event_files[key]): + event_data.append(scipy.io.loadmat( + event_files[key][0])['event']) + if event_data != []: print('Event file found!') + else: print('No event file found. 
Using digaux!') + + except: + pass # detectors, sources, and data types detectors = ['A', 'B', 'C', 'D', 'E', 'F', 'G', 'H', 'I', 'J', 'K', @@ -506,17 +527,31 @@ def _read_segment_file(self, data, idx, fi, start, stop, cals, mult): for i_chan in range(0, len(data_), 2): data_[[i_chan, i_chan + 1]] = data_[[i_chan + 1, i_chan]] - # Read triggers from event file - # add our markers to the data array based on filetype### - if type(meta_data['digaux']) is not list: - if filetype[file_num] == 'non-parsed': - block_markers.append(meta_data['digaux'][np.arange(0, - len(meta_data['digaux']), - source_num[file_num])]) - elif filetype[file_num] == 'parsed': - block_markers.append(meta_data['digaux']) - else: - block_markers.append(np.zeros((len(data_[0, :]),))) + # If there was an event file, place those events in our data + # If no, use digaux for our events + try: + temp_markers = np.zeros((len(data_[0, :]),)) + for event_num, event_info in enumerate(event_data[file_num]): + temp_markers[event_info[0]-1] = event_info[1] + block_markers.append(temp_markers) + except: + # add our markers to the data array based on filetype### + if type(meta_data['digaux']) is not list: + if filetype[file_num] == 'non-parsed': + block_markers.append(meta_data['digaux'][np.arange(0, + len(meta_data['digaux']), + source_num[file_num])]) + elif filetype[file_num] == 'parsed': + block_markers.append(meta_data['digaux']) + else: + block_markers.append(np.zeros((len(data_[0, :]),))) + + #change marker for last timepoint to indicate end of block + #we'll be using digaux to send markers, which is a serial port + #so we can send values between 1-255 + #we'll multiply our block start/end markers by 1000 to ensure + #we aren't within the 1-255 range + block_markers[i_blk][-1] = int(blk_name) * 1000 all_blocks.append(data_) diff --git a/tutorials/preprocessing/plot_80_boxy_processing.py b/tutorials/preprocessing/plot_80_boxy_processing.py index 79108607884..6ee1ce73a03 100644 --- 
a/tutorials/preprocessing/plot_80_boxy_processing.py +++ b/tutorials/preprocessing/plot_80_boxy_processing.py @@ -27,7 +27,7 @@ boxy_data_folder = mne.datasets.boxy_example.data_path() boxy_raw_dir = os.path.join(boxy_data_folder, 'Participant-1') -raw_intensity = mne.io.read_raw_boxy(boxy_raw_dir, 'Ph', verbose=True).load_data() +raw_intensity = mne.io.read_raw_boxy(boxy_raw_dir, 'AC', verbose=True).load_data() ###separate data based on montages### no_mrk_indices = [i_index for i_index,i_label in enumerate(raw_intensity.info['ch_names']) @@ -120,12 +120,12 @@ mtg_b_data.pick(picks_b[dists_b < 0.08]) scalings = dict(fnirs_raw=1e2) -raw_intensity.plot(n_channels=10, - duration=1000, scalings=scalings, show_scrollbars=True) -mtg_a_data.plot(n_channels=10, - duration=1000, scalings=scalings, show_scrollbars=True) -mtg_b_data.plot(n_channels=10, - duration=1000, scalings=scalings, show_scrollbars=True) +raw_intensity.plot(n_channels=5, + duration=20, scalings=100, show_scrollbars=True) +mtg_a_data.plot(n_channels=5, + duration=20, scalings=100, show_scrollbars=True) +mtg_b_data.plot(n_channels=5, + duration=20, scalings=100, show_scrollbars=True) # ############################################################################### From 99dfe9eedf4a005642c83e6b4f78f03039630369 Mon Sep 17 00:00:00 2001 From: Jonathan Kuziek Date: Fri, 15 May 2020 16:56:10 -0600 Subject: [PATCH 089/167] marker channel kind is now stim instead of fnirs_raw, coords are no longer transformed --- mne/io/boxy/boxy.py | 21 ++- .../preprocessing/plot_80_boxy_processing.py | 137 +++++++++++------- 2 files changed, 101 insertions(+), 57 deletions(-) diff --git a/mne/io/boxy/boxy.py b/mne/io/boxy/boxy.py index 39c45040306..07126197bcb 100644 --- a/mne/io/boxy/boxy.py +++ b/mne/io/boxy/boxy.py @@ -13,6 +13,7 @@ from ...transforms import apply_trans, get_ras_to_neuromag_trans from ...utils import logger, verbose, fill_doc from ...channels.montage import make_dig_montage +from ...annotations import 
Annotations @fill_doc @@ -204,7 +205,7 @@ def __init__(self, fname, datatype='AC', preload=False, verbose=None): detect_coords.append(all_coords[chan_index]) # Generate meaningful channel names for each montage - # get our unique labels for sources and detectors for each montage + # get our unique labels for sources and detectors for each montage unique_source_labels = [] unique_detect_labels = [] for mtg_num, i_mtg in enumerate(mtg_chan_num, 0): @@ -292,9 +293,11 @@ def __init__(self, fname, datatype='AC', preload=False, verbose=None): rpa=fiducial_coords[2]) # create info structure - info = create_info(boxy_labels, srate[0], ch_types='fnirs_raw') + ch_types = (['fnirs_raw' if i_chan < np.sum(mtg_chan_num) else 'stim' + for i_chan, _ in enumerate(boxy_labels)]) + info = create_info(boxy_labels, srate[0], ch_types=ch_types) + # add dig info - # this also applies a transform to the data into neuromag space # based on fiducials info.set_montage(my_dig_montage) @@ -313,9 +316,12 @@ def __init__(self, fname, datatype='AC', preload=False, verbose=None): fiducial_coords[2]) for i_chan in range(len(boxy_labels)): - temp_ch_src_det = apply_trans(native_head_t, - boxy_coords[i_chan][:9].reshape(3, 3) - ).ravel() + if i_chan < np.sum(mtg_chan_num): + temp_ch_src_det = apply_trans(native_head_t, + boxy_coords[i_chan][:9].reshape(3, 3) + ).ravel() + else: + temp_ch_src_det = np.zeros(9,)#don't want to transform markers # add wavelength and placeholders temp_other = np.asarray(boxy_coords[i_chan][9:], dtype=np.float64) info['chs'][i_chan]['loc'] = np.concatenate((temp_ch_src_det, @@ -329,7 +335,8 @@ def __init__(self, fname, datatype='AC', preload=False, verbose=None): 'files': files, 'montages': mtg_names, 'blocks': blk_names, - 'data_types': data_types,} + 'data_types': data_types, + } ###check to make sure data is the same length for each file ###boxy can be set to only record so many sample points per recording diff --git a/tutorials/preprocessing/plot_80_boxy_processing.py 
b/tutorials/preprocessing/plot_80_boxy_processing.py index 6ee1ce73a03..9b33a3a495b 100644 --- a/tutorials/preprocessing/plot_80_boxy_processing.py +++ b/tutorials/preprocessing/plot_80_boxy_processing.py @@ -30,19 +30,16 @@ raw_intensity = mne.io.read_raw_boxy(boxy_raw_dir, 'AC', verbose=True).load_data() ###separate data based on montages### -no_mrk_indices = [i_index for i_index,i_label in enumerate(raw_intensity.info['ch_names']) - if 'Markers' not in i_label] mtg_a_indices = [i_index for i_index,i_label in enumerate(raw_intensity.info['ch_names']) - if re.search(r'S[1-5]_', i_label)] + if re.search(r'(S[1-5]_|\bMarkers a\b)', i_label)] mtg_b_indices = [i_index for i_index,i_label in enumerate(raw_intensity.info['ch_names']) - if re.search(r'S([6-9]|10)_', i_label)] + if re.search(r'(S([6-9]|10)_|\bMarkers b\b)', i_label)] -mtg_a_data = raw_intensity.copy() -mtg_b_data = raw_intensity.copy() +mtg_a_intensity = raw_intensity.copy() +mtg_b_intensity = raw_intensity.copy() -mtg_a_data.pick(mtg_a_indices) -mtg_b_data.pick(mtg_b_indices) -raw_intensity.pick(no_mrk_indices) +mtg_a_intensity.pick(mtg_a_indices) +mtg_b_intensity.pick(mtg_b_indices) # ############################################################################### # # View location of sensors over brain surface @@ -69,7 +66,7 @@ mne.viz.set_3d_view(figure=fig, azimuth=20, elevation=55, distance=0.6) fig = mne.viz.create_3d_figure(size=(800, 600), bgcolor='white') -fig = mne.viz.plot_alignment(mtg_a_data.info, +fig = mne.viz.plot_alignment(mtg_a_intensity.info, show_axes=True, subject='fsaverage', trans='fsaverage', @@ -82,7 +79,7 @@ mne.viz.set_3d_view(figure=fig, azimuth=20, elevation=55, distance=0.6) fig = mne.viz.create_3d_figure(size=(800, 600), bgcolor='white') -fig = mne.viz.plot_alignment(mtg_b_data.info, +fig = mne.viz.plot_alignment(mtg_b_intensity.info, show_axes=True, subject='fsaverage', trans='fsaverage', @@ -104,27 +101,27 @@ # # These short channels can be seen in the figure above. 
# # To achieve this we pick all the channels that are not considered to be short. -picks = mne.pick_types(raw_intensity.info, meg=False, fnirs=True) -picks_a = mne.pick_types(mtg_a_data.info, meg=False, fnirs=True) -picks_b = mne.pick_types(mtg_b_data.info, meg=False, fnirs=True) +picks = mne.pick_types(raw_intensity.info, meg=False, fnirs=True, stim=True) +picks_a = mne.pick_types(mtg_a_intensity.info, meg=False, fnirs=True, stim=True) +picks_b = mne.pick_types(mtg_b_intensity.info, meg=False, fnirs=True, stim=True) dists = mne.preprocessing.nirs.source_detector_distances( raw_intensity.info, picks=picks) dists_a = mne.preprocessing.nirs.source_detector_distances( - raw_intensity.info, picks=picks_a) + mtg_a_intensity.info, picks=picks_a) dists_b = mne.preprocessing.nirs.source_detector_distances( - raw_intensity.info, picks=picks_b) + mtg_b_intensity.info, picks=picks_b) raw_intensity.pick(picks[dists < 0.08]) -mtg_a_data.pick(picks_a[dists_a < 0.08]) -mtg_b_data.pick(picks_b[dists_b < 0.08]) +mtg_a_intensity.pick(picks_a[dists_a < 0.08]) +mtg_b_intensity.pick(picks_b[dists_b < 0.08]) scalings = dict(fnirs_raw=1e2) raw_intensity.plot(n_channels=5, duration=20, scalings=100, show_scrollbars=True) -mtg_a_data.plot(n_channels=5, +mtg_a_intensity.plot(n_channels=5, duration=20, scalings=100, show_scrollbars=True) -mtg_b_data.plot(n_channels=5, +mtg_b_intensity.plot(n_channels=5, duration=20, scalings=100, show_scrollbars=True) @@ -135,8 +132,8 @@ # # The raw intensity values are then converted to optical density. 
raw_od = mne.preprocessing.nirs.optical_density(raw_intensity) -raw_od_a = mne.preprocessing.nirs.optical_density(mtg_a_data) -raw_od_b = mne.preprocessing.nirs.optical_density(mtg_b_data) +raw_od_a = mne.preprocessing.nirs.optical_density(mtg_a_intensity) +raw_od_b = mne.preprocessing.nirs.optical_density(mtg_b_intensity) raw_od.plot(n_channels=len(raw_od.ch_names), duration=500, show_scrollbars=False) @@ -180,8 +177,9 @@ # # In this example we will mark all channels with a SCI less than 0.5 as bad # # (this dataset is quite clean, so no channels are marked as bad). -# raw_od.info['bads'] = list(compress(raw_od.ch_names, sci < 0.5)) - +raw_od.info['bads'] = list(compress(raw_od.ch_names, sci < 0.5)) +raw_od_a.info['bads'] = list(compress(raw_od_a.ch_names, sci_a < 0.5)) +raw_od_b.info['bads'] = list(compress(raw_od_b.ch_names, sci_b < 0.5)) # ############################################################################### # # At this stage it is appropriate to inspect your data @@ -199,9 +197,18 @@ # # Next we convert the optical density data to haemoglobin concentration using # # the modified Beer-Lambert law. -# raw_haemo = mne.preprocessing.nirs.beer_lambert_law(raw_od) -# raw_haemo.plot(n_channels=len(raw_haemo.ch_names), -# duration=500, show_scrollbars=False) +raw_haemo = mne.preprocessing.nirs.beer_lambert_law(raw_od) +raw_haemo_a = mne.preprocessing.nirs.beer_lambert_law(raw_od_a) +raw_haemo_b = mne.preprocessing.nirs.beer_lambert_law(raw_od_b) + +raw_haemo.plot(n_channels=len(raw_haemo.ch_names), + duration=500, show_scrollbars=False) + +raw_haemo_a.plot(n_channels=len(raw_haemo_a.ch_names), + duration=500, show_scrollbars=False) + +raw_haemo_b.plot(n_channels=len(raw_haemo_b.ch_names), + duration=500, show_scrollbars=False) # ############################################################################### @@ -214,14 +221,34 @@ # # remove this. A high pass filter is also included to remove slow drifts # # in the data. 
-# fig = raw_haemo.plot_psd(average=True) -# fig.suptitle('Before filtering', weight='bold', size='x-large') -# fig.subplots_adjust(top=0.88) -# raw_haemo = raw_haemo.filter(0.05, 0.7, h_trans_bandwidth=0.2, -# l_trans_bandwidth=0.02) -# fig = raw_haemo.plot_psd(average=True) -# fig.suptitle('After filtering', weight='bold', size='x-large') -# fig.subplots_adjust(top=0.88) +fig = raw_haemo.plot_psd(average=True) +fig.suptitle('Before filtering', weight='bold', size='x-large') +fig.subplots_adjust(top=0.88) +raw_haemo = raw_haemo.filter(0.05, 0.7, h_trans_bandwidth=0.2, + l_trans_bandwidth=0.02) +fig = raw_haemo.plot_psd(average=True) +fig.suptitle('After filtering', weight='bold', size='x-large') +fig.subplots_adjust(top=0.88) + + +fig = raw_haemo_a.plot_psd(average=True) +fig.suptitle('Before filtering Montage A', weight='bold', size='x-large') +fig.subplots_adjust(top=0.88) +raw_haemo_a = raw_haemo_a.filter(0.05, 0.7, h_trans_bandwidth=0.2, + l_trans_bandwidth=0.02) +fig = raw_haemo_a.plot_psd(average=True) +fig.suptitle('After filtering Montage A', weight='bold', size='x-large') +fig.subplots_adjust(top=0.88) + + +fig = raw_haemo_b.plot_psd(average=True) +fig.suptitle('Before filtering Montage B', weight='bold', size='x-large') +fig.subplots_adjust(top=0.88) +raw_haemo_b = raw_haemo_b.filter(0.05, 0.7, h_trans_bandwidth=0.2, + l_trans_bandwidth=0.02) +fig = raw_haemo_b.plot_psd(average=True) +fig.suptitle('After filtering Montage B', weight='bold', size='x-large') +fig.subplots_adjust(top=0.88) # ############################################################################### # # Extract epochs @@ -234,13 +261,15 @@ # # First we extract the events of interest and visualise them to ensure they are # # correct. 
-# events, _ = mne.events_from_annotations(raw_haemo, event_id={'1.0': 1, -# '2.0': 2, -# '3.0': 3}) -# event_dict = {'Control': 1, 'Tapping/Left': 2, 'Tapping/Right': 3} -# fig = mne.viz.plot_events(events, event_id=event_dict, -# sfreq=raw_haemo.info['sfreq']) -# fig.subplots_adjust(right=0.7) # make room for the legend +mtg_a_events = mne.find_events(mtg_a_intensity, stim_channel='Markers a') + +fig = mne.viz.plot_events(mtg_a_events) +fig.subplots_adjust(right=0.7) # make room for the legend + +mtg_b_events = mne.find_events(mtg_b_intensity, stim_channel='Markers b') + +fig = mne.viz.plot_events(mtg_b_events) +fig.subplots_adjust(right=0.7) # make room for the legend # ############################################################################### @@ -248,15 +277,23 @@ # # baseline correction, and extract the epochs. We visualise the log of which # # epochs were dropped. -# reject_criteria = dict(hbo=80e-6) -# tmin, tmax = -5, 15 +reject_criteria = dict(hbo=80e-6) +tmin, tmax = -5, 15 + +epochs = mne.Epochs(raw_haemo_a, mtg_a_events, + tmin=tmin, tmax=tmax, + reject=reject_criteria, reject_by_annotation=False, + proj=True, baseline=(None, 0), preload=True, + detrend=None, verbose=True) +epochs.plot_drop_log() + -# epochs = mne.Epochs(raw_haemo, events, event_id=event_dict, -# tmin=tmin, tmax=tmax, -# reject=reject_criteria, reject_by_annotation=True, -# proj=True, baseline=(None, 0), preload=True, -# detrend=None, verbose=True) -# epochs.plot_drop_log() +epochs = mne.Epochs(raw_haemo_b, mtg_b_events, + tmin=tmin, tmax=tmax, + reject=reject_criteria, reject_by_annotation=False, + proj=True, baseline=(None, 0), preload=True, + detrend=None, verbose=True) +epochs.plot_drop_log() # ############################################################################### From 9a8364245215ad0f3a5087ec0664c8a5e339374a Mon Sep 17 00:00:00 2001 From: Jonathan Kuziek Date: Tue, 19 May 2020 16:41:25 -0600 Subject: [PATCH 090/167] went through entire boxy tutorial to create and 
compare epochs --- .../preprocessing/plot_80_boxy_processing.py | 283 ++++++++++++------ 1 file changed, 184 insertions(+), 99 deletions(-) diff --git a/tutorials/preprocessing/plot_80_boxy_processing.py b/tutorials/preprocessing/plot_80_boxy_processing.py index 9b33a3a495b..0b57a91c574 100644 --- a/tutorials/preprocessing/plot_80_boxy_processing.py +++ b/tutorials/preprocessing/plot_80_boxy_processing.py @@ -271,29 +271,52 @@ fig = mne.viz.plot_events(mtg_b_events) fig.subplots_adjust(right=0.7) # make room for the legend - # ############################################################################### # # Next we define the range of our epochs, the rejection criteria, # # baseline correction, and extract the epochs. We visualise the log of which # # epochs were dropped. -reject_criteria = dict(hbo=80e-6) -tmin, tmax = -5, 15 +# reject_criteria = dict(hbo=80e-6) +reject_criteria = None +tmin, tmax = -0.2, 1 -epochs = mne.Epochs(raw_haemo_a, mtg_a_events, +mtg_a_haemo_epochs = mne.Epochs(raw_haemo_a, mtg_a_events, tmin=tmin, tmax=tmax, reject=reject_criteria, reject_by_annotation=False, proj=True, baseline=(None, 0), preload=True, detrend=None, verbose=True) -epochs.plot_drop_log() +mtg_a_haemo_epochs.plot_drop_log() -epochs = mne.Epochs(raw_haemo_b, mtg_b_events, +mtg_b_haemo_epochs = mne.Epochs(raw_haemo_b, mtg_b_events, tmin=tmin, tmax=tmax, reject=reject_criteria, reject_by_annotation=False, proj=True, baseline=(None, 0), preload=True, detrend=None, verbose=True) -epochs.plot_drop_log() +mtg_b_haemo_epochs.plot_drop_log() + + +#get epochs from the raw intensities +mtg_a_epochs = mne.Epochs(mtg_a_intensity, mtg_a_events, + event_id=dict(event_1=1,event_2=2), + tmin=tmin, tmax=tmax, + reject=None, reject_by_annotation=False, + proj=False, baseline=(-0.2, 0), preload=True, + detrend=None, verbose=True) + +mtg_b_epochs = mne.Epochs(mtg_b_intensity, mtg_b_events, + event_id=dict(event_1=1,event_2=2), + tmin=tmin, tmax=tmax, + reject=None, 
reject_by_annotation=False, + proj=False, baseline=(-0.2, 0), preload=True, + detrend=None, verbose=True) + +#two ways to plot epochs, should be the same +fig = mne.viz.plot_epochs(mtg_a_epochs,n_epochs=5,n_channels=5, scalings='auto') +fig = mtg_a_epochs.plot(n_epochs=5,n_channels=5, scalings='auto') + +fig = mne.viz.plot_epochs(mtg_b_epochs,n_epochs=5,n_channels=5, scalings='auto') +fig = mtg_b_epochs.plot(n_epochs=5,n_channels=5, scalings='auto') # ############################################################################### @@ -306,19 +329,61 @@ # # trials, and the consistent dip in HbR that is slightly delayed relative to # # the HbO peak. -# epochs['Tapping'].plot_image(combine='mean', vmin=-30, vmax=30, -# ts_args=dict(ylim=dict(hbo=[-15, 15], -# hbr=[-15, 15]))) - - -# ############################################################################### -# # We can also view the epoched data for the control condition and observe -# # that it does not show the expected morphology. - -# epochs['Control'].plot_image(combine='mean', vmin=-30, vmax=30, -# ts_args=dict(ylim=dict(hbo=[-15, 15], -# hbr=[-15, 15]))) - +#haemo plots +mtg_a_haemo_epochs['1'].plot_image(combine='mean', vmin=-30, vmax=30, + ts_args=dict(ylim=dict(hbo=[-15, 15], + hbr=[-15, 15]))) + +mtg_a_haemo_epochs['2'].plot_image(combine='mean', vmin=-30, vmax=30, + ts_args=dict(ylim=dict(hbo=[-15, 15], + hbr=[-15, 15]))) + +mtg_b_haemo_epochs['1'].plot_image(combine='mean', vmin=-30, vmax=30, + ts_args=dict(ylim=dict(hbo=[-15, 15], + hbr=[-15, 15]))) + +mtg_b_haemo_epochs['2'].plot_image(combine='mean', vmin=-30, vmax=30, + ts_args=dict(ylim=dict(hbo=[-15, 15], + hbr=[-15, 15]))) + +#raw epochs +#separate first and last detectors +mtg_a_first_det = ([i_index for i_index,i_label in + enumerate(mtg_a_epochs.info['ch_names']) if + re.search(r'_D[1-4]', i_label)]) + +mtg_a_last_det = ([i_index for i_index,i_label in + enumerate(mtg_a_epochs.info['ch_names']) if + re.search(r'_D[5-8]', i_label)]) + 
+mtg_b_first_det = ([i_index for i_index,i_label in + enumerate(mtg_b_epochs.info['ch_names']) if + re.search(r'_D(9|1[0-2])', i_label)]) + +mtg_b_last_det = ([i_index for i_index,i_label in + enumerate(mtg_b_epochs.info['ch_names']) if + re.search(r'_D1[3-6]', i_label)]) + +#plot our two events for both montages +fig = mtg_a_epochs['event_1'].plot_image(combine='mean', vmin=-20, vmax=20, + colorbar=True, title='Montage A Event 1', + group_by=dict(FIRST_DET=mtg_a_first_det, + LAST_DET=mtg_a_last_det)) + +fig = mtg_a_epochs['event_2'].plot_image(combine='mean', vmin=-20, vmax=20, + colorbar=True, title='Montage A Event 2', + group_by=dict(FIRST_DET=mtg_a_first_det, + LAST_DET=mtg_a_last_det)) + +fig = mtg_b_epochs['event_1'].plot_image(combine='mean', vmin=-20, vmax=20, + colorbar=True, title='Montage B Event 1', + group_by=dict(FIRST_DET=mtg_b_first_det, + LAST_DET=mtg_b_last_det)) + +fig = mtg_b_epochs['event_2'].plot_image(combine='mean', vmin=-20, vmax=20, + colorbar=True, title='Montage B Event 2', + group_by=dict(FIRST_DET=mtg_b_first_det, + LAST_DET=mtg_b_last_det)) # ############################################################################### # # View consistency of responses across channels @@ -328,14 +393,12 @@ # # pairs that we selected. All the channels in this data are located over the # # motor cortex, and all channels show a similar pattern in the data. 
-# fig, axes = plt.subplots(nrows=2, ncols=2, figsize=(15, 6)) -# clims = dict(hbo=[-20, 20], hbr=[-20, 20]) -# epochs['Control'].average().plot_image(axes=axes[:, 0], clim=clims) -# epochs['Tapping'].average().plot_image(axes=axes[:, 1], clim=clims) -# for column, condition in enumerate(['Control', 'Tapping']): -# for ax in axes[:, column]: -# ax.set_title('{}: {}'.format(condition, ax.get_title())) - +fig, axes = plt.subplots(nrows=2, ncols=2, figsize=(15, 6)) +clim=dict(fnirs_raw=[-20,20]) +mtg_a_epochs['event_1'].average().plot_image(axes=axes[0, 0],titles='Montage A Event 1', clim=clim) +mtg_a_epochs['event_2'].average().plot_image(axes=axes[1, 0],titles='Montage A Event 2', clim=clim) +mtg_b_epochs['event_1'].average().plot_image(axes=axes[0, 1],titles='Montage B Event 1', clim=clim) +mtg_b_epochs['event_2'].average().plot_image(axes=axes[1, 1],titles='Montage B Event 2', clim=clim) # ############################################################################### # # Plot standard fNIRS response image @@ -345,21 +408,22 @@ # # both the HbO and HbR on the same figure to illustrate the relation between # # the two signals. 
-# evoked_dict = {'Tapping/HbO': epochs['Tapping'].average(picks='hbo'), -# 'Tapping/HbR': epochs['Tapping'].average(picks='hbr'), -# 'Control/HbO': epochs['Control'].average(picks='hbo'), -# 'Control/HbR': epochs['Control'].average(picks='hbr')} - -# # Rename channels until the encoding of frequency in ch_name is fixed -# for condition in evoked_dict: -# evoked_dict[condition].rename_channels(lambda x: x[:-4]) +mtg_a_evoked_dict = {'Montage_A_Event_1': mtg_a_epochs['event_1'].average(), + 'Montage_A_Event_2': mtg_a_epochs['event_2'].average()} -# color_dict = dict(HbO='#AA3377', HbR='b') -# styles_dict = dict(Control=dict(linestyle='dashed')) +mtg_b_evoked_dict = {'Montage_B_Event_1': mtg_b_epochs['event_1'].average(), + 'Montage_B_Event_2': mtg_b_epochs['event_2'].average()} -# mne.viz.plot_compare_evokeds(evoked_dict, combine="mean", ci=0.95, -# colors=color_dict, styles=styles_dict) +###this seems to what our conditions/events to have the same number of channels, +###and the same channel names. Maybe we can't use this to compare montages?? +###Gives an error if I try to compare both montages and events +color_dict = dict(Montage_A_Event_1='r', Montage_A_Event_2='b') +mne.viz.plot_compare_evokeds(mtg_a_evoked_dict, combine="mean", ci=0.95, + colors=color_dict) +color_dict = dict(Montage_B_Event_1='r', Montage_B_Event_2='b') +mne.viz.plot_compare_evokeds(mtg_b_evoked_dict, combine="mean", ci=0.95, + colors=color_dict) # ############################################################################### # # View topographic representation of activity @@ -367,11 +431,17 @@ # # # # Next we view how the topographic activity changes throughout the response. 
-# times = np.arange(-3.5, 13.2, 3.0) -# topomap_args = dict(extrapolate='local') -# epochs['Tapping'].average(picks='hbo').plot_joint( -# times=times, topomap_args=topomap_args) +times = np.arange(-0.2, 1.0, 0.2) +topomap_args = dict(extrapolate='local') +fig = mtg_a_epochs['event_1'].average().plot_joint(times=times, + topomap_args=topomap_args) +fig = mtg_a_epochs['event_2'].average().plot_joint(times=times, + topomap_args=topomap_args) +fig = mtg_b_epochs['event_1'].average().plot_joint(times=times, + topomap_args=topomap_args) +fig = mtg_b_epochs['event_2'].average().plot_joint(times=times, + topomap_args=topomap_args) # ############################################################################### # # Compare tapping of left and right hands @@ -380,69 +450,84 @@ # # Finally we generate topo maps for the left and right conditions to view # # the location of activity. First we visualise the HbO activity. -# times = np.arange(4.0, 11.0, 1.0) -# epochs['Tapping/Left'].average(picks='hbo').plot_topomap( -# times=times, **topomap_args) -# epochs['Tapping/Right'].average(picks='hbo').plot_topomap( -# times=times, **topomap_args) +times = np.arange(0.0, 1.0, 0.2) +mtg_a_epochs['event_1'].average().plot_topomap(times=times, title='Montage A Event 1', **topomap_args) +mtg_a_epochs['event_2'].average().plot_topomap(times=times, title='Montage A Event 2', **topomap_args) +mtg_b_epochs['event_1'].average().plot_topomap(times=times, title='Montage B Event 1', **topomap_args) +mtg_b_epochs['event_2'].average().plot_topomap(times=times, title='Montage B Event 2', **topomap_args) # ############################################################################### -# # And we also view the HbR activity for the two conditions. +# # And we can plot the comparison at a single time point for two conditions. 
-# epochs['Tapping/Left'].average(picks='hbr').plot_topomap( -# times=times, **topomap_args) -# epochs['Tapping/Right'].average(picks='hbr').plot_topomap( -# times=times, **topomap_args) +fig, axes = plt.subplots(nrows=2, ncols=4, figsize=(9, 5), + gridspec_kw=dict(width_ratios=[1, 1, 1, 0.1])) +vmin, vmax, ts = -0.192, 0.992, 0.1 +vmin = -20 +vmax = 20 -# ############################################################################### -# # And we can plot the comparison at a single time point for two conditions. +mtg_a_epochs['event_1'].average().plot_topomap(times=ts, + axes=axes[0, 0], vmin=vmin, vmax=vmax, colorbar=False, + **topomap_args) + +mtg_a_epochs['event_2'].average().plot_topomap(times=ts, + axes=axes[1, 0], vmin=vmin, vmax=vmax, colorbar=False, + **topomap_args) + +mtg_b_epochs['event_1'].average().plot_topomap(times=ts, + axes=axes[0, 1], vmin=vmin, vmax=vmax, colorbar=False, + **topomap_args) + +mtg_b_epochs['event_2'].average().plot_topomap(times=ts, + axes=axes[1, 1], vmin=vmin, vmax=vmax, colorbar=False, + **topomap_args) -# fig, axes = plt.subplots(nrows=2, ncols=4, figsize=(9, 5), -# gridspec_kw=dict(width_ratios=[1, 1, 1, 0.1])) -# vmin, vmax, ts = -8, 8, 9.0 - -# evoked_left = epochs['Tapping/Left'].average() -# evoked_right = epochs['Tapping/Right'].average() - -# evoked_left.plot_topomap(ch_type='hbo', times=ts, axes=axes[0, 0], -# vmin=vmin, vmax=vmax, colorbar=False, -# **topomap_args) -# evoked_left.plot_topomap(ch_type='hbr', times=ts, axes=axes[1, 0], -# vmin=vmin, vmax=vmax, colorbar=False, -# **topomap_args) -# evoked_right.plot_topomap(ch_type='hbo', times=ts, axes=axes[0, 1], -# vmin=vmin, vmax=vmax, colorbar=False, -# **topomap_args) -# evoked_right.plot_topomap(ch_type='hbr', times=ts, axes=axes[1, 1], -# vmin=vmin, vmax=vmax, colorbar=False, -# **topomap_args) - -# evoked_diff = mne.combine_evoked([evoked_left, -evoked_right], weights='equal') - -# evoked_diff.plot_topomap(ch_type='hbo', times=ts, axes=axes[0, 2:], -# vmin=vmin, 
vmax=vmax, colorbar=True, -# **topomap_args) -# evoked_diff.plot_topomap(ch_type='hbr', times=ts, axes=axes[1, 2:], -# vmin=vmin, vmax=vmax, colorbar=True, -# **topomap_args) - -# for column, condition in enumerate( -# ['Tapping Left', 'Tapping Right', 'Left-Right']): -# for row, chroma in enumerate(['HbO', 'HbR']): -# axes[row, column].set_title('{}: {}'.format(chroma, condition)) -# fig.tight_layout() + +###can't compare events across montages, for this data set, since they +#don't have the same channel names +mtg_a_evoked_diff = mne.combine_evoked([mtg_a_epochs['event_1'].average(), + -mtg_a_epochs['event_2'].average()], + weights='equal') + +mtg_b_evoked_diff = mne.combine_evoked([mtg_b_epochs['event_1'].average(), + -mtg_b_epochs['event_2'].average()], + weights='equal') + +mtg_a_evoked_diff.plot_topomap(times=ts, axes=axes[0, 2:], + vmin=vmin, vmax=vmax, colorbar=True, + **topomap_args) +mtg_b_evoked_diff.plot_topomap(times=ts, axes=axes[1, 2:], + vmin=vmin, vmax=vmax, colorbar=True, + **topomap_args) + +for column, condition in enumerate( + ['Event 1', 'Event 2', 'Difference']): + for row, chroma in enumerate(['Montage A', 'Montage B']): + axes[row, column].set_title('{}: {}'.format(chroma, condition)) +fig.tight_layout() # ############################################################################### # # Lastly, we can also look at the individual waveforms to see what is # # driving the topographic plot above. 
-# fig, axes = plt.subplots(nrows=1, ncols=1, figsize=(6, 4)) -# mne.viz.plot_evoked_topo(epochs['Left'].average(picks='hbo'), color='b', -# axes=axes, legend=False) -# mne.viz.plot_evoked_topo(epochs['Right'].average(picks='hbo'), color='r', -# axes=axes, legend=False) +fig, axes = plt.subplots(nrows=1, ncols=1, figsize=(6, 4)) +mne.viz.plot_evoked_topo(mtg_a_epochs['event_1'].average(), color='b', + axes=axes, legend=False) +mne.viz.plot_evoked_topo(mtg_a_epochs['event_2'].average(), color='r', + axes=axes, legend=False) + +# Tidy the legend +leg_lines = [line for line in axes.lines if line.get_c() == 'b'][:1] +leg_lines.append([line for line in axes.lines if line.get_c() == 'r'][0]) +fig.legend(leg_lines, ['Montage A Event 1', 'Montage A Event 2'], loc='lower right') + + +fig, axes = plt.subplots(nrows=1, ncols=1, figsize=(6, 4)) +mne.viz.plot_evoked_topo(mtg_b_epochs['event_1'].average(), color='b', + axes=axes, legend=False) +mne.viz.plot_evoked_topo(mtg_b_epochs['event_2'].average(), color='r', + axes=axes, legend=False) -# # Tidy the legend -# leg_lines = [line for line in axes.lines if line.get_c() == 'b'][:1] -# leg_lines.append([line for line in axes.lines if line.get_c() == 'r'][0]) -# fig.legend(leg_lines, ['Left', 'Right'], loc='lower right') +# Tidy the legend +leg_lines = [line for line in axes.lines if line.get_c() == 'b'][:1] +leg_lines.append([line for line in axes.lines if line.get_c() == 'r'][0]) +fig.legend(leg_lines, ['Montage A Event 1', 'Montage A Event 2'], loc='lower right') From b6665122f0cd1dc605749cbedd37abc745fa6948 Mon Sep 17 00:00:00 2001 From: Jonathan Kuziek Date: Wed, 20 May 2020 11:43:10 -0600 Subject: [PATCH 091/167] added a few messages to print if markers are found or not, for each montage and block --- mne/io/boxy/boxy.py | 10 ++++++++++ 1 file changed, 10 insertions(+) diff --git a/mne/io/boxy/boxy.py b/mne/io/boxy/boxy.py index 07126197bcb..840dd1d3cef 100644 --- a/mne/io/boxy/boxy.py +++ b/mne/io/boxy/boxy.py @@ -412,6 
+412,7 @@ def _read_segment_file(self, data, idx, fi, start, stop, cals, mult): else: print('No event file found. Using digaux!') except: + print('No event file found. Using digaux!') pass # detectors, sources, and data types @@ -552,6 +553,15 @@ def _read_segment_file(self, data, idx, fi, start, stop, cals, mult): block_markers.append(meta_data['digaux']) else: block_markers.append(np.zeros((len(data_[0, :]),))) + + ###check our markers to see if anything is actually in there### + if (all(i_mrk == 0 for i_mrk in block_markers[i_blk]) or + all(i_mrk == 255 for i_mrk in block_markers[i_blk])): + print('No markers for montage ' + mtg_name + + ' and block ' + blk_name) + else: + print('Found markers for montage ' + mtg_name + + ' and block ' + blk_name + '!') #change marker for last timepoint to indicate end of block #we'll be using digaux to send markers, which is a serial port From 91ba501d881622c9774028f4fbe4ed9dcf825215 Mon Sep 17 00:00:00 2001 From: Kyle Mathewson Date: Wed, 20 May 2020 13:13:00 -0700 Subject: [PATCH 092/167] updated dataset --- mne/datasets/utils.py | 4 ++-- 1 file changed, 2 insertions(+), 2 deletions(-) diff --git a/mne/datasets/utils.py b/mne/datasets/utils.py index eb5cb1c1712..583538e65c7 100644 --- a/mne/datasets/utils.py +++ b/mne/datasets/utils.py @@ -264,7 +264,7 @@ def _data_path(path=None, force_update=False, update_path=True, download=True, 'tar.gz/%s' % releases['testing'], multimodal='https://ndownloader.figshare.com/files/5999598', fnirs_motor='https://osf.io/dj3eh/download?version=1', - boxy_example='https://osf.io/hksme/download?version=5', + boxy_example='https://osf.io/hksme/download?version=6', opm='https://osf.io/p6ae7/download?version=2', visual_92_categories=[ 'https://osf.io/8ejrs/download?version=1', @@ -328,7 +328,7 @@ def _data_path(path=None, force_update=False, update_path=True, download=True, testing='a6e18de6405d84599c6d4dfb4c1d2b14', multimodal='26ec847ae9ab80f58f204d09e2c08367', 
fnirs_motor='c4935d19ddab35422a69f3326a01fef8', - boxy_example='cfd625fedc27e5ba3ce3e3f6a4ee0a3e', + boxy_example='d567e80b8063e90096861297638e2eef', opm='370ad1dcfd5c47e029e692c85358a374', visual_92_categories=['74f50bbeb65740903eadc229c9fa759f', '203410a98afc9df9ae8ba9f933370e20'], From 39deb729ae7354f9718151012a316e50e703fec6 Mon Sep 17 00:00:00 2001 From: Jonathan Kuziek Date: Wed, 20 May 2020 17:24:47 -0600 Subject: [PATCH 093/167] marker times were off, had to change so first_samp and last_samp refer to sample number in the data structure, not lines from the file --- mne/io/boxy/boxy.py | 33 +++++++++---------- .../preprocessing/plot_80_boxy_processing.py | 6 ++++ 2 files changed, 21 insertions(+), 18 deletions(-) diff --git a/mne/io/boxy/boxy.py b/mne/io/boxy/boxy.py index 840dd1d3cef..167c22fa90c 100644 --- a/mne/io/boxy/boxy.py +++ b/mne/io/boxy/boxy.py @@ -109,6 +109,7 @@ def __init__(self, fname, datatype='AC', preload=False, verbose=None): with open(i_file, 'r') as data: for line_num, i_line in enumerate(data, 1): if '#DATA ENDS' in i_line: + #data ends just before this end_line.append(line_num - 1) break if 'Detector Channels' in i_line: @@ -124,7 +125,8 @@ def __init__(self, fname, datatype='AC', preload=False, verbose=None): elif 'Updata Rate (Hz)' in i_line: srate.append(float(i_line.rsplit(' ')[0])) elif '#DATA BEGINS' in i_line: - start_line.append(line_num) + #data starts a couple lines later + start_line.append(line_num + 2) elif 'exmux' in i_line: filetype[file_num] = 'non-parsed' @@ -362,22 +364,24 @@ def __init__(self, fname, datatype='AC', preload=False, verbose=None): print('Original Difference: ', end_line[0] - start_line[0]) first_samps = start_line[0] print('New first_samps: ', first_samps) - diff = end_line[0] - start_line[0] + diff = end_line[0] - (start_line[0]) # input file has rows for each source, # output variable rearranges as columns and does not if filetype[0] == 'non-parsed': - last_samps = ((((diff - 2)*len(blk_names[0])) // 
(source_num[0])) + - start_line[0] - 1) + last_samps = ((diff*len(blk_names[0])) // (source_num[0])) elif filetype[0] == 'parsed': - last_samps = (start_line[0] + ((diff - 3)*len(blk_names[0]))) - + last_samps = diff*len(blk_names[0]) + + # first sample is technically sample 0, not the start line in the file + first_samps = 0 + print('New last_samps: ', last_samps) print('New Difference: ', last_samps - first_samps) - + super(RawBOXY, self).__init__( info, preload, filenames=[fname], first_samps=[first_samps], - last_samps=[last_samps], + last_samps=[last_samps-1], raw_extras=[raw_extras], verbose=verbose) def _read_segment_file(self, data, idx, fi, start, stop, cals, mult): @@ -432,21 +436,14 @@ def _read_segment_file(self, data, idx, fi, start, stop, cals, mult): boxy_data = [] with open(boxy_file, 'r') as data_file: for line_num, i_line in enumerate(data_file, 1): + if line_num == (start_line[i_blk] - 1):# grab column names + col_names = np.asarray(re.findall('\w+\-\w+|\w+\-\d+|\w+', + i_line.rsplit(' ')[0])) if line_num > start_line[file_num] and line_num <= end_line[file_num]: boxy_data.append(i_line.rsplit(' ')) sources = np.arange(1, source_num[file_num] + 1, 1) - # get column names from the first row of our boxy data - col_names = np.asarray(re.findall('\w+\-\w+|\w+\-\d+|\w+', - boxy_data[0][0])) - del boxy_data[0] - - # sometimes there is an empty line before our data starts - # this should remove them - while re.findall('[-+]?\d*\.?\d+', boxy_data[0][0]) == []: - del boxy_data[0] - # grab the individual data points for each column boxy_data = [re.findall('[-+]?\d*\.?\d+', i_row[0]) for i_row in boxy_data] diff --git a/tutorials/preprocessing/plot_80_boxy_processing.py b/tutorials/preprocessing/plot_80_boxy_processing.py index 0b57a91c574..c98bd029847 100644 --- a/tutorials/preprocessing/plot_80_boxy_processing.py +++ b/tutorials/preprocessing/plot_80_boxy_processing.py @@ -266,11 +266,17 @@ fig = mne.viz.plot_events(mtg_a_events) 
fig.subplots_adjust(right=0.7) # make room for the legend +mtg_a_intensity.plot(events=mtg_a_events, start=0, duration=10,color='gray', + event_color={1: 'r', 2: 'b', 1000: 'k'}) + mtg_b_events = mne.find_events(mtg_b_intensity, stim_channel='Markers b') fig = mne.viz.plot_events(mtg_b_events) fig.subplots_adjust(right=0.7) # make room for the legend +mtg_b_intensity.plot(events=mtg_b_events, start=0, duration=10,color='gray', + event_color={1: 'r', 2: 'b', 2000: 'k'}) + # ############################################################################### # # Next we define the range of our epochs, the rejection criteria, # # baseline correction, and extract the epochs. We visualise the log of which From f897bd66ce806b72cf1e3d3021c4ee62be3d4641 Mon Sep 17 00:00:00 2001 From: Jonathan Kuziek Date: Thu, 21 May 2020 17:05:00 -0600 Subject: [PATCH 094/167] phase data will be unwrapped, detrended, and outliers removed --- mne/io/boxy/boxy.py | 79 +++++++++++++++++++ .../preprocessing/plot_80_boxy_processing.py | 2 +- 2 files changed, 80 insertions(+), 1 deletion(-) diff --git a/mne/io/boxy/boxy.py b/mne/io/boxy/boxy.py index 167c22fa90c..0c0fccc5093 100644 --- a/mne/io/boxy/boxy.py +++ b/mne/io/boxy/boxy.py @@ -527,6 +527,85 @@ def _read_segment_file(self, data, idx, fi, start, stop, cals, mult): # save our data based on data type data_[index_loc, :] = boxy_array[:, channel] + + ###phase unwrapping### + # thresh = 0.00000001 + # scipy.io.savemat(file_name = r"C:\Users\spork\Desktop\data_matlab.mat", + # mdict=dict(data=data_)) + if i_data == 'Ph': + print('Fixing phase wrap') + for i_chan in range(np.size(data_, axis=0)): + if np.mean(data_[i_chan,:50]) < 180: + wrapped_points = data_[i_chan, :] > 270 + data_[i_chan, wrapped_points] -= 360 + else: + wrapped_points = data_[i_chan,:] < 90 + data_[i_chan, wrapped_points] += 360 + + # unwrapped_data = scipy.io.loadmat(r"C:\Users\spork\Desktop\data_unwrap_python.mat") + + # test1 = abs(unwrapped_data['data'] - data_) <= thresh + 
# test1.all() + + print('Detrending phase data') + # scipy.io.savemat(file_name = r"C:\Users\spork\Desktop\data_unwrap_matlab.mat", + # mdict=dict(data=data_)) + + y = np.linspace(0, np.size(data_, axis=1)-1, + np.size(data_, axis=1)) + x = np.transpose(y) + for i_chan in range(np.size(data_, axis=0)): + poly_coeffs = np.polyfit(x,data_[i_chan, :] ,3) + tmp_ph = data_[i_chan, :] - np.polyval(poly_coeffs,x) + data_[i_chan, :] = tmp_ph + + # detrend_data = scipy.io.loadmat(r"C:\Users\spork\Desktop\data_detrend_python.mat") + + # test2 = abs(detrend_data['data'] - data_) <= thresh + # test2.all() + + print('Removing phase mean') + # scipy.io.savemat(file_name = r"C:\Users\spork\Desktop\data_detrend_matlab.mat", + # mdict=dict(data=data_)) + + mrph = np.mean(data_,axis=1); + for i_chan in range(np.size(data_, axis=0)): + data_[i_chan,:]=(data_[i_chan,:]-mrph[i_chan]) + + # mean_data = scipy.io.loadmat(r"C:\Users\spork\Desktop\data_mean_python.mat") + + # test3 = abs(mean_data['data'] - data_) <= thresh + # test3.all() + + print('Removing phase outliers') + # scipy.io.savemat(file_name = r"C:\Users\spork\Desktop\data_mean_matlab.mat", + # mdict=dict(data=data_)) + + ph_out_thr=3; # always set to "3" per Kathy & Gabriele Oct 12 2012 + sdph=np.std(data_,1, ddof = 1); #set ddof to 1 to mimic matlab + n_ph_out = np.zeros(np.size(data_, axis=0), dtype= np.int8) + + for i_chan in range(np.size(data_, axis=0)): + outliers = np.where(np.abs(data_[i_chan,:]) > + (ph_out_thr*sdph[i_chan])) + outliers = outliers[0] + if len(outliers) > 0: + if outliers[0] == 0: + outliers = outliers[1:] + if len(outliers) > 0: + if outliers[-1] == np.size(data_, axis=1) - 1: + outliers = outliers[:-1] + n_ph_out[i_chan] = int(len(outliers)) + for i_pt in range(n_ph_out[i_chan]): + j_pt = outliers[i_pt] + data_[i_chan,j_pt] = ( + (data_[i_chan,j_pt-1] + + data_[i_chan,j_pt+1])/2) + + # outlier_data = scipy.io.loadmat(r"C:\Users\spork\Desktop\data_outliers_python.mat") + + # test4 = 
abs(outlier_data['data'] - data_) <= thresh + # test4.all() # swap channels to match new wavelength order for i_chan in range(0, len(data_), 2): diff --git a/tutorials/preprocessing/plot_80_boxy_processing.py b/tutorials/preprocessing/plot_80_boxy_processing.py index c98bd029847..8932bc4338c 100644 --- a/tutorials/preprocessing/plot_80_boxy_processing.py +++ b/tutorials/preprocessing/plot_80_boxy_processing.py @@ -27,7 +27,7 @@ boxy_data_folder = mne.datasets.boxy_example.data_path() boxy_raw_dir = os.path.join(boxy_data_folder, 'Participant-1') -raw_intensity = mne.io.read_raw_boxy(boxy_raw_dir, 'AC', verbose=True).load_data() +raw_intensity = mne.io.read_raw_boxy(boxy_raw_dir, 'Ph', verbose=True).load_data() ###separate data based on montages### mtg_a_indices = [i_index for i_index,i_label in enumerate(raw_intensity.info['ch_names']) From 93ce003c982db6efdecf62877bc424fbd6b07316 Mon Sep 17 00:00:00 2001 From: Jonathan Kuziek Date: Fri, 22 May 2020 11:01:01 -0600 Subject: [PATCH 095/167] added conversion of phase to pico seconds --- mne/io/boxy/boxy.py | 13 ++++++++++++- 1 file changed, 12 insertions(+), 1 deletion(-) diff --git a/mne/io/boxy/boxy.py b/mne/io/boxy/boxy.py index 0c0fccc5093..f2ee8544a42 100644 --- a/mne/io/boxy/boxy.py +++ b/mne/io/boxy/boxy.py @@ -242,6 +242,7 @@ def __init__(self, fname, datatype='AC', preload=False, verbose=None): mtg_end = [] mtg_src_num = [] mtg_det_num = [] + mtg_mdf = [] blk_num = [len(blk) for blk in blk_names] for mtg_num, i_mtg in enumerate(mtg_chan_num, 0): start = int(np.sum(mtg_chan_num[:mtg_num])) @@ -254,6 +255,9 @@ def __init__(self, fname, datatype='AC', preload=False, verbose=None): # get source and detector numbers for each montage mtg_src_num.append(source_num[start_blk]) mtg_det_num.append(detect_num[start_blk]) + # get modulation frequency for each channel and montage + # assuming modulation freq in MHz + mtg_mdf.append([int(chan_mdf)*1e6 for chan_mdf in chan_modulation[start:end]]) for i_type in data_types: 
for i_coord in range(start, end): boxy_coords.append(np.mean( @@ -328,7 +332,7 @@ def __init__(self, fname, datatype='AC', preload=False, verbose=None): temp_other = np.asarray(boxy_coords[i_chan][9:], dtype=np.float64) info['chs'][i_chan]['loc'] = np.concatenate((temp_ch_src_det, temp_other), axis=0) - + raw_extras = {'source_num': source_num, 'detect_num': detect_num, 'start_line': start_line, @@ -338,6 +342,7 @@ def __init__(self, fname, datatype='AC', preload=False, verbose=None): 'montages': mtg_names, 'blocks': blk_names, 'data_types': data_types, + 'mtg_mdf': mtg_mdf, } ###check to make sure data is the same length for each file @@ -395,6 +400,7 @@ def _read_segment_file(self, data, idx, fi, start, stop, cals, mult): data_types = self._raw_extras[fi]['data_types'] montages = self._raw_extras[fi]['montages'] blocks = self._raw_extras[fi]['blocks'] + mtg_mdf = self._raw_extras[fi]['mtg_mdf'] boxy_files = self._raw_extras[fi]['files']['*.[000-999]*'] event_fname = os.path.join(self._filenames[fi], 'evt') @@ -606,6 +612,11 @@ def _read_segment_file(self, data, idx, fi, start, stop, cals, mult): # test4 = abs(outlier_data['data'] - data_) <= thresh # test4.all() + + #convert phase to pico seconds + for i_chan in range(np.size(data_, axis=0)): + data_[i_chan,:] = ((1e12*data_[i_chan,:])/ + (360*mtg_mdf[i_mtg][i_chan])) # swap channels to match new wavelength order for i_chan in range(0, len(data_), 2): From 47c056516bd244781eecdca318178d069a3a4fbf Mon Sep 17 00:00:00 2001 From: Jonathan Kuziek Date: Fri, 22 May 2020 13:53:23 -0600 Subject: [PATCH 096/167] initial push to create PR --- .../preprocessing/plot_80_boxy_processing.py | 82 +++++++++++++++++-- 1 file changed, 75 insertions(+), 7 deletions(-) diff --git a/tutorials/preprocessing/plot_80_boxy_processing.py b/tutorials/preprocessing/plot_80_boxy_processing.py index c98bd029847..e8256768e2a 100644 --- a/tutorials/preprocessing/plot_80_boxy_processing.py +++ 
b/tutorials/preprocessing/plot_80_boxy_processing.py @@ -261,6 +261,16 @@ # # First we extract the events of interest and visualise them to ensure they are # # correct. +# all montages +all_mtg_events = mne.find_events(raw_intensity, stim_channel=['Markers a','Markers b']) + +fig = mne.viz.plot_events(all_mtg_events) +fig.subplots_adjust(right=0.7) # make room for the legend + +raw_intensity.plot(events=all_mtg_events, start=0, duration=10,color='gray', + event_color={1: 'r', 2: 'b', 1000: 'k', 2000: 'k'}) + +# montage a mtg_a_events = mne.find_events(mtg_a_intensity, stim_channel='Markers a') fig = mne.viz.plot_events(mtg_a_events) @@ -269,6 +279,7 @@ mtg_a_intensity.plot(events=mtg_a_events, start=0, duration=10,color='gray', event_color={1: 'r', 2: 'b', 1000: 'k'}) +# montage b mtg_b_events = mne.find_events(mtg_b_intensity, stim_channel='Markers b') fig = mne.viz.plot_events(mtg_b_events) @@ -286,6 +297,15 @@ reject_criteria = None tmin, tmax = -0.2, 1 +# all montage +all_mtg_haemo_epochs = mne.Epochs(raw_haemo, all_mtg_events, + tmin=tmin, tmax=tmax, + reject=reject_criteria, reject_by_annotation=False, + proj=True, baseline=(None, 0), preload=True, + detrend=None, verbose=True) +all_mtg_haemo_epochs.plot_drop_log() + +# montage a mtg_a_haemo_epochs = mne.Epochs(raw_haemo_a, mtg_a_events, tmin=tmin, tmax=tmax, reject=reject_criteria, reject_by_annotation=False, @@ -293,7 +313,7 @@ detrend=None, verbose=True) mtg_a_haemo_epochs.plot_drop_log() - +#montage b mtg_b_haemo_epochs = mne.Epochs(raw_haemo_b, mtg_b_events, tmin=tmin, tmax=tmax, reject=reject_criteria, reject_by_annotation=False, @@ -303,6 +323,16 @@ #get epochs from the raw intensities + +# all montages +all_mtg_epochs = mne.Epochs(raw_intensity, all_mtg_events, + event_id=dict(event_1=1,event_2=2), + tmin=tmin, tmax=tmax, + reject=None, reject_by_annotation=False, + proj=False, baseline=(-0.2, 0), preload=True, + detrend=None, verbose=True) + +#montage a mtg_a_epochs = mne.Epochs(mtg_a_intensity, 
mtg_a_events, event_id=dict(event_1=1,event_2=2), tmin=tmin, tmax=tmax, @@ -310,6 +340,7 @@ proj=False, baseline=(-0.2, 0), preload=True, detrend=None, verbose=True) +#montage b mtg_b_epochs = mne.Epochs(mtg_b_intensity, mtg_b_events, event_id=dict(event_1=1,event_2=2), tmin=tmin, tmax=tmax, @@ -318,9 +349,16 @@ detrend=None, verbose=True) #two ways to plot epochs, should be the same + +#all montages +fig = mne.viz.plot_epochs(all_mtg_epochs,n_epochs=5,n_channels=5, scalings='auto') +fig = all_mtg_epochs.plot(n_epochs=5,n_channels=5, scalings='auto') + +#montage a fig = mne.viz.plot_epochs(mtg_a_epochs,n_epochs=5,n_channels=5, scalings='auto') fig = mtg_a_epochs.plot(n_epochs=5,n_channels=5, scalings='auto') +#montage b fig = mne.viz.plot_epochs(mtg_b_epochs,n_epochs=5,n_channels=5, scalings='auto') fig = mtg_b_epochs.plot(n_epochs=5,n_channels=5, scalings='auto') @@ -336,6 +374,17 @@ # # the HbO peak. #haemo plots + +# all montages +all_mtg_haemo_epochs['1'].plot_image(combine='mean', vmin=-30, vmax=30, + ts_args=dict(ylim=dict(hbo=[-15, 15], + hbr=[-15, 15]))) + +all_mtg_haemo_epochs['2'].plot_image(combine='mean', vmin=-30, vmax=30, + ts_args=dict(ylim=dict(hbo=[-15, 15], + hbr=[-15, 15]))) + +# montage a mtg_a_haemo_epochs['1'].plot_image(combine='mean', vmin=-30, vmax=30, ts_args=dict(ylim=dict(hbo=[-15, 15], hbr=[-15, 15]))) @@ -344,6 +393,7 @@ ts_args=dict(ylim=dict(hbo=[-15, 15], hbr=[-15, 15]))) +# montage b mtg_b_haemo_epochs['1'].plot_image(combine='mean', vmin=-30, vmax=30, ts_args=dict(ylim=dict(hbo=[-15, 15], hbr=[-15, 15]))) @@ -354,6 +404,17 @@ #raw epochs #separate first and last detectors + +# all montages +mtg_a_first_det = ([i_index for i_index,i_label in + enumerate(mtg_a_epochs.info['ch_names']) if + re.search(r'_D[1-4]', i_label)]) + +mtg_a_last_det = ([i_index for i_index,i_label in + enumerate(mtg_a_epochs.info['ch_names']) if + re.search(r'_D[5-8]', i_label)]) + +# montage a mtg_a_first_det = ([i_index for i_index,i_label in 
enumerate(mtg_a_epochs.info['ch_names']) if re.search(r'_D[1-4]', i_label)]) @@ -362,6 +423,7 @@ enumerate(mtg_a_epochs.info['ch_names']) if re.search(r'_D[5-8]', i_label)]) +#montage b mtg_b_first_det = ([i_index for i_index,i_label in enumerate(mtg_b_epochs.info['ch_names']) if re.search(r'_D(9|1[0-2])', i_label)]) @@ -371,6 +433,8 @@ re.search(r'_D1[3-6]', i_label)]) #plot our two events for both montages + +# all montages fig = mtg_a_epochs['event_1'].plot_image(combine='mean', vmin=-20, vmax=20, colorbar=True, title='Montage A Event 1', group_by=dict(FIRST_DET=mtg_a_first_det, @@ -381,15 +445,19 @@ group_by=dict(FIRST_DET=mtg_a_first_det, LAST_DET=mtg_a_last_det)) +# montage a +fig = mtg_a_epochs['event_1'].plot_image(combine='mean', vmin=-20, vmax=20, + colorbar=True, title='Montage A Event 1') + +fig = mtg_a_epochs['event_2'].plot_image(combine='mean', vmin=-20, vmax=20, + colorbar=True, title='Montage A Event 2') + +# montage b fig = mtg_b_epochs['event_1'].plot_image(combine='mean', vmin=-20, vmax=20, - colorbar=True, title='Montage B Event 1', - group_by=dict(FIRST_DET=mtg_b_first_det, - LAST_DET=mtg_b_last_det)) + colorbar=True, title='Montage B Event 1') fig = mtg_b_epochs['event_2'].plot_image(combine='mean', vmin=-20, vmax=20, - colorbar=True, title='Montage B Event 2', - group_by=dict(FIRST_DET=mtg_b_first_det, - LAST_DET=mtg_b_last_det)) + colorbar=True, title='Montage B Event 2') # ############################################################################### # # View consistency of responses across channels From 2e91987376e4275f6c63613da9ec965b22c0c0e3 Mon Sep 17 00:00:00 2001 From: Jonathan Kuziek Date: Tue, 26 May 2020 14:49:38 -0600 Subject: [PATCH 097/167] tutorial will now combine montages and plot results of this combination --- mne/io/boxy/boxy.py | 22 +++++++++++++++++++++- 1 file changed, 21 insertions(+), 1 deletion(-) diff --git a/mne/io/boxy/boxy.py b/mne/io/boxy/boxy.py index 167c22fa90c..78f52572580 100644 --- a/mne/io/boxy/boxy.py 
+++ b/mne/io/boxy/boxy.py @@ -411,7 +411,7 @@ def _read_segment_file(self, data, idx, fi, start, stop, cals, mult): for file_num, i_file in enumerate(event_files[key]): event_data.append(scipy.io.loadmat( - event_files[key][0])['event']) + event_files[key][file_num])['event']) if event_data != []: print('Event file found!') else: print('No event file found. Using digaux!') @@ -565,7 +565,27 @@ def _read_segment_file(self, data, idx, fi, start, stop, cals, mult): #so we can send values between 1-255 #we'll multiply our block start/end markers by 1000 to ensure #we aren't within the 1-255 range + # import pdb + # pdb.set_trace() + # if i_mtg == 0: + # block_markers[i_blk][100:200] = 1 + # block_markers[i_blk][400:600] = 1 + # block_markers[i_blk][700:900] = 1 + # block_markers[i_blk][1000:1200] = 1 + # block_markers[i_blk][1300:1500] = 1 + # elif i_mtg == 1: + # block_markers[i_blk][100:200] = 2 + # block_markers[i_blk][400:600] = 2 + # block_markers[i_blk][700:900] = 2 + # block_markers[i_blk][1000:1200] = 2 + # block_markers[i_blk][1300:1500] = 2 block_markers[i_blk][-1] = int(blk_name) * 1000 + + # # indicate which montage our markers belong to + # block_markers[i_blk] = ([(i_mrk+((i_mtg+1)*10000)) + # if i_mrk > 0 + # else 0 for i_mrk + # in block_markers[i_blk]]) all_blocks.append(data_) From 5c7b3c00c705265224f62c53c69956b8038658d2 Mon Sep 17 00:00:00 2001 From: Jonathan Kuziek Date: Tue, 26 May 2020 14:57:43 -0600 Subject: [PATCH 098/167] added wrong file to earlier commit --- .../preprocessing/plot_80_boxy_processing.py | 490 ++++++++---------- 1 file changed, 205 insertions(+), 285 deletions(-) diff --git a/tutorials/preprocessing/plot_80_boxy_processing.py b/tutorials/preprocessing/plot_80_boxy_processing.py index e8256768e2a..c743a0597bc 100644 --- a/tutorials/preprocessing/plot_80_boxy_processing.py +++ b/tutorials/preprocessing/plot_80_boxy_processing.py @@ -29,17 +29,11 @@ boxy_raw_dir = os.path.join(boxy_data_folder, 'Participant-1') raw_intensity = 
mne.io.read_raw_boxy(boxy_raw_dir, 'AC', verbose=True).load_data() -###separate data based on montages### -mtg_a_indices = [i_index for i_index,i_label in enumerate(raw_intensity.info['ch_names']) - if re.search(r'(S[1-5]_|\bMarkers a\b)', i_label)] -mtg_b_indices = [i_index for i_index,i_label in enumerate(raw_intensity.info['ch_names']) - if re.search(r'(S([6-9]|10)_|\bMarkers b\b)', i_label)] - -mtg_a_intensity = raw_intensity.copy() -mtg_b_intensity = raw_intensity.copy() - -mtg_a_intensity.pick(mtg_a_indices) -mtg_b_intensity.pick(mtg_b_indices) +# get channel indices for our two montages +mtg_a = [raw_intensity.ch_names[i_index] for i_index,i_label in enumerate(raw_intensity.info['ch_names']) + if re.search(r'S[1-5]_', i_label)] +mtg_b = [raw_intensity.ch_names[i_index] for i_index,i_label in enumerate(raw_intensity.info['ch_names']) + if re.search(r'S([6-9]|10)_', i_label)] # ############################################################################### # # View location of sensors over brain surface @@ -52,6 +46,7 @@ subjects_dir = os.path.dirname(mne.datasets.fetch_fsaverage()) +# plot all montages fig = mne.viz.create_3d_figure(size=(800, 600), bgcolor='white') fig = mne.viz.plot_alignment(raw_intensity.info, show_axes=True, @@ -65,8 +60,9 @@ fig=fig) mne.viz.set_3d_view(figure=fig, azimuth=20, elevation=55, distance=0.6) +# montage A fig = mne.viz.create_3d_figure(size=(800, 600), bgcolor='white') -fig = mne.viz.plot_alignment(mtg_a_intensity.info, +fig = mne.viz.plot_alignment(raw_intensity.copy().pick_channels(mtg_a).info, show_axes=True, subject='fsaverage', trans='fsaverage', @@ -78,8 +74,9 @@ fig=fig) mne.viz.set_3d_view(figure=fig, azimuth=20, elevation=55, distance=0.6) +# montage B fig = mne.viz.create_3d_figure(size=(800, 600), bgcolor='white') -fig = mne.viz.plot_alignment(mtg_b_intensity.info, +fig = mne.viz.plot_alignment(raw_intensity.copy().pick_channels(mtg_b).info, show_axes=True, subject='fsaverage', trans='fsaverage', @@ -91,7 +88,6 @@ 
fig=fig) mne.viz.set_3d_view(figure=fig, azimuth=20, elevation=55, distance=0.6) - # ############################################################################### # # Selecting channels appropriate for detecting neural responses # # ------------------------------------------------------------- @@ -102,28 +98,15 @@ # # To achieve this we pick all the channels that are not considered to be short. picks = mne.pick_types(raw_intensity.info, meg=False, fnirs=True, stim=True) -picks_a = mne.pick_types(mtg_a_intensity.info, meg=False, fnirs=True, stim=True) -picks_b = mne.pick_types(mtg_b_intensity.info, meg=False, fnirs=True, stim=True) dists = mne.preprocessing.nirs.source_detector_distances( raw_intensity.info, picks=picks) -dists_a = mne.preprocessing.nirs.source_detector_distances( - mtg_a_intensity.info, picks=picks_a) -dists_b = mne.preprocessing.nirs.source_detector_distances( - mtg_b_intensity.info, picks=picks_b) raw_intensity.pick(picks[dists < 0.08]) -mtg_a_intensity.pick(picks_a[dists_a < 0.08]) -mtg_b_intensity.pick(picks_b[dists_b < 0.08]) scalings = dict(fnirs_raw=1e2) raw_intensity.plot(n_channels=5, duration=20, scalings=100, show_scrollbars=True) -mtg_a_intensity.plot(n_channels=5, - duration=20, scalings=100, show_scrollbars=True) -mtg_b_intensity.plot(n_channels=5, - duration=20, scalings=100, show_scrollbars=True) - # ############################################################################### # # Converting from raw intensity to optical density @@ -132,16 +115,9 @@ # # The raw intensity values are then converted to optical density. 
raw_od = mne.preprocessing.nirs.optical_density(raw_intensity) -raw_od_a = mne.preprocessing.nirs.optical_density(mtg_a_intensity) -raw_od_b = mne.preprocessing.nirs.optical_density(mtg_b_intensity) raw_od.plot(n_channels=len(raw_od.ch_names), duration=500, show_scrollbars=False) -raw_od_a.plot(n_channels=len(raw_od_a.ch_names), - duration=500, show_scrollbars=False) -raw_od_b.plot(n_channels=len(raw_od_b.ch_names), - duration=500, show_scrollbars=False) - # ############################################################################### # # Evaluating the quality of the data @@ -157,29 +133,16 @@ # # coupling index. sci = mne.preprocessing.nirs.scalp_coupling_index(raw_od) -sci_a = mne.preprocessing.nirs.scalp_coupling_index(raw_od_a) -sci_b = mne.preprocessing.nirs.scalp_coupling_index(raw_od_b) fig, ax = plt.subplots() ax.hist(sci) ax.set(xlabel='Scalp Coupling Index', ylabel='Count', xlim=[0, 1]) -fig, ax = plt.subplots() -ax.hist(sci_a) -ax.set(xlabel='Scalp Coupling Index-A', ylabel='Count', xlim=[0, 1]) - -fig, ax = plt.subplots() -ax.hist(sci_b) -ax.set(xlabel='Scalp Coupling Index-B', ylabel='Count', xlim=[0, 1]) - - # ############################################################################### # # In this example we will mark all channels with a SCI less than 0.5 as bad # # (this dataset is quite clean, so no channels are marked as bad). raw_od.info['bads'] = list(compress(raw_od.ch_names, sci < 0.5)) -raw_od_a.info['bads'] = list(compress(raw_od_a.ch_names, sci_a < 0.5)) -raw_od_b.info['bads'] = list(compress(raw_od_b.ch_names, sci_b < 0.5)) # ############################################################################### # # At this stage it is appropriate to inspect your data @@ -198,19 +161,10 @@ # # the modified Beer-Lambert law. 
raw_haemo = mne.preprocessing.nirs.beer_lambert_law(raw_od) -raw_haemo_a = mne.preprocessing.nirs.beer_lambert_law(raw_od_a) -raw_haemo_b = mne.preprocessing.nirs.beer_lambert_law(raw_od_b) raw_haemo.plot(n_channels=len(raw_haemo.ch_names), duration=500, show_scrollbars=False) -raw_haemo_a.plot(n_channels=len(raw_haemo_a.ch_names), - duration=500, show_scrollbars=False) - -raw_haemo_b.plot(n_channels=len(raw_haemo_b.ch_names), - duration=500, show_scrollbars=False) - - # ############################################################################### # # Removing heart rate from signal # # ------------------------------- @@ -230,26 +184,6 @@ fig.suptitle('After filtering', weight='bold', size='x-large') fig.subplots_adjust(top=0.88) - -fig = raw_haemo_a.plot_psd(average=True) -fig.suptitle('Before filtering Montage A', weight='bold', size='x-large') -fig.subplots_adjust(top=0.88) -raw_haemo_a = raw_haemo_a.filter(0.05, 0.7, h_trans_bandwidth=0.2, - l_trans_bandwidth=0.02) -fig = raw_haemo_a.plot_psd(average=True) -fig.suptitle('After filtering Montage A', weight='bold', size='x-large') -fig.subplots_adjust(top=0.88) - - -fig = raw_haemo_b.plot_psd(average=True) -fig.suptitle('Before filtering Montage B', weight='bold', size='x-large') -fig.subplots_adjust(top=0.88) -raw_haemo_b = raw_haemo_b.filter(0.05, 0.7, h_trans_bandwidth=0.2, - l_trans_bandwidth=0.02) -fig = raw_haemo_b.plot_psd(average=True) -fig.suptitle('After filtering Montage B', weight='bold', size='x-large') -fig.subplots_adjust(top=0.88) - # ############################################################################### # # Extract epochs # # -------------- @@ -261,32 +195,34 @@ # # First we extract the events of interest and visualise them to ensure they are # # correct. 
-# all montages -all_mtg_events = mne.find_events(raw_intensity, stim_channel=['Markers a','Markers b']) - -fig = mne.viz.plot_events(all_mtg_events) -fig.subplots_adjust(right=0.7) # make room for the legend +# Since our events and timings for this data set are the same across montages, +# we are going to find events for each montage separately and combine them later -raw_intensity.plot(events=all_mtg_events, start=0, duration=10,color='gray', - event_color={1: 'r', 2: 'b', 1000: 'k', 2000: 'k'}) +# Montage A Events +mtg_a_events = mne.find_events(raw_intensity, stim_channel=['Markers a']) -# montage a -mtg_a_events = mne.find_events(mtg_a_intensity, stim_channel='Markers a') +mtg_a_event_dict = {'Montage_A/Event_1': 1, 'Montage_A/Event_2': 2, + 'Montage A/Block 1 End': 1000, 'Montage A/Block 2 End': 2000} fig = mne.viz.plot_events(mtg_a_events) fig.subplots_adjust(right=0.7) # make room for the legend -mtg_a_intensity.plot(events=mtg_a_events, start=0, duration=10,color='gray', - event_color={1: 'r', 2: 'b', 1000: 'k'}) +raw_intensity.copy().pick_channels(mtg_a).plot( + events=mtg_a_events, start=0, duration=10,color='gray', + event_color={1: 'r', 2: 'b', 1000: 'k', 2000: 'k'}) + +# Montage B Events +mtg_b_events = mne.find_events(raw_intensity, stim_channel=['Markers b']) -# montage b -mtg_b_events = mne.find_events(mtg_b_intensity, stim_channel='Markers b') +mtg_b_event_dict = {'Montage_B/Event_1': 1, 'Montage_B/Event_2': 2, + 'Montage B/Block 1 End': 1000, 'Montage B/Block 2 End': 2000} fig = mne.viz.plot_events(mtg_b_events) fig.subplots_adjust(right=0.7) # make room for the legend -mtg_b_intensity.plot(events=mtg_b_events, start=0, duration=10,color='gray', - event_color={1: 'r', 2: 'b', 2000: 'k'}) +raw_intensity.copy().pick_channels(mtg_b).plot( + events=mtg_b_events, start=0, duration=10,color='gray', + event_color={1: 'r', 2: 'b', 1000: 'k', 2000: 'k'}) # ############################################################################### # # Next we define 
the range of our epochs, the rejection criteria, @@ -295,73 +231,70 @@ # reject_criteria = dict(hbo=80e-6) reject_criteria = None -tmin, tmax = -0.2, 1 +tmin, tmax = -0.2, 2 -# all montage -all_mtg_haemo_epochs = mne.Epochs(raw_haemo, all_mtg_events, - tmin=tmin, tmax=tmax, - reject=reject_criteria, reject_by_annotation=False, - proj=True, baseline=(None, 0), preload=True, - detrend=None, verbose=True) -all_mtg_haemo_epochs.plot_drop_log() +# Montage A +mtg_a = [i_index for i_index,i_label in enumerate(raw_haemo.info['ch_names']) + if re.search(r'S[1-5]_', i_label)] -# montage a -mtg_a_haemo_epochs = mne.Epochs(raw_haemo_a, mtg_a_events, +mtg_a_haemo_epochs = mne.Epochs(raw_haemo, + mtg_a_events, event_id = mtg_a_event_dict, tmin=tmin, tmax=tmax, reject=reject_criteria, reject_by_annotation=False, proj=True, baseline=(None, 0), preload=True, - detrend=None, verbose=True) + detrend=None, verbose=True, event_repeated='drop') mtg_a_haemo_epochs.plot_drop_log() -#montage b -mtg_b_haemo_epochs = mne.Epochs(raw_haemo_b, mtg_b_events, - tmin=tmin, tmax=tmax, - reject=reject_criteria, reject_by_annotation=False, - proj=True, baseline=(None, 0), preload=True, - detrend=None, verbose=True) -mtg_b_haemo_epochs.plot_drop_log() - - #get epochs from the raw intensities - -# all montages -all_mtg_epochs = mne.Epochs(raw_intensity, all_mtg_events, - event_id=dict(event_1=1,event_2=2), +mtg_a_epochs = mne.Epochs(raw_intensity, + mtg_a_events, event_id=mtg_a_event_dict, tmin=tmin, tmax=tmax, reject=None, reject_by_annotation=False, proj=False, baseline=(-0.2, 0), preload=True, detrend=None, verbose=True) -#montage a -mtg_a_epochs = mne.Epochs(mtg_a_intensity, mtg_a_events, - event_id=dict(event_1=1,event_2=2), +#two ways to plot epochs, should be the same +fig = mne.viz.plot_epochs(mtg_a_haemo_epochs,n_epochs=5,n_channels=5, + scalings='auto', picks = mtg_a) +fig = mtg_a_haemo_epochs.plot(n_epochs=5,n_channels=5, scalings='auto', + picks = mtg_a) + +fig = 
mne.viz.plot_epochs(mtg_a_epochs,n_epochs=5,n_channels=5, + scalings='auto', picks = mtg_a) +fig = mtg_a_epochs.plot(n_epochs=5,n_channels=5, scalings='auto', + picks = mtg_a) + + +# Montage B +mtg_b = [i_index for i_index,i_label in enumerate(raw_haemo.info['ch_names']) + if re.search(r'S([6-9]|10)_', i_label)] + +mtg_b_haemo_epochs = mne.Epochs(raw_haemo, + mtg_b_events, event_id = mtg_b_event_dict, tmin=tmin, tmax=tmax, - reject=None, reject_by_annotation=False, - proj=False, baseline=(-0.2, 0), preload=True, - detrend=None, verbose=True) + reject=reject_criteria, reject_by_annotation=False, + proj=True, baseline=(None, 0), preload=True, + detrend=None, verbose=True, event_repeated='drop') +mtg_b_haemo_epochs.plot_drop_log() -#montage b -mtg_b_epochs = mne.Epochs(mtg_b_intensity, mtg_b_events, - event_id=dict(event_1=1,event_2=2), +#get epochs from the raw intensities +mtg_b_epochs = mne.Epochs(raw_intensity, + mtg_b_events, event_id=mtg_b_event_dict, tmin=tmin, tmax=tmax, reject=None, reject_by_annotation=False, proj=False, baseline=(-0.2, 0), preload=True, detrend=None, verbose=True) #two ways to plot epochs, should be the same +fig = mne.viz.plot_epochs(mtg_b_haemo_epochs,n_epochs=5,n_channels=5, + scalings='auto', picks = mtg_b) +fig = mtg_b_haemo_epochs.plot(n_epochs=5,n_channels=5, scalings='auto', + picks = mtg_b) -#all montages -fig = mne.viz.plot_epochs(all_mtg_epochs,n_epochs=5,n_channels=5, scalings='auto') -fig = all_mtg_epochs.plot(n_epochs=5,n_channels=5, scalings='auto') - -#montage a -fig = mne.viz.plot_epochs(mtg_a_epochs,n_epochs=5,n_channels=5, scalings='auto') -fig = mtg_a_epochs.plot(n_epochs=5,n_channels=5, scalings='auto') - -#montage b -fig = mne.viz.plot_epochs(mtg_b_epochs,n_epochs=5,n_channels=5, scalings='auto') -fig = mtg_b_epochs.plot(n_epochs=5,n_channels=5, scalings='auto') - +fig = mne.viz.plot_epochs(mtg_b_epochs,n_epochs=5,n_channels=5, + scalings='auto', picks = mtg_b) +fig = mtg_b_epochs.plot(n_epochs=5,n_channels=5, 
scalings='auto', + picks = mtg_b) # ############################################################################### # # View consistency of responses across trials @@ -374,90 +307,70 @@ # # the HbO peak. #haemo plots - -# all montages -all_mtg_haemo_epochs['1'].plot_image(combine='mean', vmin=-30, vmax=30, +# Montage A +hbo = [i_index for i_index,i_label + in enumerate(mtg_a_haemo_epochs.info['ch_names']) + if re.search(r'S[1-5]_D[0-9] hbo', i_label)] + +hbr = [i_index for i_index,i_label + in enumerate(mtg_a_haemo_epochs.info['ch_names']) + if re.search(r'S[1-5]_D[0-9] hbr', i_label)] + +mtg_a_haemo_epochs['Montage_A/Event_1'].plot_image( + combine='mean', vmin=-30, vmax=30, + group_by = {'Oxy':hbo,'De-Oxy':hbr}, ts_args=dict(ylim=dict(hbo=[-15, 15], hbr=[-15, 15]))) -all_mtg_haemo_epochs['2'].plot_image(combine='mean', vmin=-30, vmax=30, +mtg_a_haemo_epochs['Montage_A/Event_2'].plot_image( + combine='mean', vmin=-30, vmax=30, + group_by = {'Oxy':hbo,'De-Oxy':hbr}, ts_args=dict(ylim=dict(hbo=[-15, 15], hbr=[-15, 15]))) -# montage a -mtg_a_haemo_epochs['1'].plot_image(combine='mean', vmin=-30, vmax=30, - ts_args=dict(ylim=dict(hbo=[-15, 15], - hbr=[-15, 15]))) +# raw epochs +fig = mtg_a_epochs['Montage_A/Event_1'].plot_image( + combine='mean', vmin=-20, vmax=20, + picks = mtg_a, colorbar=True, + title='Montage A Event 1') -mtg_a_haemo_epochs['2'].plot_image(combine='mean', vmin=-30, vmax=30, - ts_args=dict(ylim=dict(hbo=[-15, 15], - hbr=[-15, 15]))) +fig = mtg_a_epochs['Montage_A/Event_2'].plot_image( + combine='mean', vmin=-20, vmax=20, + picks = mtg_a, colorbar=True, + title='Montage A Event 2') + + +# Montage B +hbo = [i_index for i_index,i_label + in enumerate(mtg_a_haemo_epochs.info['ch_names']) + if re.search(r'S([6-9]|10)_D([0-9]|1[0-6]) hbo', i_label)] -# montage b -mtg_b_haemo_epochs['1'].plot_image(combine='mean', vmin=-30, vmax=30, +hbr = [i_index for i_index,i_label + in enumerate(mtg_a_haemo_epochs.info['ch_names']) + if 
re.search(r'S([6-9]|10)_D([0-9]|1[0-6]) hbr', i_label)] + +mtg_b_haemo_epochs['Montage_B/Event_1'].plot_image( + combine='mean', vmin=-30, vmax=30, + group_by = {'Oxy':hbo,'De-Oxy':hbr}, ts_args=dict(ylim=dict(hbo=[-15, 15], hbr=[-15, 15]))) -mtg_b_haemo_epochs['2'].plot_image(combine='mean', vmin=-30, vmax=30, +mtg_b_haemo_epochs['Montage_B/Event_2'].plot_image( + combine='mean', vmin=-30, vmax=30, + group_by = {'Oxy':hbo,'De-Oxy':hbr}, ts_args=dict(ylim=dict(hbo=[-15, 15], hbr=[-15, 15]))) -#raw epochs -#separate first and last detectors - -# all montages -mtg_a_first_det = ([i_index for i_index,i_label in - enumerate(mtg_a_epochs.info['ch_names']) if - re.search(r'_D[1-4]', i_label)]) - -mtg_a_last_det = ([i_index for i_index,i_label in - enumerate(mtg_a_epochs.info['ch_names']) if - re.search(r'_D[5-8]', i_label)]) +# raw epochs +fig = mtg_b_epochs['Montage_B/Event_1'].plot_image( + combine='mean', vmin=-20, vmax=20, + picks = mtg_b, colorbar=True, + title='Montage B Event 1') -# montage a -mtg_a_first_det = ([i_index for i_index,i_label in - enumerate(mtg_a_epochs.info['ch_names']) if - re.search(r'_D[1-4]', i_label)]) - -mtg_a_last_det = ([i_index for i_index,i_label in - enumerate(mtg_a_epochs.info['ch_names']) if - re.search(r'_D[5-8]', i_label)]) - -#montage b -mtg_b_first_det = ([i_index for i_index,i_label in - enumerate(mtg_b_epochs.info['ch_names']) if - re.search(r'_D(9|1[0-2])', i_label)]) - -mtg_b_last_det = ([i_index for i_index,i_label in - enumerate(mtg_b_epochs.info['ch_names']) if - re.search(r'_D1[3-6]', i_label)]) - -#plot our two events for both montages - -# all montages -fig = mtg_a_epochs['event_1'].plot_image(combine='mean', vmin=-20, vmax=20, - colorbar=True, title='Montage A Event 1', - group_by=dict(FIRST_DET=mtg_a_first_det, - LAST_DET=mtg_a_last_det)) - -fig = mtg_a_epochs['event_2'].plot_image(combine='mean', vmin=-20, vmax=20, - colorbar=True, title='Montage A Event 2', - group_by=dict(FIRST_DET=mtg_a_first_det, - 
LAST_DET=mtg_a_last_det)) - -# montage a -fig = mtg_a_epochs['event_1'].plot_image(combine='mean', vmin=-20, vmax=20, - colorbar=True, title='Montage A Event 1') - -fig = mtg_a_epochs['event_2'].plot_image(combine='mean', vmin=-20, vmax=20, - colorbar=True, title='Montage A Event 2') - -# montage b -fig = mtg_b_epochs['event_1'].plot_image(combine='mean', vmin=-20, vmax=20, - colorbar=True, title='Montage B Event 1') - -fig = mtg_b_epochs['event_2'].plot_image(combine='mean', vmin=-20, vmax=20, - colorbar=True, title='Montage B Event 2') +fig = mtg_b_epochs['Montage_B/Event_2'].plot_image( + combine='mean', vmin=-20, vmax=20, + picks = mtg_b, colorbar=True, + title='Montage B Event 2') # ############################################################################### # # View consistency of responses across channels @@ -467,12 +380,49 @@ # # pairs that we selected. All the channels in this data are located over the # # motor cortex, and all channels show a similar pattern in the data. +# individual montages fig, axes = plt.subplots(nrows=2, ncols=2, figsize=(15, 6)) clim=dict(fnirs_raw=[-20,20]) -mtg_a_epochs['event_1'].average().plot_image(axes=axes[0, 0],titles='Montage A Event 1', clim=clim) -mtg_a_epochs['event_2'].average().plot_image(axes=axes[1, 0],titles='Montage A Event 2', clim=clim) -mtg_b_epochs['event_1'].average().plot_image(axes=axes[0, 1],titles='Montage B Event 1', clim=clim) -mtg_b_epochs['event_2'].average().plot_image(axes=axes[1, 1],titles='Montage B Event 2', clim=clim) + +mtg_a_1_evoked = mtg_a_epochs['Montage_A/Event_1'].average() +mtg_a_2_evoked = mtg_a_epochs['Montage_A/Event_2'].average() +mtg_b_1_evoked = mtg_b_epochs['Montage_B/Event_1'].average() +mtg_b_2_evoked = mtg_b_epochs['Montage_B/Event_2'].average() + +mtg_a_1_evoked.plot_image(axes=axes[0, 0], picks = mtg_a, + titles='Montage A Event 1', clim=clim) +mtg_a_2_evoked.plot_image(axes=axes[1, 0], picks = mtg_a, + titles='Montage A Event 2', clim=clim) 
+mtg_b_1_evoked.plot_image(axes=axes[0, 1], picks = mtg_b, + titles='Montage B Event 1', clim=clim) +mtg_b_2_evoked.plot_image(axes=axes[1, 1], picks = mtg_b, + titles='Montage B Event 2', clim=clim) + +# Combine Montages +evoked_1 = mtg_a_epochs['Montage_A/Event_1'].average() +evoked_2 = mtg_a_epochs['Montage_A/Event_2'].average() +evoked_3 = mtg_b_epochs['Montage_B/Event_1'].average() +evoked_4 = mtg_b_epochs['Montage_B/Event_2'].average() + +mtg_a_channels = [i_index for i_index,i_label in enumerate(evoked_1.info['ch_names']) + if re.search(r'S[1-5]_', i_label)] + +mtg_b_channels = [i_index for i_index,i_label in enumerate(evoked_3.info['ch_names']) + if re.search(r'S([6-9]|10)_', i_label)] + +evoked_1._data[mtg_b_channels,:] = 0 +evoked_2._data[mtg_b_channels,:] = 0 +evoked_3._data[mtg_a_channels,:] = 0 +evoked_4._data[mtg_a_channels,:] = 0 + +evoked_event_1 = mne.combine_evoked([evoked_1,evoked_3],'equal') +evoked_event_2 = mne.combine_evoked([evoked_2,evoked_4],'equal') + +fig, axes = plt.subplots(nrows=1, ncols=2, figsize=(15, 6)) +clim=dict(fnirs_raw=[-20,20]) + +evoked_event_1.plot_image(axes=axes[0], titles='Event_1', clim=clim) +evoked_event_2.plot_image(axes=axes[1], titles='Event_2', clim=clim) # ############################################################################### # # Plot standard fNIRS response image @@ -482,21 +432,11 @@ # # both the HbO and HbR on the same figure to illustrate the relation between # # the two signals. -mtg_a_evoked_dict = {'Montage_A_Event_1': mtg_a_epochs['event_1'].average(), - 'Montage_A_Event_2': mtg_a_epochs['event_2'].average()} +evoked_dict = {'Event_1': evoked_event_1,'Event_2': evoked_event_2} -mtg_b_evoked_dict = {'Montage_B_Event_1': mtg_b_epochs['event_1'].average(), - 'Montage_B_Event_2': mtg_b_epochs['event_2'].average()} +color_dict = {'Event_1':'r','Event_2':'b'} -###this seems to what our conditions/events to have the same number of channels, -###and the same channel names. 
Maybe we can't use this to compare montages?? -###Gives an error if I try to compare both montages and events -color_dict = dict(Montage_A_Event_1='r', Montage_A_Event_2='b') -mne.viz.plot_compare_evokeds(mtg_a_evoked_dict, combine="mean", ci=0.95, - colors=color_dict) - -color_dict = dict(Montage_B_Event_1='r', Montage_B_Event_2='b') -mne.viz.plot_compare_evokeds(mtg_b_evoked_dict, combine="mean", ci=0.95, +mne.viz.plot_compare_evokeds(evoked_dict, combine="mean", ci=0.95, colors=color_dict) # ############################################################################### @@ -505,17 +445,11 @@ # # # # Next we view how the topographic activity changes throughout the response. -times = np.arange(-0.2, 1.0, 0.2) +times = np.arange(0.0, 2.0, 0.5) topomap_args = dict(extrapolate='local') -fig = mtg_a_epochs['event_1'].average().plot_joint(times=times, - topomap_args=topomap_args) -fig = mtg_a_epochs['event_2'].average().plot_joint(times=times, - topomap_args=topomap_args) -fig = mtg_b_epochs['event_1'].average().plot_joint(times=times, - topomap_args=topomap_args) -fig = mtg_b_epochs['event_2'].average().plot_joint(times=times, - topomap_args=topomap_args) +fig = evoked_event_1.plot_joint(times=times, topomap_args=topomap_args) +fig = evoked_event_2.plot_joint(times=times, topomap_args=topomap_args) # ############################################################################### # # Compare tapping of left and right hands @@ -524,59 +458,59 @@ # # Finally we generate topo maps for the left and right conditions to view # # the location of activity. First we visualise the HbO activity. 
-times = np.arange(0.0, 1.0, 0.2) -mtg_a_epochs['event_1'].average().plot_topomap(times=times, title='Montage A Event 1', **topomap_args) -mtg_a_epochs['event_2'].average().plot_topomap(times=times, title='Montage A Event 2', **topomap_args) -mtg_b_epochs['event_1'].average().plot_topomap(times=times, title='Montage B Event 1', **topomap_args) -mtg_b_epochs['event_2'].average().plot_topomap(times=times, title='Montage B Event 2', **topomap_args) +fig, axes = plt.subplots(nrows=2, ncols=4, figsize=(9, 5), + gridspec_kw=dict(width_ratios=[1, 1, 1, 0.1])) + +topomap_args = dict(extrapolate='local', size=3,res=256, sensors='k.') +times = 1.0 + +evoked_1.copy().pick(mtg_a_channels).plot_topomap(times=times, axes=axes[0,0], + colorbar=False,**topomap_args) + +evoked_2.copy().pick(mtg_a_channels).plot_topomap(times=times, axes=axes[1,0], + colorbar=False,**topomap_args) + +evoked_3.copy().pick(mtg_b_channels).plot_topomap(times=times, axes=axes[0,1], + colorbar=False,**topomap_args) + +evoked_4.copy().pick(mtg_b_channels).plot_topomap(times=times, axes=axes[1,1], + colorbar=False, **topomap_args) + +evoked_event_1.plot_topomap(times=times, axes=axes[0,2:], colorbar=True, + **topomap_args) +evoked_event_2.plot_topomap(times=times, axes=axes[1,2:], colorbar=True, + **topomap_args) + +for column, condition in enumerate( + ['Montage A', 'Montage B','Combined']): + for row, chroma in enumerate(['Event 1', 'Event 2']): + axes[row, column].set_title('{}: {}'.format(chroma, condition)) +fig.tight_layout() # ############################################################################### # # And we can plot the comparison at a single time point for two conditions. 
-fig, axes = plt.subplots(nrows=2, ncols=4, figsize=(9, 5), +fig, axes = plt.subplots(nrows=1, ncols=4, figsize=(9, 5), gridspec_kw=dict(width_ratios=[1, 1, 1, 0.1])) vmin, vmax, ts = -0.192, 0.992, 0.1 vmin = -20 vmax = 20 -mtg_a_epochs['event_1'].average().plot_topomap(times=ts, - axes=axes[0, 0], vmin=vmin, vmax=vmax, colorbar=False, - **topomap_args) +evoked_1.plot_topomap(times=ts, axes=axes[0], vmin=vmin, vmax=vmax, + colorbar=False,**topomap_args) -mtg_a_epochs['event_2'].average().plot_topomap(times=ts, - axes=axes[1, 0], vmin=vmin, vmax=vmax, colorbar=False, - **topomap_args) +evoked_2.plot_topomap(times=ts, axes=axes[1], vmin=vmin, vmax=vmax, + colorbar=False,**topomap_args) -mtg_b_epochs['event_1'].average().plot_topomap(times=ts, - axes=axes[0, 1], vmin=vmin, vmax=vmax, colorbar=False, - **topomap_args) - -mtg_b_epochs['event_2'].average().plot_topomap(times=ts, - axes=axes[1, 1], vmin=vmin, vmax=vmax, colorbar=False, - **topomap_args) - - -###can't compare events across montages, for this data set, since they -#don't have the same channel names -mtg_a_evoked_diff = mne.combine_evoked([mtg_a_epochs['event_1'].average(), - -mtg_a_epochs['event_2'].average()], +evoked_diff = mne.combine_evoked([evoked_1, -evoked_2], weights='equal') -mtg_b_evoked_diff = mne.combine_evoked([mtg_b_epochs['event_1'].average(), - -mtg_b_epochs['event_2'].average()], - weights='equal') - -mtg_a_evoked_diff.plot_topomap(times=ts, axes=axes[0, 2:], - vmin=vmin, vmax=vmax, colorbar=True, - **topomap_args) -mtg_b_evoked_diff.plot_topomap(times=ts, axes=axes[1, 2:], - vmin=vmin, vmax=vmax, colorbar=True, - **topomap_args) +evoked_diff.plot_topomap(times=ts, axes=axes[2:],vmin=vmin, vmax=vmax, + colorbar=True,**topomap_args) for column, condition in enumerate( ['Event 1', 'Event 2', 'Difference']): - for row, chroma in enumerate(['Montage A', 'Montage B']): - axes[row, column].set_title('{}: {}'.format(chroma, condition)) + axes[column].set_title('{}'.format(condition)) 
fig.tight_layout() # ############################################################################### @@ -584,24 +518,10 @@ # # driving the topographic plot above. fig, axes = plt.subplots(nrows=1, ncols=1, figsize=(6, 4)) -mne.viz.plot_evoked_topo(mtg_a_epochs['event_1'].average(), color='b', - axes=axes, legend=False) -mne.viz.plot_evoked_topo(mtg_a_epochs['event_2'].average(), color='r', - axes=axes, legend=False) - -# Tidy the legend -leg_lines = [line for line in axes.lines if line.get_c() == 'b'][:1] -leg_lines.append([line for line in axes.lines if line.get_c() == 'r'][0]) -fig.legend(leg_lines, ['Montage A Event 1', 'Montage A Event 2'], loc='lower right') - - -fig, axes = plt.subplots(nrows=1, ncols=1, figsize=(6, 4)) -mne.viz.plot_evoked_topo(mtg_b_epochs['event_1'].average(), color='b', - axes=axes, legend=False) -mne.viz.plot_evoked_topo(mtg_b_epochs['event_2'].average(), color='r', - axes=axes, legend=False) +mne.viz.plot_evoked_topo(evoked_1, color='b', axes=axes, legend=False) +mne.viz.plot_evoked_topo(evoked_2, color='r', axes=axes, legend=False) # Tidy the legend leg_lines = [line for line in axes.lines if line.get_c() == 'b'][:1] leg_lines.append([line for line in axes.lines if line.get_c() == 'r'][0]) -fig.legend(leg_lines, ['Montage A Event 1', 'Montage A Event 2'], loc='lower right') +fig.legend(leg_lines, ['Event 1', 'Event 2'], loc='lower right') From 51be83c972332161d9723fbb7241afbc12983df4 Mon Sep 17 00:00:00 2001 From: Jonathan Kuziek Date: Thu, 28 May 2020 10:51:10 -0600 Subject: [PATCH 099/167] working on adding two new data types, fnirs_ac and fnirs_ph --- mne/defaults.py | 12 +++--- mne/io/boxy/boxy.py | 41 +++++-------------- mne/io/constants.py | 4 +- mne/io/meas_info.py | 6 ++- mne/io/pick.py | 13 +++++- mne/preprocessing/nirs/_optical_density.py | 2 + mne/utils/_bunch.py | 3 -- mne/viz/raw.py | 2 +- .../preprocessing/plot_80_boxy_processing.py | 10 +++++ 9 files changed, 49 insertions(+), 44 deletions(-) diff --git a/mne/defaults.py 
b/mne/defaults.py index bbf7be5aefa..6ff15575b69 100644 --- a/mne/defaults.py +++ b/mne/defaults.py @@ -11,23 +11,24 @@ ref_meg='steelblue', misc='k', stim='k', resp='k', chpi='k', exci='k', ias='k', syst='k', seeg='saddlebrown', dipole='k', gof='k', bio='k', ecog='k', hbo='#AA3377', hbr='b', - fnirs_raw='k', fnirs_od='k', csd='k'), + fnirs_raw='k', fnirs_od='k', fnirs_ac='k', fnirs_ph='k', csd='k'), units=dict(mag='fT', grad='fT/cm', eeg='µV', eog='µV', ecg='µV', emg='µV', misc='AU', seeg='mV', dipole='nAm', gof='GOF', bio='µV', ecog='µV', hbo='µM', hbr='µM', ref_meg='fT', fnirs_raw='V', - fnirs_od='V', csd='V/m²'), + fnirs_od='V', fnirs_ac='V', fnirs_ph=u'\N{DEGREE SIGN}', csd='V/m²'), # scalings for the units scalings=dict(mag=1e15, grad=1e13, eeg=1e6, eog=1e6, emg=1e6, ecg=1e6, misc=1.0, seeg=1e3, dipole=1e9, gof=1.0, bio=1e6, ecog=1e6, hbo=1e6, hbr=1e6, ref_meg=1e15, fnirs_raw=1.0, fnirs_od=1.0, - csd=1e5), + fnirs_ac=1.0, fnirs_ph=1.0, csd=1e5), # rough guess for a good plot scalings_plot_raw=dict(mag=1e-12, grad=4e-11, eeg=20e-6, eog=150e-6, ecg=5e-4, emg=1e-3, ref_meg=1e-12, misc='auto', stim=1, resp=1, chpi=1e-4, exci=1, ias=1, syst=1, seeg=1e-4, bio=1e-6, ecog=1e-4, hbo=10e-6, hbr=10e-6, whitened=10., fnirs_raw=2e-2, - fnirs_od=2e-2, csd=20e-4), + fnirs_od=2e-2, fnirs_ac=2e-2, + fnirs_ph=180, csd=20e-4), scalings_cov_rank=dict(mag=1e12, grad=1e11, eeg=1e5, # ~100x scalings seeg=1e1, ecog=1e4, hbo=1e4, hbr=1e4), ylim=dict(mag=(-600., 600.), grad=(-200., 200.), eeg=(-200., 200.), @@ -38,7 +39,8 @@ ecg='ECG', emg='EMG', misc='misc', seeg='sEEG', bio='BIO', dipole='Dipole', ecog='ECoG', hbo='Oxyhemoglobin', ref_meg='Reference Magnetometers', fnirs_raw='fNIRS (raw)', - fnirs_od='fNIRS (OD)', hbr='Deoxyhemoglobin', + fnirs_od='fNIRS (OD)', fnirs_ac='fNIRS (AC)', + fnirs_ph='fNIRS (Ph)',hbr='Deoxyhemoglobin', gof='Goodness of fit', csd='Current source density'), mask_params=dict(marker='o', markerfacecolor='w', diff --git a/mne/io/boxy/boxy.py 
b/mne/io/boxy/boxy.py index f2ee8544a42..0b705d997c3 100644 --- a/mne/io/boxy/boxy.py +++ b/mne/io/boxy/boxy.py @@ -299,7 +299,7 @@ def __init__(self, fname, datatype='AC', preload=False, verbose=None): rpa=fiducial_coords[2]) # create info structure - ch_types = (['fnirs_raw' if i_chan < np.sum(mtg_chan_num) else 'stim' + ch_types = (['fnirs_ph' if i_chan < np.sum(mtg_chan_num) else 'stim' for i_chan, _ in enumerate(boxy_labels)]) info = create_info(boxy_labels, srate[0], ch_types=ch_types) @@ -535,11 +535,13 @@ def _read_segment_file(self, data, idx, fi, start, stop, cals, mult): data_[index_loc, :] = boxy_array[:, channel] ###phase unwrapping### - # thresh = 0.00000001 - # scipy.io.savemat(file_name = r"C:\Users\spork\Desktop\data_matlab.mat", - # mdict=dict(data=data_)) if i_data == 'Ph': print('Fixing phase wrap') + # accounts for sharp, sudden changes in phase + # such as crossing over from 0/360 degrees + # estimate mean phase of first 50 points + # if a point differs more than 90 degrees from the mean + # add or subtract 360 degress from that point for i_chan in range(np.size(data_, axis=0)): if np.mean(data_[i_chan,:50]) < 180: wrapped_points = data_[i_chan, :] > 270 @@ -548,14 +550,8 @@ def _read_segment_file(self, data, idx, fi, start, stop, cals, mult): wrapped_points = data_[i_chan,:] < 90 data_[i_chan, wrapped_points] += 360 - # unwrapped_data = scipy.io.loadmat(r"C:\Users\spork\Desktop\data_unwrap_python.mat") - - # test1 = abs(unwrapped_data['data'] - data_) <= thresh - # test1.all() - print('Detrending phase data') - # scipy.io.savemat(file_name = r"C:\Users\spork\Desktop\data_unwrap_matlab.mat", - # mdict=dict(data=data_)) + # remove trends and drifts in data that occur over time y = np.linspace(0, np.size(data_, axis=1)-1, np.size(data_, axis=1)) @@ -564,30 +560,18 @@ def _read_segment_file(self, data, idx, fi, start, stop, cals, mult): poly_coeffs = np.polyfit(x,data_[i_chan, :] ,3) tmp_ph = data_[i_chan, :] - np.polyval(poly_coeffs,x) 
data_[i_chan, :] = tmp_ph - - # detrend_data = scipy.io.loadmat(r"C:\Users\spork\Desktop\data_detrend_python.mat") - - # test2 = abs(detrend_data['data'] - data_) <= thresh - # test2.all() print('Removing phase mean') - # scipy.io.savemat(file_name = r"C:\Users\spork\Desktop\data_detrend_matlab.mat", - # mdict=dict(data=data_)) + # subtract mean to better detect outliers using SD mrph = np.mean(data_,axis=1); for i_chan in range(np.size(data_, axis=0)): data_[i_chan,:]=(data_[i_chan,:]-mrph[i_chan]) - - # mean_data = scipy.io.loadmat(r"C:\Users\spork\Desktop\data_mean_python.mat") - - # test3 = abs(mean_data['data'] - data_) <= thresh - # test3.all() print('Removing phase outliers') - # scipy.io.savemat(file_name = r"C:\Users\spork\Desktop\data_mean_matlab.mat", - # mdict=dict(data=data_)) + # remove data points that are larger than three SDs - ph_out_thr=3; # always set to "3" per Kathy & Gabriele Oct 12 2012 + ph_out_thr=3; sdph=np.std(data_,1, ddof = 1); #set ddof to 1 to mimic matlab n_ph_out = np.zeros(np.size(data_, axis=0), dtype= np.int8) @@ -607,11 +591,6 @@ def _read_segment_file(self, data, idx, fi, start, stop, cals, mult): data_[i_chan,j_pt] = ( (data_[i_chan,j_pt-1] + data_[i_chan,j_pt+1])/2) - - # outlier_data = scipy.io.loadmat(r"C:\Users\spork\Desktop\data_outliers_python.mat") - - # test4 = abs(outlier_data['data'] - data_) <= thresh - # test4.all() #convert phase to pico seconds for i_chan in range(np.size(data_, axis=0)): diff --git a/mne/io/constants.py b/mne/io/constants.py index e1311d54cce..57ba7acc665 100644 --- a/mne/io/constants.py +++ b/mne/io/constants.py @@ -827,6 +827,8 @@ FIFF.FIFFV_COIL_FNIRS_HBR = 301 # fNIRS deoxyhemoglobin FIFF.FIFFV_COIL_FNIRS_RAW = 302 # fNIRS raw light intensity FIFF.FIFFV_COIL_FNIRS_OD = 303 # fNIRS optical density +FIFF.FIFFV_COIL_FNIRS_AC = 304 # fNIRS changes in light intensity +FIFF.FIFFV_COIL_FNIRS_PH = 305 # fNIRS phase of optical signal FIFF.FIFFV_COIL_MCG_42 = 1000 # For testing the MCG software @@ 
-912,4 +914,4 @@ FIFF.FIFFB_MNE_ANNOTATIONS = 3810 # annotations block # MNE Metadata Dataframes -FIFF.FIFFB_MNE_METADATA = 3811 # metadata dataframes block +FIFF.FIFFB_MNE_METADATA = 3811 # metadata dataframes block \ No newline at end of file diff --git a/mne/io/meas_info.py b/mne/io/meas_info.py index 0d48a5642c9..c025daebb56 100644 --- a/mne/io/meas_info.py +++ b/mne/io/meas_info.py @@ -5,7 +5,6 @@ # Stefan Appelhoff # # License: BSD (3-clause) - from collections import Counter from copy import deepcopy import datetime @@ -54,6 +53,10 @@ ecog=(FIFF.FIFFV_ECOG_CH, FIFF.FIFFV_COIL_EEG, FIFF.FIFF_UNIT_V), fnirs_raw=(FIFF.FIFFV_FNIRS_CH, FIFF.FIFFV_COIL_FNIRS_RAW, FIFF.FIFF_UNIT_V), + fnirs_ac=(FIFF.FIFFV_FNIRS_CH, FIFF.FIFFV_COIL_FNIRS_AC, + FIFF.FIFF_UNIT_V), + fnirs_ph=(FIFF.FIFFV_FNIRS_CH, FIFF.FIFFV_COIL_FNIRS_PH, + FIFF.FIFF_UNIT_V), fnirs_od=(FIFF.FIFFV_FNIRS_CH, FIFF.FIFFV_COIL_FNIRS_OD, FIFF.FIFF_UNIT_NONE), hbo=(FIFF.FIFFV_FNIRS_CH, FIFF.FIFFV_COIL_FNIRS_HBO, FIFF.FIFF_UNIT_MOL), @@ -886,6 +889,7 @@ def read_info(fname, verbose=None): info : instance of Info Measurement information for the dataset. 
""" + print('###############################################################') f, tree, _ = fiff_open(fname) with f as fid: info = read_meas_info(fid, tree)[0] diff --git a/mne/io/pick.py b/mne/io/pick.py index bbb4352bb58..8ea6ffdbd8b 100644 --- a/mne/io/pick.py +++ b/mne/io/pick.py @@ -99,6 +99,8 @@ def get_channel_type_constants(): FIFF.FIFFV_COIL_FNIRS_HBR: 'hbr', FIFF.FIFFV_COIL_FNIRS_RAW: 'fnirs_raw', FIFF.FIFFV_COIL_FNIRS_OD: 'fnirs_od', + FIFF.FIFFV_COIL_FNIRS_AC: 'fnirs_ac', + FIFF.FIFFV_COIL_FNIRS_PH: 'fnirs_ph', }), 'eeg': ('coil_type', {FIFF.FIFFV_COIL_EEG: 'eeg', FIFF.FIFFV_COIL_EEG_BIPOLAR: 'eeg', @@ -271,6 +273,10 @@ def _triage_fnirs_pick(ch, fnirs): return True elif ch['coil_type'] == FIFF.FIFFV_COIL_FNIRS_OD and fnirs == 'fnirs_od': return True + elif ch['coil_type'] == FIFF.FIFFV_COIL_FNIRS_AC and fnirs == 'fnirs_ac': + return True + elif ch['coil_type'] == FIFF.FIFFV_COIL_FNIRS_PH and fnirs == 'fnirs_ph': + return True return False @@ -401,7 +407,8 @@ def pick_types(info, meg=True, eeg=False, stim=False, eog=False, ecg=False, for key in ('grad', 'mag'): param_dict[key] = meg if isinstance(fnirs, bool): - for key in ('hbo', 'hbr', 'fnirs_raw', 'fnirs_od'): + for key in ('hbo', 'hbr', 'fnirs_raw', + 'fnirs_od', 'fnirs_ac', 'fnirs_ph'): param_dict[key] = fnirs for k in range(nchan): ch_type = channel_type(info, k) @@ -409,7 +416,7 @@ def pick_types(info, meg=True, eeg=False, stim=False, eog=False, ecg=False, pick[k] = param_dict[ch_type] except KeyError: # not so simple assert ch_type in ('grad', 'mag', 'hbo', 'hbr', 'ref_meg', - 'fnirs_raw', 'fnirs_od') + 'fnirs_raw', 'fnirs_od', 'fnirs_ac', 'fnirs_ph') if ch_type in ('grad', 'mag'): pick[k] = _triage_meg_pick(info['chs'][k], meg) elif ch_type == 'ref_meg': @@ -937,6 +944,8 @@ def _pick_data_or_ica(info, exclude=()): def _picks_to_idx(info, picks, none='data', exclude='bads', allow_empty=False, with_ref_meg=True, return_kind=False): """Convert and check pick validity.""" + # import pdb + # 
pdb.set_trace() from .meas_info import Info picked_ch_type_or_generic = False # diff --git a/mne/preprocessing/nirs/_optical_density.py b/mne/preprocessing/nirs/_optical_density.py index 136e0d8e6f6..7a809d08420 100644 --- a/mne/preprocessing/nirs/_optical_density.py +++ b/mne/preprocessing/nirs/_optical_density.py @@ -25,6 +25,8 @@ def optical_density(raw): raw : instance of Raw The modified raw instance. """ + # import pdb + # pdb.set_trace() raw = raw.copy().load_data() _validate_type(raw, BaseRaw, 'raw') picks = _picks_to_idx(raw.info, 'fnirs_raw') diff --git a/mne/utils/_bunch.py b/mne/utils/_bunch.py index 3db11a4390c..3659116110f 100644 --- a/mne/utils/_bunch.py +++ b/mne/utils/_bunch.py @@ -14,7 +14,6 @@ class Bunch(dict): """Dictionary-like object that exposes its keys as attributes.""" - def __init__(self, **kwargs): # noqa: D102 dict.__init__(self, kwargs) self.__dict__ = self @@ -25,7 +24,6 @@ def __init__(self, **kwargs): # noqa: D102 class BunchConst(Bunch): """Class to prevent us from re-defining constants (DRY).""" - def __setattr__(self, attr, val): # noqa: D105 if attr != '__dict__' and hasattr(self, attr): raise AttributeError('Attribute "%s" already set' % attr) @@ -40,7 +38,6 @@ class BunchConstNamed(BunchConst): Only supports string keys and int or float values. 
""" - def __setattr__(self, attr, val): # noqa: D105 assert isinstance(attr, str) if isinstance(val, int): diff --git a/mne/viz/raw.py b/mne/viz/raw.py index 2ff3fd9f5c1..4de90acc1c9 100644 --- a/mne/viz/raw.py +++ b/mne/viz/raw.py @@ -350,7 +350,7 @@ def plot_raw(raw, events=None, duration=10.0, start=0.0, n_channels=20, for t in ['grad', 'mag']: inds += [pick_types(info, meg=t, ref_meg=False, exclude=[])] types += [t] * len(inds[-1]) - for t in ['hbo', 'hbr', 'fnirs_raw', 'fnirs_od']: + for t in ['hbo', 'hbr', 'fnirs_raw', 'fnirs_od', 'fnirs_ac', 'fnirs_ph']: inds += [pick_types(info, meg=False, ref_meg=False, fnirs=t, exclude=[])] types += [t] * len(inds[-1]) diff --git a/tutorials/preprocessing/plot_80_boxy_processing.py b/tutorials/preprocessing/plot_80_boxy_processing.py index 8932bc4338c..b01e4551629 100644 --- a/tutorials/preprocessing/plot_80_boxy_processing.py +++ b/tutorials/preprocessing/plot_80_boxy_processing.py @@ -29,6 +29,11 @@ boxy_raw_dir = os.path.join(boxy_data_folder, 'Participant-1') raw_intensity = mne.io.read_raw_boxy(boxy_raw_dir, 'Ph', verbose=True).load_data() +### plot the raw data ### +raw_intensity.plot(n_channels=10, clipping='clamp') +# raw_intensity.plot(n_channels=10, scalings={'fnirs_raw':180}, clipping='clamp') + + ###separate data based on montages### mtg_a_indices = [i_index for i_index,i_label in enumerate(raw_intensity.info['ch_names']) if re.search(r'(S[1-5]_|\bMarkers a\b)', i_label)] @@ -131,6 +136,11 @@ # # # # The raw intensity values are then converted to optical density. +# doesn't work with the new channel types +# not sure what to change since _picks_to_idx(raw.info, 'fnirs_raw') +# maybe add a try statement for the other types? 
+# not sure if we want to change the default function + raw_od = mne.preprocessing.nirs.optical_density(raw_intensity) raw_od_a = mne.preprocessing.nirs.optical_density(mtg_a_intensity) raw_od_b = mne.preprocessing.nirs.optical_density(mtg_b_intensity) From 7ee083f9e220072d531810d7f05ab7c59787ffa7 Mon Sep 17 00:00:00 2001 From: Kyle Mathewson Date: Thu, 28 May 2020 12:25:21 -0700 Subject: [PATCH 100/167] made test script in temporary dev folder --- dev/plot_test.py | 14 ++++++++++++++ mne/defaults.py | 4 ++-- 2 files changed, 16 insertions(+), 2 deletions(-) create mode 100644 dev/plot_test.py diff --git a/dev/plot_test.py b/dev/plot_test.py new file mode 100644 index 00000000000..54de0eb4cc8 --- /dev/null +++ b/dev/plot_test.py @@ -0,0 +1,14 @@ + + +import os +import matplotlib.pyplot as plt + +import mne + + +boxy_data_folder = mne.datasets.boxy_example.data_path() +boxy_raw_dir = os.path.join(boxy_data_folder, 'Participant-1') +raw_intensity = mne.io.read_raw_boxy(boxy_raw_dir, 'AC', verbose=True).load_data() + +### plot the raw data ### +raw_intensity.plot(n_channels=10) diff --git a/mne/defaults.py b/mne/defaults.py index 6ff15575b69..42b0a6920d8 100644 --- a/mne/defaults.py +++ b/mne/defaults.py @@ -27,8 +27,8 @@ stim=1, resp=1, chpi=1e-4, exci=1, ias=1, syst=1, seeg=1e-4, bio=1e-6, ecog=1e-4, hbo=10e-6, hbr=10e-6, whitened=10., fnirs_raw=2e-2, - fnirs_od=2e-2, fnirs_ac=2e-2, - fnirs_ph=180, csd=20e-4), + fnirs_od=2e-2, fnirs_ac=20000, + fnirs_ph=10e3, csd=20e-4), scalings_cov_rank=dict(mag=1e12, grad=1e11, eeg=1e5, # ~100x scalings seeg=1e1, ecog=1e4, hbo=1e4, hbr=1e4), ylim=dict(mag=(-600., 600.), grad=(-200., 200.), eeg=(-200., 200.), From d8bfcc3c00410ae9169b9910a1efdcde61f945bb Mon Sep 17 00:00:00 2001 From: Jonathan Kuziek Date: Thu, 21 May 2020 17:05:00 -0600 Subject: [PATCH 101/167] phase data will be unwrapped, detrended, and outliers removed --- mne/io/boxy/boxy.py | 79 +++++++++++++++++++ .../preprocessing/plot_80_boxy_processing.py | 2 +- 2 files 
changed, 80 insertions(+), 1 deletion(-) diff --git a/mne/io/boxy/boxy.py b/mne/io/boxy/boxy.py index 167c22fa90c..0c0fccc5093 100644 --- a/mne/io/boxy/boxy.py +++ b/mne/io/boxy/boxy.py @@ -527,6 +527,85 @@ def _read_segment_file(self, data, idx, fi, start, stop, cals, mult): # save our data based on data type data_[index_loc, :] = boxy_array[:, channel] + + ###phase unwrapping### + # thresh = 0.00000001 + # scipy.io.savemat(file_name = r"C:\Users\spork\Desktop\data_matlab.mat", + # mdict=dict(data=data_)) + if i_data == 'Ph': + print('Fixing phase wrap') + for i_chan in range(np.size(data_, axis=0)): + if np.mean(data_[i_chan,:50]) < 180: + wrapped_points = data_[i_chan, :] > 270 + data_[i_chan, wrapped_points] -= 360 + else: + wrapped_points = data_[i_chan,:] < 90 + data_[i_chan, wrapped_points] += 360 + + # unwrapped_data = scipy.io.loadmat(r"C:\Users\spork\Desktop\data_unwrap_python.mat") + + # test1 = abs(unwrapped_data['data'] - data_) <= thresh + # test1.all() + + print('Detrending phase data') + # scipy.io.savemat(file_name = r"C:\Users\spork\Desktop\data_unwrap_matlab.mat", + # mdict=dict(data=data_)) + + y = np.linspace(0, np.size(data_, axis=1)-1, + np.size(data_, axis=1)) + x = np.transpose(y) + for i_chan in range(np.size(data_, axis=0)): + poly_coeffs = np.polyfit(x,data_[i_chan, :] ,3) + tmp_ph = data_[i_chan, :] - np.polyval(poly_coeffs,x) + data_[i_chan, :] = tmp_ph + + # detrend_data = scipy.io.loadmat(r"C:\Users\spork\Desktop\data_detrend_python.mat") + + # test2 = abs(detrend_data['data'] - data_) <= thresh + # test2.all() + + print('Removing phase mean') + # scipy.io.savemat(file_name = r"C:\Users\spork\Desktop\data_detrend_matlab.mat", + # mdict=dict(data=data_)) + + mrph = np.mean(data_,axis=1); + for i_chan in range(np.size(data_, axis=0)): + data_[i_chan,:]=(data_[i_chan,:]-mrph[i_chan]) + + # mean_data = scipy.io.loadmat(r"C:\Users\spork\Desktop\data_mean_python.mat") + + # test3 = abs(mean_data['data'] - data_) <= thresh + # test3.all() + 
+ print('Removing phase outliers') + # scipy.io.savemat(file_name = r"C:\Users\spork\Desktop\data_mean_matlab.mat", + # mdict=dict(data=data_)) + + ph_out_thr=3; # always set to "3" per Kathy & Gabriele Oct 12 2012 + sdph=np.std(data_,1, ddof = 1); #set ddof to 1 to mimic matlab + n_ph_out = np.zeros(np.size(data_, axis=0), dtype= np.int8) + + for i_chan in range(np.size(data_, axis=0)): + outliers = np.where(np.abs(data_[i_chan,:]) > + (ph_out_thr*sdph[i_chan])) + outliers = outliers[0] + if len(outliers) > 0: + if outliers[0] == 0: + outliers = outliers[1:] + if len(outliers) > 0: + if outliers[-1] == np.size(data_, axis=1) - 1: + outliers = outliers[:-1] + n_ph_out[i_chan] = int(len(outliers)) + for i_pt in range(n_ph_out[i_chan]): + j_pt = outliers[i_pt] + data_[i_chan,j_pt] = ( + (data_[i_chan,j_pt-1] + + data_[i_chan,j_pt+1])/2) + + # outlier_data = scipy.io.loadmat(r"C:\Users\spork\Desktop\data_outliers_python.mat") + + # test4 = abs(outlier_data['data'] - data_) <= thresh + # test4.all() # swap channels to match new wavelength order for i_chan in range(0, len(data_), 2): diff --git a/tutorials/preprocessing/plot_80_boxy_processing.py b/tutorials/preprocessing/plot_80_boxy_processing.py index c98bd029847..8932bc4338c 100644 --- a/tutorials/preprocessing/plot_80_boxy_processing.py +++ b/tutorials/preprocessing/plot_80_boxy_processing.py @@ -27,7 +27,7 @@ boxy_data_folder = mne.datasets.boxy_example.data_path() boxy_raw_dir = os.path.join(boxy_data_folder, 'Participant-1') -raw_intensity = mne.io.read_raw_boxy(boxy_raw_dir, 'AC', verbose=True).load_data() +raw_intensity = mne.io.read_raw_boxy(boxy_raw_dir, 'Ph', verbose=True).load_data() ###separate data based on montages### mtg_a_indices = [i_index for i_index,i_label in enumerate(raw_intensity.info['ch_names']) From 0de6fd5696313ecb029923c0c4cbb4a8c541f745 Mon Sep 17 00:00:00 2001 From: Jonathan Kuziek Date: Fri, 22 May 2020 11:01:01 -0600 Subject: [PATCH 102/167] added conversion of phase to pico seconds 
--- mne/io/boxy/boxy.py | 13 ++++++++++++- 1 file changed, 12 insertions(+), 1 deletion(-) diff --git a/mne/io/boxy/boxy.py b/mne/io/boxy/boxy.py index 0c0fccc5093..f2ee8544a42 100644 --- a/mne/io/boxy/boxy.py +++ b/mne/io/boxy/boxy.py @@ -242,6 +242,7 @@ def __init__(self, fname, datatype='AC', preload=False, verbose=None): mtg_end = [] mtg_src_num = [] mtg_det_num = [] + mtg_mdf = [] blk_num = [len(blk) for blk in blk_names] for mtg_num, i_mtg in enumerate(mtg_chan_num, 0): start = int(np.sum(mtg_chan_num[:mtg_num])) @@ -254,6 +255,9 @@ def __init__(self, fname, datatype='AC', preload=False, verbose=None): # get source and detector numbers for each montage mtg_src_num.append(source_num[start_blk]) mtg_det_num.append(detect_num[start_blk]) + # get modulation frequency for each channel and montage + # assuming modulation freq in MHz + mtg_mdf.append([int(chan_mdf)*1e6 for chan_mdf in chan_modulation[start:end]]) for i_type in data_types: for i_coord in range(start, end): boxy_coords.append(np.mean( @@ -328,7 +332,7 @@ def __init__(self, fname, datatype='AC', preload=False, verbose=None): temp_other = np.asarray(boxy_coords[i_chan][9:], dtype=np.float64) info['chs'][i_chan]['loc'] = np.concatenate((temp_ch_src_det, temp_other), axis=0) - + raw_extras = {'source_num': source_num, 'detect_num': detect_num, 'start_line': start_line, @@ -338,6 +342,7 @@ def __init__(self, fname, datatype='AC', preload=False, verbose=None): 'montages': mtg_names, 'blocks': blk_names, 'data_types': data_types, + 'mtg_mdf': mtg_mdf, } ###check to make sure data is the same length for each file @@ -395,6 +400,7 @@ def _read_segment_file(self, data, idx, fi, start, stop, cals, mult): data_types = self._raw_extras[fi]['data_types'] montages = self._raw_extras[fi]['montages'] blocks = self._raw_extras[fi]['blocks'] + mtg_mdf = self._raw_extras[fi]['mtg_mdf'] boxy_files = self._raw_extras[fi]['files']['*.[000-999]*'] event_fname = os.path.join(self._filenames[fi], 'evt') @@ -606,6 +612,11 @@ 
def _read_segment_file(self, data, idx, fi, start, stop, cals, mult): # test4 = abs(outlier_data['data'] - data_) <= thresh # test4.all() + + #convert phase to pico seconds + for i_chan in range(np.size(data_, axis=0)): + data_[i_chan,:] = ((1e12*data_[i_chan,:])/ + (360*mtg_mdf[i_mtg][i_chan])) # swap channels to match new wavelength order for i_chan in range(0, len(data_), 2): From 375e51aa505d570f55019c9e697803f1069dca85 Mon Sep 17 00:00:00 2001 From: Jonathan Kuziek Date: Thu, 28 May 2020 10:51:10 -0600 Subject: [PATCH 103/167] working on adding two new data types, fnirs_ac and fnirs_ph --- mne/defaults.py | 12 +++--- mne/io/boxy/boxy.py | 41 +++++-------------- mne/io/constants.py | 4 +- mne/io/meas_info.py | 6 ++- mne/io/pick.py | 13 +++++- mne/preprocessing/nirs/_optical_density.py | 2 + mne/utils/_bunch.py | 3 -- mne/viz/raw.py | 2 +- .../preprocessing/plot_80_boxy_processing.py | 10 +++++ 9 files changed, 49 insertions(+), 44 deletions(-) diff --git a/mne/defaults.py b/mne/defaults.py index bbf7be5aefa..6ff15575b69 100644 --- a/mne/defaults.py +++ b/mne/defaults.py @@ -11,23 +11,24 @@ ref_meg='steelblue', misc='k', stim='k', resp='k', chpi='k', exci='k', ias='k', syst='k', seeg='saddlebrown', dipole='k', gof='k', bio='k', ecog='k', hbo='#AA3377', hbr='b', - fnirs_raw='k', fnirs_od='k', csd='k'), + fnirs_raw='k', fnirs_od='k', fnirs_ac='k', fnirs_ph='k', csd='k'), units=dict(mag='fT', grad='fT/cm', eeg='µV', eog='µV', ecg='µV', emg='µV', misc='AU', seeg='mV', dipole='nAm', gof='GOF', bio='µV', ecog='µV', hbo='µM', hbr='µM', ref_meg='fT', fnirs_raw='V', - fnirs_od='V', csd='V/m²'), + fnirs_od='V', fnirs_ac='V', fnirs_ph=u'\N{DEGREE SIGN}', csd='V/m²'), # scalings for the units scalings=dict(mag=1e15, grad=1e13, eeg=1e6, eog=1e6, emg=1e6, ecg=1e6, misc=1.0, seeg=1e3, dipole=1e9, gof=1.0, bio=1e6, ecog=1e6, hbo=1e6, hbr=1e6, ref_meg=1e15, fnirs_raw=1.0, fnirs_od=1.0, - csd=1e5), + fnirs_ac=1.0, fnirs_ph=1.0, csd=1e5), # rough guess for a good plot 
scalings_plot_raw=dict(mag=1e-12, grad=4e-11, eeg=20e-6, eog=150e-6, ecg=5e-4, emg=1e-3, ref_meg=1e-12, misc='auto', stim=1, resp=1, chpi=1e-4, exci=1, ias=1, syst=1, seeg=1e-4, bio=1e-6, ecog=1e-4, hbo=10e-6, hbr=10e-6, whitened=10., fnirs_raw=2e-2, - fnirs_od=2e-2, csd=20e-4), + fnirs_od=2e-2, fnirs_ac=2e-2, + fnirs_ph=180, csd=20e-4), scalings_cov_rank=dict(mag=1e12, grad=1e11, eeg=1e5, # ~100x scalings seeg=1e1, ecog=1e4, hbo=1e4, hbr=1e4), ylim=dict(mag=(-600., 600.), grad=(-200., 200.), eeg=(-200., 200.), @@ -38,7 +39,8 @@ ecg='ECG', emg='EMG', misc='misc', seeg='sEEG', bio='BIO', dipole='Dipole', ecog='ECoG', hbo='Oxyhemoglobin', ref_meg='Reference Magnetometers', fnirs_raw='fNIRS (raw)', - fnirs_od='fNIRS (OD)', hbr='Deoxyhemoglobin', + fnirs_od='fNIRS (OD)', fnirs_ac='fNIRS (AC)', + fnirs_ph='fNIRS (Ph)',hbr='Deoxyhemoglobin', gof='Goodness of fit', csd='Current source density'), mask_params=dict(marker='o', markerfacecolor='w', diff --git a/mne/io/boxy/boxy.py b/mne/io/boxy/boxy.py index f2ee8544a42..0b705d997c3 100644 --- a/mne/io/boxy/boxy.py +++ b/mne/io/boxy/boxy.py @@ -299,7 +299,7 @@ def __init__(self, fname, datatype='AC', preload=False, verbose=None): rpa=fiducial_coords[2]) # create info structure - ch_types = (['fnirs_raw' if i_chan < np.sum(mtg_chan_num) else 'stim' + ch_types = (['fnirs_ph' if i_chan < np.sum(mtg_chan_num) else 'stim' for i_chan, _ in enumerate(boxy_labels)]) info = create_info(boxy_labels, srate[0], ch_types=ch_types) @@ -535,11 +535,13 @@ def _read_segment_file(self, data, idx, fi, start, stop, cals, mult): data_[index_loc, :] = boxy_array[:, channel] ###phase unwrapping### - # thresh = 0.00000001 - # scipy.io.savemat(file_name = r"C:\Users\spork\Desktop\data_matlab.mat", - # mdict=dict(data=data_)) if i_data == 'Ph': print('Fixing phase wrap') + # accounts for sharp, sudden changes in phase + # such as crossing over from 0/360 degrees + # estimate mean phase of first 50 points + # if a point differs more than 90 degrees 
from the mean + # add or subtract 360 degress from that point for i_chan in range(np.size(data_, axis=0)): if np.mean(data_[i_chan,:50]) < 180: wrapped_points = data_[i_chan, :] > 270 @@ -548,14 +550,8 @@ def _read_segment_file(self, data, idx, fi, start, stop, cals, mult): wrapped_points = data_[i_chan,:] < 90 data_[i_chan, wrapped_points] += 360 - # unwrapped_data = scipy.io.loadmat(r"C:\Users\spork\Desktop\data_unwrap_python.mat") - - # test1 = abs(unwrapped_data['data'] - data_) <= thresh - # test1.all() - print('Detrending phase data') - # scipy.io.savemat(file_name = r"C:\Users\spork\Desktop\data_unwrap_matlab.mat", - # mdict=dict(data=data_)) + # remove trends and drifts in data that occur over time y = np.linspace(0, np.size(data_, axis=1)-1, np.size(data_, axis=1)) @@ -564,30 +560,18 @@ def _read_segment_file(self, data, idx, fi, start, stop, cals, mult): poly_coeffs = np.polyfit(x,data_[i_chan, :] ,3) tmp_ph = data_[i_chan, :] - np.polyval(poly_coeffs,x) data_[i_chan, :] = tmp_ph - - # detrend_data = scipy.io.loadmat(r"C:\Users\spork\Desktop\data_detrend_python.mat") - - # test2 = abs(detrend_data['data'] - data_) <= thresh - # test2.all() print('Removing phase mean') - # scipy.io.savemat(file_name = r"C:\Users\spork\Desktop\data_detrend_matlab.mat", - # mdict=dict(data=data_)) + # subtract mean to better detect outliers using SD mrph = np.mean(data_,axis=1); for i_chan in range(np.size(data_, axis=0)): data_[i_chan,:]=(data_[i_chan,:]-mrph[i_chan]) - - # mean_data = scipy.io.loadmat(r"C:\Users\spork\Desktop\data_mean_python.mat") - - # test3 = abs(mean_data['data'] - data_) <= thresh - # test3.all() print('Removing phase outliers') - # scipy.io.savemat(file_name = r"C:\Users\spork\Desktop\data_mean_matlab.mat", - # mdict=dict(data=data_)) + # remove data points that are larger than three SDs - ph_out_thr=3; # always set to "3" per Kathy & Gabriele Oct 12 2012 + ph_out_thr=3; sdph=np.std(data_,1, ddof = 1); #set ddof to 1 to mimic matlab n_ph_out = 
np.zeros(np.size(data_, axis=0), dtype= np.int8) @@ -607,11 +591,6 @@ def _read_segment_file(self, data, idx, fi, start, stop, cals, mult): data_[i_chan,j_pt] = ( (data_[i_chan,j_pt-1] + data_[i_chan,j_pt+1])/2) - - # outlier_data = scipy.io.loadmat(r"C:\Users\spork\Desktop\data_outliers_python.mat") - - # test4 = abs(outlier_data['data'] - data_) <= thresh - # test4.all() #convert phase to pico seconds for i_chan in range(np.size(data_, axis=0)): diff --git a/mne/io/constants.py b/mne/io/constants.py index e1311d54cce..57ba7acc665 100644 --- a/mne/io/constants.py +++ b/mne/io/constants.py @@ -827,6 +827,8 @@ FIFF.FIFFV_COIL_FNIRS_HBR = 301 # fNIRS deoxyhemoglobin FIFF.FIFFV_COIL_FNIRS_RAW = 302 # fNIRS raw light intensity FIFF.FIFFV_COIL_FNIRS_OD = 303 # fNIRS optical density +FIFF.FIFFV_COIL_FNIRS_AC = 304 # fNIRS changes in light intensity +FIFF.FIFFV_COIL_FNIRS_PH = 305 # fNIRS phase of optical signal FIFF.FIFFV_COIL_MCG_42 = 1000 # For testing the MCG software @@ -912,4 +914,4 @@ FIFF.FIFFB_MNE_ANNOTATIONS = 3810 # annotations block # MNE Metadata Dataframes -FIFF.FIFFB_MNE_METADATA = 3811 # metadata dataframes block +FIFF.FIFFB_MNE_METADATA = 3811 # metadata dataframes block \ No newline at end of file diff --git a/mne/io/meas_info.py b/mne/io/meas_info.py index 8c2fa0e540e..e17f482616d 100644 --- a/mne/io/meas_info.py +++ b/mne/io/meas_info.py @@ -5,7 +5,6 @@ # Stefan Appelhoff # # License: BSD (3-clause) - from collections import Counter from copy import deepcopy import datetime @@ -54,6 +53,10 @@ ecog=(FIFF.FIFFV_ECOG_CH, FIFF.FIFFV_COIL_EEG, FIFF.FIFF_UNIT_V), fnirs_raw=(FIFF.FIFFV_FNIRS_CH, FIFF.FIFFV_COIL_FNIRS_RAW, FIFF.FIFF_UNIT_V), + fnirs_ac=(FIFF.FIFFV_FNIRS_CH, FIFF.FIFFV_COIL_FNIRS_AC, + FIFF.FIFF_UNIT_V), + fnirs_ph=(FIFF.FIFFV_FNIRS_CH, FIFF.FIFFV_COIL_FNIRS_PH, + FIFF.FIFF_UNIT_V), fnirs_od=(FIFF.FIFFV_FNIRS_CH, FIFF.FIFFV_COIL_FNIRS_OD, FIFF.FIFF_UNIT_NONE), hbo=(FIFF.FIFFV_FNIRS_CH, FIFF.FIFFV_COIL_FNIRS_HBO, FIFF.FIFF_UNIT_MOL), @@ -886,6 
+889,7 @@ def read_info(fname, verbose=None): info : instance of Info Measurement information for the dataset. """ + print('###############################################################') f, tree, _ = fiff_open(fname) with f as fid: info = read_meas_info(fid, tree)[0] diff --git a/mne/io/pick.py b/mne/io/pick.py index bbb4352bb58..8ea6ffdbd8b 100644 --- a/mne/io/pick.py +++ b/mne/io/pick.py @@ -99,6 +99,8 @@ def get_channel_type_constants(): FIFF.FIFFV_COIL_FNIRS_HBR: 'hbr', FIFF.FIFFV_COIL_FNIRS_RAW: 'fnirs_raw', FIFF.FIFFV_COIL_FNIRS_OD: 'fnirs_od', + FIFF.FIFFV_COIL_FNIRS_AC: 'fnirs_ac', + FIFF.FIFFV_COIL_FNIRS_PH: 'fnirs_ph', }), 'eeg': ('coil_type', {FIFF.FIFFV_COIL_EEG: 'eeg', FIFF.FIFFV_COIL_EEG_BIPOLAR: 'eeg', @@ -271,6 +273,10 @@ def _triage_fnirs_pick(ch, fnirs): return True elif ch['coil_type'] == FIFF.FIFFV_COIL_FNIRS_OD and fnirs == 'fnirs_od': return True + elif ch['coil_type'] == FIFF.FIFFV_COIL_FNIRS_AC and fnirs == 'fnirs_ac': + return True + elif ch['coil_type'] == FIFF.FIFFV_COIL_FNIRS_PH and fnirs == 'fnirs_ph': + return True return False @@ -401,7 +407,8 @@ def pick_types(info, meg=True, eeg=False, stim=False, eog=False, ecg=False, for key in ('grad', 'mag'): param_dict[key] = meg if isinstance(fnirs, bool): - for key in ('hbo', 'hbr', 'fnirs_raw', 'fnirs_od'): + for key in ('hbo', 'hbr', 'fnirs_raw', + 'fnirs_od', 'fnirs_ac', 'fnirs_ph'): param_dict[key] = fnirs for k in range(nchan): ch_type = channel_type(info, k) @@ -409,7 +416,7 @@ def pick_types(info, meg=True, eeg=False, stim=False, eog=False, ecg=False, pick[k] = param_dict[ch_type] except KeyError: # not so simple assert ch_type in ('grad', 'mag', 'hbo', 'hbr', 'ref_meg', - 'fnirs_raw', 'fnirs_od') + 'fnirs_raw', 'fnirs_od', 'fnirs_ac', 'fnirs_ph') if ch_type in ('grad', 'mag'): pick[k] = _triage_meg_pick(info['chs'][k], meg) elif ch_type == 'ref_meg': @@ -937,6 +944,8 @@ def _pick_data_or_ica(info, exclude=()): def _picks_to_idx(info, picks, none='data', exclude='bads', 
allow_empty=False, with_ref_meg=True, return_kind=False): """Convert and check pick validity.""" + # import pdb + # pdb.set_trace() from .meas_info import Info picked_ch_type_or_generic = False # diff --git a/mne/preprocessing/nirs/_optical_density.py b/mne/preprocessing/nirs/_optical_density.py index 136e0d8e6f6..7a809d08420 100644 --- a/mne/preprocessing/nirs/_optical_density.py +++ b/mne/preprocessing/nirs/_optical_density.py @@ -25,6 +25,8 @@ def optical_density(raw): raw : instance of Raw The modified raw instance. """ + # import pdb + # pdb.set_trace() raw = raw.copy().load_data() _validate_type(raw, BaseRaw, 'raw') picks = _picks_to_idx(raw.info, 'fnirs_raw') diff --git a/mne/utils/_bunch.py b/mne/utils/_bunch.py index 3db11a4390c..3659116110f 100644 --- a/mne/utils/_bunch.py +++ b/mne/utils/_bunch.py @@ -14,7 +14,6 @@ class Bunch(dict): """Dictionary-like object that exposes its keys as attributes.""" - def __init__(self, **kwargs): # noqa: D102 dict.__init__(self, kwargs) self.__dict__ = self @@ -25,7 +24,6 @@ def __init__(self, **kwargs): # noqa: D102 class BunchConst(Bunch): """Class to prevent us from re-defining constants (DRY).""" - def __setattr__(self, attr, val): # noqa: D105 if attr != '__dict__' and hasattr(self, attr): raise AttributeError('Attribute "%s" already set' % attr) @@ -40,7 +38,6 @@ class BunchConstNamed(BunchConst): Only supports string keys and int or float values. 
""" - def __setattr__(self, attr, val): # noqa: D105 assert isinstance(attr, str) if isinstance(val, int): diff --git a/mne/viz/raw.py b/mne/viz/raw.py index 2ff3fd9f5c1..4de90acc1c9 100644 --- a/mne/viz/raw.py +++ b/mne/viz/raw.py @@ -350,7 +350,7 @@ def plot_raw(raw, events=None, duration=10.0, start=0.0, n_channels=20, for t in ['grad', 'mag']: inds += [pick_types(info, meg=t, ref_meg=False, exclude=[])] types += [t] * len(inds[-1]) - for t in ['hbo', 'hbr', 'fnirs_raw', 'fnirs_od']: + for t in ['hbo', 'hbr', 'fnirs_raw', 'fnirs_od', 'fnirs_ac', 'fnirs_ph']: inds += [pick_types(info, meg=False, ref_meg=False, fnirs=t, exclude=[])] types += [t] * len(inds[-1]) diff --git a/tutorials/preprocessing/plot_80_boxy_processing.py b/tutorials/preprocessing/plot_80_boxy_processing.py index 8932bc4338c..b01e4551629 100644 --- a/tutorials/preprocessing/plot_80_boxy_processing.py +++ b/tutorials/preprocessing/plot_80_boxy_processing.py @@ -29,6 +29,11 @@ boxy_raw_dir = os.path.join(boxy_data_folder, 'Participant-1') raw_intensity = mne.io.read_raw_boxy(boxy_raw_dir, 'Ph', verbose=True).load_data() +### plot the raw data ### +raw_intensity.plot(n_channels=10, clipping='clamp') +# raw_intensity.plot(n_channels=10, scalings={'fnirs_raw':180}, clipping='clamp') + + ###separate data based on montages### mtg_a_indices = [i_index for i_index,i_label in enumerate(raw_intensity.info['ch_names']) if re.search(r'(S[1-5]_|\bMarkers a\b)', i_label)] @@ -131,6 +136,11 @@ # # # # The raw intensity values are then converted to optical density. +# doesn't work with the new channel types +# not sure what to change since _picks_to_idx(raw.info, 'fnirs_raw') +# maybe add a try statement for the other types? 
+# not sure if we want to change the default function + raw_od = mne.preprocessing.nirs.optical_density(raw_intensity) raw_od_a = mne.preprocessing.nirs.optical_density(mtg_a_intensity) raw_od_b = mne.preprocessing.nirs.optical_density(mtg_b_intensity) From 78279f43b8b7e3f88da202c02c306fe75456dbb2 Mon Sep 17 00:00:00 2001 From: Kyle Mathewson Date: Thu, 28 May 2020 12:25:21 -0700 Subject: [PATCH 104/167] made test script in temporary dev folder --- dev/plot_test.py | 14 ++++++++++++++ mne/defaults.py | 4 ++-- 2 files changed, 16 insertions(+), 2 deletions(-) create mode 100644 dev/plot_test.py diff --git a/dev/plot_test.py b/dev/plot_test.py new file mode 100644 index 00000000000..54de0eb4cc8 --- /dev/null +++ b/dev/plot_test.py @@ -0,0 +1,14 @@ + + +import os +import matplotlib.pyplot as plt + +import mne + + +boxy_data_folder = mne.datasets.boxy_example.data_path() +boxy_raw_dir = os.path.join(boxy_data_folder, 'Participant-1') +raw_intensity = mne.io.read_raw_boxy(boxy_raw_dir, 'AC', verbose=True).load_data() + +### plot the raw data ### +raw_intensity.plot(n_channels=10) diff --git a/mne/defaults.py b/mne/defaults.py index 6ff15575b69..42b0a6920d8 100644 --- a/mne/defaults.py +++ b/mne/defaults.py @@ -27,8 +27,8 @@ stim=1, resp=1, chpi=1e-4, exci=1, ias=1, syst=1, seeg=1e-4, bio=1e-6, ecog=1e-4, hbo=10e-6, hbr=10e-6, whitened=10., fnirs_raw=2e-2, - fnirs_od=2e-2, fnirs_ac=2e-2, - fnirs_ph=180, csd=20e-4), + fnirs_od=2e-2, fnirs_ac=20000, + fnirs_ph=10e3, csd=20e-4), scalings_cov_rank=dict(mag=1e12, grad=1e11, eeg=1e5, # ~100x scalings seeg=1e1, ecog=1e4, hbo=1e4, hbr=1e4), ylim=dict(mag=(-600., 600.), grad=(-200., 200.), eeg=(-200., 200.), From 479558d5c980b18281581d4d91e1a71739cacf60 Mon Sep 17 00:00:00 2001 From: Jonathan Kuziek Date: Thu, 28 May 2020 17:38:07 -0600 Subject: [PATCH 105/167] all plots except haemo should now be able to handle fnirs_ph, might not need all of them though --- mne/channels/channels.py | 3 +- mne/channels/layout.py | 2 +- 
mne/defaults.py | 15 +- mne/io/boxy/boxy.py | 9 +- mne/io/constants.py | 5 +- mne/io/meas_info.py | 2 - mne/io/pick.py | 25 +-- mne/preprocessing/nirs/_optical_density.py | 7 +- mne/viz/epochs.py | 1 - mne/viz/raw.py | 2 +- mne/viz/topo.py | 2 +- mne/viz/topomap.py | 2 +- mne/viz/utils.py | 4 +- .../preprocessing/plot_80_boxy_processing.py | 184 +++++++++--------- 14 files changed, 127 insertions(+), 136 deletions(-) diff --git a/mne/channels/channels.py b/mne/channels/channels.py index 5542198603c..b886be1d98f 100644 --- a/mne/channels/channels.py +++ b/mne/channels/channels.py @@ -73,7 +73,8 @@ def _get_ch_type(inst, ch_type, allow_ref_meg=False): """ if ch_type is None: allowed_types = ['mag', 'grad', 'planar1', 'planar2', 'eeg', 'csd', - 'fnirs_raw', 'fnirs_od', 'hbo', 'hbr', 'ecog', 'seeg'] + 'fnirs_raw', 'fnirs_od', 'fnirs_ph', + 'hbo', 'hbr', 'ecog', 'seeg'] allowed_types += ['ref_meg'] if allow_ref_meg else [] for type_ in allowed_types: if isinstance(inst, Info): diff --git a/mne/channels/layout.py b/mne/channels/layout.py index 83bc29266de..f1226b7aa84 100644 --- a/mne/channels/layout.py +++ b/mne/channels/layout.py @@ -907,7 +907,7 @@ def _merge_ch_data(data, ch_type, names, method='rms'): if ch_type == 'grad': data = _merge_grad_data(data, method) else: - assert ch_type in ('hbo', 'hbr', 'fnirs_raw', 'fnirs_od') + assert ch_type in ('hbo', 'hbr', 'fnirs_raw', 'fnirs_od', 'fnirs_ph') data, names = _merge_nirs_data(data, names) return data, names diff --git a/mne/defaults.py b/mne/defaults.py index 42b0a6920d8..3ab6736961d 100644 --- a/mne/defaults.py +++ b/mne/defaults.py @@ -11,24 +11,23 @@ ref_meg='steelblue', misc='k', stim='k', resp='k', chpi='k', exci='k', ias='k', syst='k', seeg='saddlebrown', dipole='k', gof='k', bio='k', ecog='k', hbo='#AA3377', hbr='b', - fnirs_raw='k', fnirs_od='k', fnirs_ac='k', fnirs_ph='k', csd='k'), + fnirs_raw='k', fnirs_od='k', fnirs_ph='k', csd='k'), units=dict(mag='fT', grad='fT/cm', eeg='µV', eog='µV', ecg='µV', 
emg='µV', misc='AU', seeg='mV', dipole='nAm', gof='GOF', bio='µV', ecog='µV', hbo='µM', hbr='µM', ref_meg='fT', fnirs_raw='V', - fnirs_od='V', fnirs_ac='V', fnirs_ph=u'\N{DEGREE SIGN}', csd='V/m²'), + fnirs_od='V', fnirs_ph=u'\N{DEGREE SIGN}', csd='V/m²'), # scalings for the units scalings=dict(mag=1e15, grad=1e13, eeg=1e6, eog=1e6, emg=1e6, ecg=1e6, misc=1.0, seeg=1e3, dipole=1e9, gof=1.0, bio=1e6, ecog=1e6, hbo=1e6, hbr=1e6, ref_meg=1e15, fnirs_raw=1.0, fnirs_od=1.0, - fnirs_ac=1.0, fnirs_ph=1.0, csd=1e5), + fnirs_ph=1.0, csd=1e5), # rough guess for a good plot scalings_plot_raw=dict(mag=1e-12, grad=4e-11, eeg=20e-6, eog=150e-6, ecg=5e-4, emg=1e-3, ref_meg=1e-12, misc='auto', stim=1, resp=1, chpi=1e-4, exci=1, ias=1, syst=1, seeg=1e-4, bio=1e-6, ecog=1e-4, hbo=10e-6, hbr=10e-6, whitened=10., fnirs_raw=2e-2, - fnirs_od=2e-2, fnirs_ac=20000, - fnirs_ph=10e3, csd=20e-4), + fnirs_od=2e-2, fnirs_ph=180, csd=20e-4), scalings_cov_rank=dict(mag=1e12, grad=1e11, eeg=1e5, # ~100x scalings seeg=1e1, ecog=1e4, hbo=1e4, hbr=1e4), ylim=dict(mag=(-600., 600.), grad=(-200., 200.), eeg=(-200., 200.), @@ -39,9 +38,9 @@ ecg='ECG', emg='EMG', misc='misc', seeg='sEEG', bio='BIO', dipole='Dipole', ecog='ECoG', hbo='Oxyhemoglobin', ref_meg='Reference Magnetometers', fnirs_raw='fNIRS (raw)', - fnirs_od='fNIRS (OD)', fnirs_ac='fNIRS (AC)', - fnirs_ph='fNIRS (Ph)',hbr='Deoxyhemoglobin', - gof='Goodness of fit', csd='Current source density'), + fnirs_od='fNIRS (OD)', fnirs_ph='fNIRS (Ph)', + hbr='Deoxyhemoglobin', gof='Goodness of fit', + csd='Current source density'), mask_params=dict(marker='o', markerfacecolor='w', markeredgecolor='k', diff --git a/mne/io/boxy/boxy.py b/mne/io/boxy/boxy.py index 0b705d997c3..19f57977920 100644 --- a/mne/io/boxy/boxy.py +++ b/mne/io/boxy/boxy.py @@ -299,10 +299,15 @@ def __init__(self, fname, datatype='AC', preload=False, verbose=None): rpa=fiducial_coords[2]) # create info structure - ch_types = (['fnirs_ph' if i_chan < np.sum(mtg_chan_num) else 'stim' 
+ if datatype == 'Ph': + chan_type = 'fnirs_ph' + else: + chan_type = 'fnirs_raw' + + ch_types = ([chan_type if i_chan < np.sum(mtg_chan_num) else 'stim' for i_chan, _ in enumerate(boxy_labels)]) info = create_info(boxy_labels, srate[0], ch_types=ch_types) - + # add dig info # this also applies a transform to the data into neuromag space # based on fiducials diff --git a/mne/io/constants.py b/mne/io/constants.py index 57ba7acc665..e62ac835176 100644 --- a/mne/io/constants.py +++ b/mne/io/constants.py @@ -827,8 +827,7 @@ FIFF.FIFFV_COIL_FNIRS_HBR = 301 # fNIRS deoxyhemoglobin FIFF.FIFFV_COIL_FNIRS_RAW = 302 # fNIRS raw light intensity FIFF.FIFFV_COIL_FNIRS_OD = 303 # fNIRS optical density -FIFF.FIFFV_COIL_FNIRS_AC = 304 # fNIRS changes in light intensity -FIFF.FIFFV_COIL_FNIRS_PH = 305 # fNIRS phase of optical signal +FIFF.FIFFV_COIL_FNIRS_PH = 304 # fNIRS phase of optical signal FIFF.FIFFV_COIL_MCG_42 = 1000 # For testing the MCG software @@ -914,4 +913,4 @@ FIFF.FIFFB_MNE_ANNOTATIONS = 3810 # annotations block # MNE Metadata Dataframes -FIFF.FIFFB_MNE_METADATA = 3811 # metadata dataframes block \ No newline at end of file +FIFF.FIFFB_MNE_METADATA = 3811 # metadata dataframes block diff --git a/mne/io/meas_info.py b/mne/io/meas_info.py index e17f482616d..8043c75db2c 100644 --- a/mne/io/meas_info.py +++ b/mne/io/meas_info.py @@ -53,8 +53,6 @@ ecog=(FIFF.FIFFV_ECOG_CH, FIFF.FIFFV_COIL_EEG, FIFF.FIFF_UNIT_V), fnirs_raw=(FIFF.FIFFV_FNIRS_CH, FIFF.FIFFV_COIL_FNIRS_RAW, FIFF.FIFF_UNIT_V), - fnirs_ac=(FIFF.FIFFV_FNIRS_CH, FIFF.FIFFV_COIL_FNIRS_AC, - FIFF.FIFF_UNIT_V), fnirs_ph=(FIFF.FIFFV_FNIRS_CH, FIFF.FIFFV_COIL_FNIRS_PH, FIFF.FIFF_UNIT_V), fnirs_od=(FIFF.FIFFV_FNIRS_CH, FIFF.FIFFV_COIL_FNIRS_OD, diff --git a/mne/io/pick.py b/mne/io/pick.py index 8ea6ffdbd8b..89850b92724 100644 --- a/mne/io/pick.py +++ b/mne/io/pick.py @@ -53,6 +53,8 @@ def get_channel_type_constants(): coil_type=FIFF.FIFFV_COIL_FNIRS_RAW), fnirs_od=dict(kind=FIFF.FIFFV_FNIRS_CH, 
coil_type=FIFF.FIFFV_COIL_FNIRS_OD), + fnirs_ph=dict(kind=FIFF.FIFFV_FNIRS_CH, + coil_type=FIFF.FIFFV_COIL_FNIRS_PH), hbo=dict(kind=FIFF.FIFFV_FNIRS_CH, coil_type=FIFF.FIFFV_COIL_FNIRS_HBO), hbr=dict(kind=FIFF.FIFFV_FNIRS_CH, @@ -99,7 +101,6 @@ def get_channel_type_constants(): FIFF.FIFFV_COIL_FNIRS_HBR: 'hbr', FIFF.FIFFV_COIL_FNIRS_RAW: 'fnirs_raw', FIFF.FIFFV_COIL_FNIRS_OD: 'fnirs_od', - FIFF.FIFFV_COIL_FNIRS_AC: 'fnirs_ac', FIFF.FIFFV_COIL_FNIRS_PH: 'fnirs_ph', }), 'eeg': ('coil_type', {FIFF.FIFFV_COIL_EEG: 'eeg', @@ -273,8 +274,6 @@ def _triage_fnirs_pick(ch, fnirs): return True elif ch['coil_type'] == FIFF.FIFFV_COIL_FNIRS_OD and fnirs == 'fnirs_od': return True - elif ch['coil_type'] == FIFF.FIFFV_COIL_FNIRS_AC and fnirs == 'fnirs_ac': - return True elif ch['coil_type'] == FIFF.FIFFV_COIL_FNIRS_PH and fnirs == 'fnirs_ph': return True return False @@ -407,8 +406,7 @@ def pick_types(info, meg=True, eeg=False, stim=False, eog=False, ecg=False, for key in ('grad', 'mag'): param_dict[key] = meg if isinstance(fnirs, bool): - for key in ('hbo', 'hbr', 'fnirs_raw', - 'fnirs_od', 'fnirs_ac', 'fnirs_ph'): + for key in ('hbo', 'hbr', 'fnirs_raw', 'fnirs_od', 'fnirs_ph'): param_dict[key] = fnirs for k in range(nchan): ch_type = channel_type(info, k) @@ -416,7 +414,7 @@ def pick_types(info, meg=True, eeg=False, stim=False, eog=False, ecg=False, pick[k] = param_dict[ch_type] except KeyError: # not so simple assert ch_type in ('grad', 'mag', 'hbo', 'hbr', 'ref_meg', - 'fnirs_raw', 'fnirs_od', 'fnirs_ac', 'fnirs_ph') + 'fnirs_raw', 'fnirs_od', 'fnirs_ph') if ch_type in ('grad', 'mag'): pick[k] = _triage_meg_pick(info['chs'][k], meg) elif ch_type == 'ref_meg': @@ -436,7 +434,6 @@ def pick_types(info, meg=True, eeg=False, stim=False, eog=False, ecg=False, myinclude = [info['ch_names'][k] for k in range(nchan) if pick[k]] myinclude += include - if len(myinclude) == 0: sel = np.array([], int) else: @@ -707,7 +704,7 @@ def channel_indices_by_type(info, picks=None): idx_by_type = 
{key: list() for key in _PICK_TYPES_KEYS if key not in ('meg', 'fnirs')} idx_by_type.update(mag=list(), grad=list(), hbo=list(), hbr=list(), - fnirs_raw=list(), fnirs_od=list()) + fnirs_raw=list(), fnirs_od=list(), fnirs_ph=list()) picks = _picks_to_idx(info, picks, none='all', exclude=(), allow_empty=True) for k in picks: @@ -796,7 +793,7 @@ def _contains_ch_type(info, ch_type): _validate_type(ch_type, 'str', "ch_type") meg_extras = ['mag', 'grad', 'planar1', 'planar2'] - fnirs_extras = ['hbo', 'hbr', 'fnirs_raw', 'fnirs_od'] + fnirs_extras = ['hbo', 'hbr', 'fnirs_raw', 'fnirs_od', 'fnirs_ph'] valid_channel_types = sorted([key for key in _PICK_TYPES_KEYS if key != 'meg'] + meg_extras + fnirs_extras) _check_option('ch_type', ch_type, valid_channel_types) @@ -901,20 +898,20 @@ def _check_excludes_includes(chs, info=None, allow_bads=False): seeg=True, dipole=False, gof=False, bio=False, ecog=True, fnirs=True) _PICK_TYPES_KEYS = tuple(list(_PICK_TYPES_DATA_DICT) + ['ref_meg']) _DATA_CH_TYPES_SPLIT = ('mag', 'grad', 'eeg', 'csd', 'seeg', 'ecog', - 'hbo', 'hbr', 'fnirs_raw', 'fnirs_od') + 'hbo', 'hbr', 'fnirs_raw', 'fnirs_od', 'fnirs_ph') _DATA_CH_TYPES_ORDER_DEFAULT = ('mag', 'grad', 'eeg', 'csd', 'eog', 'ecg', 'emg', 'ref_meg', 'misc', 'stim', 'resp', 'chpi', 'exci', 'ias', 'syst', 'seeg', 'bio', 'ecog', 'hbo', 'hbr', 'fnirs_raw', 'fnirs_od', - 'whitened') + 'whitened', 'fnirs_ph') # Valid data types, ordered for consistency, used in viz/evoked. 
_VALID_CHANNEL_TYPES = ('eeg', 'grad', 'mag', 'seeg', 'eog', 'ecg', 'emg', 'dipole', 'gof', 'bio', 'ecog', 'hbo', 'hbr', - 'fnirs_raw', 'fnirs_od', 'misc', 'csd') + 'fnirs_raw', 'fnirs_od', 'misc', 'csd', 'fnirs_ph') _MEG_CH_TYPES_SPLIT = ('mag', 'grad', 'planar1', 'planar2') -_FNIRS_CH_TYPES_SPLIT = ('hbo', 'hbr', 'fnirs_raw', 'fnirs_od') +_FNIRS_CH_TYPES_SPLIT = ('hbo', 'hbr', 'fnirs_raw', 'fnirs_od', 'fnirs_ph') def _pick_data_channels(info, exclude='bads', with_ref_meg=True): @@ -944,8 +941,6 @@ def _pick_data_or_ica(info, exclude=()): def _picks_to_idx(info, picks, none='data', exclude='bads', allow_empty=False, with_ref_meg=True, return_kind=False): """Convert and check pick validity.""" - # import pdb - # pdb.set_trace() from .meas_info import Info picked_ch_type_or_generic = False # diff --git a/mne/preprocessing/nirs/_optical_density.py b/mne/preprocessing/nirs/_optical_density.py index 7a809d08420..0c58a987508 100644 --- a/mne/preprocessing/nirs/_optical_density.py +++ b/mne/preprocessing/nirs/_optical_density.py @@ -25,11 +25,12 @@ def optical_density(raw): raw : instance of Raw The modified raw instance. """ - # import pdb - # pdb.set_trace() raw = raw.copy().load_data() _validate_type(raw, BaseRaw, 'raw') - picks = _picks_to_idx(raw.info, 'fnirs_raw') + try: + picks = _picks_to_idx(raw.info, 'fnirs_raw') + except: + picks = _picks_to_idx(raw.info, 'fnirs_ph') data_means = np.mean(raw.get_data(), axis=1) # The devices measure light intensity. 
Negative light intensities should diff --git a/mne/viz/epochs.py b/mne/viz/epochs.py index 781554bf8e8..1651bc126ac 100644 --- a/mne/viz/epochs.py +++ b/mne/viz/epochs.py @@ -1392,7 +1392,6 @@ def _plot_update_epochs_proj(params, bools=None): else: # this is faster than epochs.get_data()[start:end] when not preloaded data = np.concatenate(epochs[start:end].get_data(), axis=1) - if params['projector'] is not None: data = np.dot(params['projector'], data) types = params['types'] diff --git a/mne/viz/raw.py b/mne/viz/raw.py index 4de90acc1c9..2a54134d206 100644 --- a/mne/viz/raw.py +++ b/mne/viz/raw.py @@ -350,7 +350,7 @@ def plot_raw(raw, events=None, duration=10.0, start=0.0, n_channels=20, for t in ['grad', 'mag']: inds += [pick_types(info, meg=t, ref_meg=False, exclude=[])] types += [t] * len(inds[-1]) - for t in ['hbo', 'hbr', 'fnirs_raw', 'fnirs_od', 'fnirs_ac', 'fnirs_ph']: + for t in ['hbo', 'hbr', 'fnirs_raw', 'fnirs_od', 'fnirs_ph']: inds += [pick_types(info, meg=False, ref_meg=False, fnirs=t, exclude=[])] types += [t] * len(inds[-1]) diff --git a/mne/viz/topo.py b/mne/viz/topo.py index f040aa1cea5..1a57a19657e 100644 --- a/mne/viz/topo.py +++ b/mne/viz/topo.py @@ -714,7 +714,7 @@ def _plot_evoked_topo(evoked, layout=None, layout_scale=0.945, color=None, # one check for all vendors meg_types = {'mag', 'grad'} is_meg = len(set.intersection(types_used, meg_types)) > 0 - nirs_types = {'hbo', 'hbr', 'fnirs_raw', 'fnirs_od'} + nirs_types = {'hbo', 'hbr', 'fnirs_raw', 'fnirs_od', 'fnirs_ph'} is_nirs = len(set.intersection(types_used, nirs_types)) > 0 if is_meg: types_used = list(types_used)[::-1] # -> restore kwarg order diff --git a/mne/viz/topomap.py b/mne/viz/topomap.py index eef8804e9b8..d2c0b0cc794 100644 --- a/mne/viz/topomap.py +++ b/mne/viz/topomap.py @@ -37,7 +37,7 @@ from ..io.proj import Projection -_fnirs_types = ('hbo', 'hbr', 'fnirs_raw', 'fnirs_od') +_fnirs_types = ('hbo', 'hbr', 'fnirs_raw', 'fnirs_od', 'fnirs_ph') def _adjust_meg_sphere(sphere, 
info, ch_type): diff --git a/mne/viz/utils.py b/mne/viz/utils.py index b03b64d9ab2..378aa8e92dd 100644 --- a/mne/viz/utils.py +++ b/mne/viz/utils.py @@ -3065,7 +3065,7 @@ def _set_psd_plot_params(info, proj, picks, ax, area_mode): kwargs = dict(meg=False, ref_meg=False, exclude=[]) if name in ('mag', 'grad'): kwargs['meg'] = name - elif name in ('fnirs_raw', 'fnirs_od', 'hbo', 'hbr'): + elif name in ('fnirs_raw', 'fnirs_od', 'fnirs_ph', 'hbo', 'hbr'): kwargs['fnirs'] = name else: kwargs[name] = True @@ -3234,7 +3234,7 @@ def _plot_psd(inst, fig, freqs, psd_list, picks_list, titles_list, valid_channel_types = [ 'mag', 'grad', 'eeg', 'csd', 'seeg', 'eog', 'ecg', 'emg', 'dipole', 'gof', 'bio', 'ecog', 'hbo', - 'hbr', 'misc', 'fnirs_raw', 'fnirs_od'] + 'hbr', 'misc', 'fnirs_raw', 'fnirs_od', 'fnirs_ph'] ch_types_used = list() for this_type in valid_channel_types: if this_type in types: diff --git a/tutorials/preprocessing/plot_80_boxy_processing.py b/tutorials/preprocessing/plot_80_boxy_processing.py index b01e4551629..7901fb3f891 100644 --- a/tutorials/preprocessing/plot_80_boxy_processing.py +++ b/tutorials/preprocessing/plot_80_boxy_processing.py @@ -31,8 +31,6 @@ ### plot the raw data ### raw_intensity.plot(n_channels=10, clipping='clamp') -# raw_intensity.plot(n_channels=10, scalings={'fnirs_raw':180}, clipping='clamp') - ###separate data based on montages### mtg_a_indices = [i_index for i_index,i_label in enumerate(raw_intensity.info['ch_names']) @@ -121,13 +119,9 @@ mtg_a_intensity.pick(picks_a[dists_a < 0.08]) mtg_b_intensity.pick(picks_b[dists_b < 0.08]) -scalings = dict(fnirs_raw=1e2) -raw_intensity.plot(n_channels=5, - duration=20, scalings=100, show_scrollbars=True) -mtg_a_intensity.plot(n_channels=5, - duration=20, scalings=100, show_scrollbars=True) -mtg_b_intensity.plot(n_channels=5, - duration=20, scalings=100, show_scrollbars=True) +raw_intensity.plot(n_channels=5, duration=20, show_scrollbars=True) +mtg_a_intensity.plot(n_channels=5, duration=20, 
show_scrollbars=True) +mtg_b_intensity.plot(n_channels=5, duration=20, show_scrollbars=True) # ############################################################################### @@ -141,16 +135,16 @@ # maybe add a try statement for the other types? # not sure if we want to change the default function -raw_od = mne.preprocessing.nirs.optical_density(raw_intensity) -raw_od_a = mne.preprocessing.nirs.optical_density(mtg_a_intensity) -raw_od_b = mne.preprocessing.nirs.optical_density(mtg_b_intensity) +# raw_od = mne.preprocessing.nirs.optical_density(raw_intensity) +# raw_od_a = mne.preprocessing.nirs.optical_density(mtg_a_intensity) +# raw_od_b = mne.preprocessing.nirs.optical_density(mtg_b_intensity) -raw_od.plot(n_channels=len(raw_od.ch_names), - duration=500, show_scrollbars=False) -raw_od_a.plot(n_channels=len(raw_od_a.ch_names), - duration=500, show_scrollbars=False) -raw_od_b.plot(n_channels=len(raw_od_b.ch_names), - duration=500, show_scrollbars=False) +# raw_od.plot(n_channels=len(raw_od.ch_names), +# duration=500, show_scrollbars=False) +# raw_od_a.plot(n_channels=len(raw_od_a.ch_names), +# duration=500, show_scrollbars=False) +# raw_od_b.plot(n_channels=len(raw_od_b.ch_names), +# duration=500, show_scrollbars=False) # ############################################################################### @@ -166,30 +160,30 @@ # # channels, so we will not mark any channels as bad based on the scalp # # coupling index. 
-sci = mne.preprocessing.nirs.scalp_coupling_index(raw_od) -sci_a = mne.preprocessing.nirs.scalp_coupling_index(raw_od_a) -sci_b = mne.preprocessing.nirs.scalp_coupling_index(raw_od_b) +# sci = mne.preprocessing.nirs.scalp_coupling_index(raw_od) +# sci_a = mne.preprocessing.nirs.scalp_coupling_index(raw_od_a) +# sci_b = mne.preprocessing.nirs.scalp_coupling_index(raw_od_b) -fig, ax = plt.subplots() -ax.hist(sci) -ax.set(xlabel='Scalp Coupling Index', ylabel='Count', xlim=[0, 1]) +# fig, ax = plt.subplots() +# ax.hist(sci) +# ax.set(xlabel='Scalp Coupling Index', ylabel='Count', xlim=[0, 1]) -fig, ax = plt.subplots() -ax.hist(sci_a) -ax.set(xlabel='Scalp Coupling Index-A', ylabel='Count', xlim=[0, 1]) +# fig, ax = plt.subplots() +# ax.hist(sci_a) +# ax.set(xlabel='Scalp Coupling Index-A', ylabel='Count', xlim=[0, 1]) -fig, ax = plt.subplots() -ax.hist(sci_b) -ax.set(xlabel='Scalp Coupling Index-B', ylabel='Count', xlim=[0, 1]) +# fig, ax = plt.subplots() +# ax.hist(sci_b) +# ax.set(xlabel='Scalp Coupling Index-B', ylabel='Count', xlim=[0, 1]) # ############################################################################### # # In this example we will mark all channels with a SCI less than 0.5 as bad # # (this dataset is quite clean, so no channels are marked as bad). -raw_od.info['bads'] = list(compress(raw_od.ch_names, sci < 0.5)) -raw_od_a.info['bads'] = list(compress(raw_od_a.ch_names, sci_a < 0.5)) -raw_od_b.info['bads'] = list(compress(raw_od_b.ch_names, sci_b < 0.5)) +# raw_od.info['bads'] = list(compress(raw_od.ch_names, sci < 0.5)) +# raw_od_a.info['bads'] = list(compress(raw_od_a.ch_names, sci_a < 0.5)) +# raw_od_b.info['bads'] = list(compress(raw_od_b.ch_names, sci_b < 0.5)) # ############################################################################### # # At this stage it is appropriate to inspect your data @@ -207,18 +201,18 @@ # # Next we convert the optical density data to haemoglobin concentration using # # the modified Beer-Lambert law. 
-raw_haemo = mne.preprocessing.nirs.beer_lambert_law(raw_od) -raw_haemo_a = mne.preprocessing.nirs.beer_lambert_law(raw_od_a) -raw_haemo_b = mne.preprocessing.nirs.beer_lambert_law(raw_od_b) +# raw_haemo = mne.preprocessing.nirs.beer_lambert_law(raw_od) +# raw_haemo_a = mne.preprocessing.nirs.beer_lambert_law(raw_od_a) +# raw_haemo_b = mne.preprocessing.nirs.beer_lambert_law(raw_od_b) -raw_haemo.plot(n_channels=len(raw_haemo.ch_names), - duration=500, show_scrollbars=False) +# raw_haemo.plot(n_channels=len(raw_haemo.ch_names), +# duration=500, show_scrollbars=False) -raw_haemo_a.plot(n_channels=len(raw_haemo_a.ch_names), - duration=500, show_scrollbars=False) +# raw_haemo_a.plot(n_channels=len(raw_haemo_a.ch_names), +# duration=500, show_scrollbars=False) -raw_haemo_b.plot(n_channels=len(raw_haemo_b.ch_names), - duration=500, show_scrollbars=False) +# raw_haemo_b.plot(n_channels=len(raw_haemo_b.ch_names), +# duration=500, show_scrollbars=False) # ############################################################################### @@ -231,34 +225,34 @@ # # remove this. A high pass filter is also included to remove slow drifts # # in the data. 
-fig = raw_haemo.plot_psd(average=True) -fig.suptitle('Before filtering', weight='bold', size='x-large') -fig.subplots_adjust(top=0.88) -raw_haemo = raw_haemo.filter(0.05, 0.7, h_trans_bandwidth=0.2, - l_trans_bandwidth=0.02) -fig = raw_haemo.plot_psd(average=True) -fig.suptitle('After filtering', weight='bold', size='x-large') -fig.subplots_adjust(top=0.88) - - -fig = raw_haemo_a.plot_psd(average=True) -fig.suptitle('Before filtering Montage A', weight='bold', size='x-large') -fig.subplots_adjust(top=0.88) -raw_haemo_a = raw_haemo_a.filter(0.05, 0.7, h_trans_bandwidth=0.2, - l_trans_bandwidth=0.02) -fig = raw_haemo_a.plot_psd(average=True) -fig.suptitle('After filtering Montage A', weight='bold', size='x-large') -fig.subplots_adjust(top=0.88) - - -fig = raw_haemo_b.plot_psd(average=True) -fig.suptitle('Before filtering Montage B', weight='bold', size='x-large') -fig.subplots_adjust(top=0.88) -raw_haemo_b = raw_haemo_b.filter(0.05, 0.7, h_trans_bandwidth=0.2, - l_trans_bandwidth=0.02) -fig = raw_haemo_b.plot_psd(average=True) -fig.suptitle('After filtering Montage B', weight='bold', size='x-large') -fig.subplots_adjust(top=0.88) +# fig = raw_haemo.plot_psd(average=True) +# fig.suptitle('Before filtering', weight='bold', size='x-large') +# fig.subplots_adjust(top=0.88) +# raw_haemo = raw_haemo.filter(0.05, 0.7, h_trans_bandwidth=0.2, +# l_trans_bandwidth=0.02) +# fig = raw_haemo.plot_psd(average=True) +# fig.suptitle('After filtering', weight='bold', size='x-large') +# fig.subplots_adjust(top=0.88) + + +# fig = raw_haemo_a.plot_psd(average=True) +# fig.suptitle('Before filtering Montage A', weight='bold', size='x-large') +# fig.subplots_adjust(top=0.88) +# raw_haemo_a = raw_haemo_a.filter(0.05, 0.7, h_trans_bandwidth=0.2, +# l_trans_bandwidth=0.02) +# fig = raw_haemo_a.plot_psd(average=True) +# fig.suptitle('After filtering Montage A', weight='bold', size='x-large') +# fig.subplots_adjust(top=0.88) + + +# fig = raw_haemo_b.plot_psd(average=True) +# 
fig.suptitle('Before filtering Montage B', weight='bold', size='x-large') +# fig.subplots_adjust(top=0.88) +# raw_haemo_b = raw_haemo_b.filter(0.05, 0.7, h_trans_bandwidth=0.2, +# l_trans_bandwidth=0.02) +# fig = raw_haemo_b.plot_psd(average=True) +# fig.suptitle('After filtering Montage B', weight='bold', size='x-large') +# fig.subplots_adjust(top=0.88) # ############################################################################### # # Extract epochs @@ -296,20 +290,20 @@ reject_criteria = None tmin, tmax = -0.2, 1 -mtg_a_haemo_epochs = mne.Epochs(raw_haemo_a, mtg_a_events, - tmin=tmin, tmax=tmax, - reject=reject_criteria, reject_by_annotation=False, - proj=True, baseline=(None, 0), preload=True, - detrend=None, verbose=True) -mtg_a_haemo_epochs.plot_drop_log() +# mtg_a_haemo_epochs = mne.Epochs(raw_haemo_a, mtg_a_events, +# tmin=tmin, tmax=tmax, +# reject=reject_criteria, reject_by_annotation=False, +# proj=True, baseline=(None, 0), preload=True, +# detrend=None, verbose=True) +# mtg_a_haemo_epochs.plot_drop_log() -mtg_b_haemo_epochs = mne.Epochs(raw_haemo_b, mtg_b_events, - tmin=tmin, tmax=tmax, - reject=reject_criteria, reject_by_annotation=False, - proj=True, baseline=(None, 0), preload=True, - detrend=None, verbose=True) -mtg_b_haemo_epochs.plot_drop_log() +# mtg_b_haemo_epochs = mne.Epochs(raw_haemo_b, mtg_b_events, +# tmin=tmin, tmax=tmax, +# reject=reject_criteria, reject_by_annotation=False, +# proj=True, baseline=(None, 0), preload=True, +# detrend=None, verbose=True) +# mtg_b_haemo_epochs.plot_drop_log() #get epochs from the raw intensities @@ -345,22 +339,22 @@ # # trials, and the consistent dip in HbR that is slightly delayed relative to # # the HbO peak. 
-#haemo plots -mtg_a_haemo_epochs['1'].plot_image(combine='mean', vmin=-30, vmax=30, - ts_args=dict(ylim=dict(hbo=[-15, 15], - hbr=[-15, 15]))) +# #haemo plots +# mtg_a_haemo_epochs['1'].plot_image(combine='mean', vmin=-30, vmax=30, +# ts_args=dict(ylim=dict(hbo=[-15, 15], +# hbr=[-15, 15]))) -mtg_a_haemo_epochs['2'].plot_image(combine='mean', vmin=-30, vmax=30, - ts_args=dict(ylim=dict(hbo=[-15, 15], - hbr=[-15, 15]))) +# mtg_a_haemo_epochs['2'].plot_image(combine='mean', vmin=-30, vmax=30, +# ts_args=dict(ylim=dict(hbo=[-15, 15], +# hbr=[-15, 15]))) -mtg_b_haemo_epochs['1'].plot_image(combine='mean', vmin=-30, vmax=30, - ts_args=dict(ylim=dict(hbo=[-15, 15], - hbr=[-15, 15]))) +# mtg_b_haemo_epochs['1'].plot_image(combine='mean', vmin=-30, vmax=30, +# ts_args=dict(ylim=dict(hbo=[-15, 15], +# hbr=[-15, 15]))) -mtg_b_haemo_epochs['2'].plot_image(combine='mean', vmin=-30, vmax=30, - ts_args=dict(ylim=dict(hbo=[-15, 15], - hbr=[-15, 15]))) +# mtg_b_haemo_epochs['2'].plot_image(combine='mean', vmin=-30, vmax=30, +# ts_args=dict(ylim=dict(hbo=[-15, 15], +# hbr=[-15, 15]))) #raw epochs #separate first and last detectors From 2c7f7287d67258afc681ef4e74d51c7b711dce4c Mon Sep 17 00:00:00 2001 From: Kyle Mathewson Date: Thu, 28 May 2020 18:10:39 -0700 Subject: [PATCH 106/167] changed ph default scale --- mne/defaults.py | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/mne/defaults.py b/mne/defaults.py index 3ab6736961d..f09d576cc7d 100644 --- a/mne/defaults.py +++ b/mne/defaults.py @@ -27,7 +27,7 @@ stim=1, resp=1, chpi=1e-4, exci=1, ias=1, syst=1, seeg=1e-4, bio=1e-6, ecog=1e-4, hbo=10e-6, hbr=10e-6, whitened=10., fnirs_raw=2e-2, - fnirs_od=2e-2, fnirs_ph=180, csd=20e-4), + fnirs_od=2e-2, fnirs_ph=5e3, csd=20e-4), scalings_cov_rank=dict(mag=1e12, grad=1e11, eeg=1e5, # ~100x scalings seeg=1e1, ecog=1e4, hbo=1e4, hbr=1e4), ylim=dict(mag=(-600., 600.), grad=(-200., 200.), eeg=(-200., 200.), From 32f7cdcffec427b2da3e76956b51bdebf3bebf09 Mon Sep 17 00:00:00 
2001 From: Jonathan Kuziek Date: Fri, 29 May 2020 09:57:25 -0600 Subject: [PATCH 107/167] removed some commented code --- mne/io/boxy/boxy.py | 20 -------------------- 1 file changed, 20 deletions(-) diff --git a/mne/io/boxy/boxy.py b/mne/io/boxy/boxy.py index 78f52572580..9d6e84dd83f 100644 --- a/mne/io/boxy/boxy.py +++ b/mne/io/boxy/boxy.py @@ -565,27 +565,7 @@ def _read_segment_file(self, data, idx, fi, start, stop, cals, mult): #so we can send values between 1-255 #we'll multiply our block start/end markers by 1000 to ensure #we aren't within the 1-255 range - # import pdb - # pdb.set_trace() - # if i_mtg == 0: - # block_markers[i_blk][100:200] = 1 - # block_markers[i_blk][400:600] = 1 - # block_markers[i_blk][700:900] = 1 - # block_markers[i_blk][1000:1200] = 1 - # block_markers[i_blk][1300:1500] = 1 - # elif i_mtg == 1: - # block_markers[i_blk][100:200] = 2 - # block_markers[i_blk][400:600] = 2 - # block_markers[i_blk][700:900] = 2 - # block_markers[i_blk][1000:1200] = 2 - # block_markers[i_blk][1300:1500] = 2 block_markers[i_blk][-1] = int(blk_name) * 1000 - - # # indicate which montage our markers belong to - # block_markers[i_blk] = ([(i_mrk+((i_mtg+1)*10000)) - # if i_mrk > 0 - # else 0 for i_mrk - # in block_markers[i_blk]]) all_blocks.append(data_) From 035dd059ed2e5d5ca6f3537f47d992d17a2ce010 Mon Sep 17 00:00:00 2001 From: Jonathan Kuziek Date: Fri, 29 May 2020 12:13:31 -0600 Subject: [PATCH 108/167] added sections to plot both AC and Ph data --- .../preprocessing/plot_80_boxy_processing.py | 333 ++++++++++++++---- 1 file changed, 263 insertions(+), 70 deletions(-) diff --git a/tutorials/preprocessing/plot_80_boxy_processing.py b/tutorials/preprocessing/plot_80_boxy_processing.py index 588508f7514..55f2f247cf9 100644 --- a/tutorials/preprocessing/plot_80_boxy_processing.py +++ b/tutorials/preprocessing/plot_80_boxy_processing.py @@ -24,15 +24,16 @@ import mne - +# load AC and Phase data boxy_data_folder = mne.datasets.boxy_example.data_path() 
boxy_raw_dir = os.path.join(boxy_data_folder, 'Participant-1') -raw_intensity = mne.io.read_raw_boxy(boxy_raw_dir, 'AC', verbose=True).load_data() +raw_intensity_ac = mne.io.read_raw_boxy(boxy_raw_dir, 'AC', verbose=True).load_data() +raw_intensity_ph = mne.io.read_raw_boxy(boxy_raw_dir, 'Ph', verbose=True).load_data() # get channel indices for our two montages -mtg_a = [raw_intensity.ch_names[i_index] for i_index,i_label in enumerate(raw_intensity.info['ch_names']) +mtg_a = [raw_intensity_ac.ch_names[i_index] for i_index,i_label in enumerate(raw_intensity_ac.info['ch_names']) if re.search(r'S[1-5]_', i_label)] -mtg_b = [raw_intensity.ch_names[i_index] for i_index,i_label in enumerate(raw_intensity.info['ch_names']) +mtg_b = [raw_intensity_ac.ch_names[i_index] for i_index,i_label in enumerate(raw_intensity_ac.info['ch_names']) if re.search(r'S([6-9]|10)_', i_label)] # ############################################################################### @@ -48,7 +49,7 @@ # plot all montages fig = mne.viz.create_3d_figure(size=(800, 600), bgcolor='white') -fig = mne.viz.plot_alignment(raw_intensity.info, +fig = mne.viz.plot_alignment(raw_intensity_ac.info, show_axes=True, subject='fsaverage', trans='fsaverage', @@ -62,7 +63,7 @@ # montage A fig = mne.viz.create_3d_figure(size=(800, 600), bgcolor='white') -fig = mne.viz.plot_alignment(raw_intensity.copy().pick_channels(mtg_a).info, +fig = mne.viz.plot_alignment(raw_intensity_ac.copy().pick_channels(mtg_a).info, show_axes=True, subject='fsaverage', trans='fsaverage', @@ -76,7 +77,7 @@ # montage B fig = mne.viz.create_3d_figure(size=(800, 600), bgcolor='white') -fig = mne.viz.plot_alignment(raw_intensity.copy().pick_channels(mtg_b).info, +fig = mne.viz.plot_alignment(raw_intensity_ac.copy().pick_channels(mtg_b).info, show_axes=True, subject='fsaverage', trans='fsaverage', @@ -97,24 +98,31 @@ # # These short channels can be seen in the figure above. 
# # To achieve this we pick all the channels that are not considered to be short. -picks = mne.pick_types(raw_intensity.info, meg=False, fnirs=True, stim=True) +picks = mne.pick_types(raw_intensity_ac.info, meg=False, fnirs=True, stim=True) dists = mne.preprocessing.nirs.source_detector_distances( - raw_intensity.info, picks=picks) + raw_intensity_ac.info, picks=picks) -raw_intensity.pick(picks[dists < 0.08]) +raw_intensity_ac.pick(picks[dists < 0.08]) +# AC scalings = dict(fnirs_raw=1e2) -raw_intensity.plot(n_channels=5, - duration=20, scalings=100, show_scrollbars=True) +raw_intensity_ac.plot(n_channels=5, + duration=20, scalings=scalings, show_scrollbars=True) + +# Phase +scalings = dict(fnirs_ph=1e4) +raw_intensity_ph.plot(n_channels=5, + duration=20, scalings=scalings, show_scrollbars=True) # ############################################################################### # # Converting from raw intensity to optical density # # ------------------------------------------------ # # # # The raw intensity values are then converted to optical density. 
+# # We will only do this for either DC or AC data, since they are intensity data -raw_od = mne.preprocessing.nirs.optical_density(raw_intensity) +raw_od = mne.preprocessing.nirs.optical_density(raw_intensity_ac) raw_od.plot(n_channels=len(raw_od.ch_names), duration=500, show_scrollbars=False) @@ -199,7 +207,7 @@ # we are going to find events for each montage separately and combine them later # Montage A Events -mtg_a_events = mne.find_events(raw_intensity, stim_channel=['Markers a']) +mtg_a_events = mne.find_events(raw_intensity_ac, stim_channel=['Markers a']) mtg_a_event_dict = {'Montage_A/Event_1': 1, 'Montage_A/Event_2': 2, 'Montage A/Block 1 End': 1000, 'Montage A/Block 2 End': 2000} @@ -207,12 +215,12 @@ fig = mne.viz.plot_events(mtg_a_events) fig.subplots_adjust(right=0.7) # make room for the legend -raw_intensity.copy().pick_channels(mtg_a).plot( +raw_intensity_ac.copy().pick_channels(mtg_a).plot( events=mtg_a_events, start=0, duration=10,color='gray', event_color={1: 'r', 2: 'b', 1000: 'k', 2000: 'k'}) # Montage B Events -mtg_b_events = mne.find_events(raw_intensity, stim_channel=['Markers b']) +mtg_b_events = mne.find_events(raw_intensity_ac, stim_channel=['Markers b']) mtg_b_event_dict = {'Montage_B/Event_1': 1, 'Montage_B/Event_2': 2, 'Montage B/Block 1 End': 1000, 'Montage B/Block 2 End': 2000} @@ -220,7 +228,7 @@ fig = mne.viz.plot_events(mtg_b_events) fig.subplots_adjust(right=0.7) # make room for the legend -raw_intensity.copy().pick_channels(mtg_b).plot( +raw_intensity_ac.copy().pick_channels(mtg_b).plot( events=mtg_b_events, start=0, duration=10,color='gray', event_color={1: 'r', 2: 'b', 1000: 'k', 2000: 'k'}) @@ -237,6 +245,7 @@ mtg_a = [i_index for i_index,i_label in enumerate(raw_haemo.info['ch_names']) if re.search(r'S[1-5]_', i_label)] +# haemo epochs mtg_a_haemo_epochs = mne.Epochs(raw_haemo, mtg_a_events, event_id = mtg_a_event_dict, tmin=tmin, tmax=tmax, @@ -245,8 +254,15 @@ detrend=None, verbose=True, event_repeated='drop') 
mtg_a_haemo_epochs.plot_drop_log() -#get epochs from the raw intensities -mtg_a_epochs = mne.Epochs(raw_intensity, +#get epochs from the raw AC and Phase +mtg_a_epochs_ac = mne.Epochs(raw_intensity_ac, + mtg_a_events, event_id=mtg_a_event_dict, + tmin=tmin, tmax=tmax, + reject=None, reject_by_annotation=False, + proj=False, baseline=(-0.2, 0), preload=True, + detrend=None, verbose=True) + +mtg_a_epochs_ph = mne.Epochs(raw_intensity_ph, mtg_a_events, event_id=mtg_a_event_dict, tmin=tmin, tmax=tmax, reject=None, reject_by_annotation=False, @@ -254,14 +270,23 @@ detrend=None, verbose=True) #two ways to plot epochs, should be the same + +# haemo epochs fig = mne.viz.plot_epochs(mtg_a_haemo_epochs,n_epochs=5,n_channels=5, scalings='auto', picks = mtg_a) fig = mtg_a_haemo_epochs.plot(n_epochs=5,n_channels=5, scalings='auto', picks = mtg_a) -fig = mne.viz.plot_epochs(mtg_a_epochs,n_epochs=5,n_channels=5, +# ac epochs +fig = mne.viz.plot_epochs(mtg_a_epochs_ac,n_epochs=5,n_channels=5, scalings='auto', picks = mtg_a) -fig = mtg_a_epochs.plot(n_epochs=5,n_channels=5, scalings='auto', +fig = mtg_a_epochs_ac.plot(n_epochs=5,n_channels=5, scalings='auto', + picks = mtg_a) + +# ph epochs +fig = mne.viz.plot_epochs(mtg_a_epochs_ph,n_epochs=5,n_channels=5, + scalings='auto', picks = mtg_a) +fig = mtg_a_epochs_ph.plot(n_epochs=5,n_channels=5, scalings='auto', picks = mtg_a) @@ -269,6 +294,7 @@ mtg_b = [i_index for i_index,i_label in enumerate(raw_haemo.info['ch_names']) if re.search(r'S([6-9]|10)_', i_label)] +# haemo epochs mtg_b_haemo_epochs = mne.Epochs(raw_haemo, mtg_b_events, event_id = mtg_b_event_dict, tmin=tmin, tmax=tmax, @@ -277,8 +303,15 @@ detrend=None, verbose=True, event_repeated='drop') mtg_b_haemo_epochs.plot_drop_log() -#get epochs from the raw intensities -mtg_b_epochs = mne.Epochs(raw_intensity, +#get epochs from the raw AC and Phase +mtg_b_epochs_ac = mne.Epochs(raw_intensity_ac, + mtg_b_events, event_id=mtg_b_event_dict, + tmin=tmin, tmax=tmax, + reject=None, 
reject_by_annotation=False, + proj=False, baseline=(-0.2, 0), preload=True, + detrend=None, verbose=True) + +mtg_b_epochs_ph = mne.Epochs(raw_intensity_ph, mtg_b_events, event_id=mtg_b_event_dict, tmin=tmin, tmax=tmax, reject=None, reject_by_annotation=False, @@ -286,14 +319,22 @@ detrend=None, verbose=True) #two ways to plot epochs, should be the same +# haemo epochs fig = mne.viz.plot_epochs(mtg_b_haemo_epochs,n_epochs=5,n_channels=5, scalings='auto', picks = mtg_b) fig = mtg_b_haemo_epochs.plot(n_epochs=5,n_channels=5, scalings='auto', picks = mtg_b) -fig = mne.viz.plot_epochs(mtg_b_epochs,n_epochs=5,n_channels=5, +# ac epochs +fig = mne.viz.plot_epochs(mtg_b_epochs_ac,n_epochs=5,n_channels=5, + scalings='auto', picks = mtg_b) +fig = mtg_b_epochs_ac.plot(n_epochs=5,n_channels=5, scalings='auto', + picks = mtg_b) + +# ph epochs +fig = mne.viz.plot_epochs(mtg_b_epochs_ph,n_epochs=5,n_channels=5, scalings='auto', picks = mtg_b) -fig = mtg_b_epochs.plot(n_epochs=5,n_channels=5, scalings='auto', +fig = mtg_b_epochs_ph.plot(n_epochs=5,n_channels=5, scalings='auto', picks = mtg_b) # ############################################################################### @@ -328,17 +369,28 @@ ts_args=dict(ylim=dict(hbo=[-15, 15], hbr=[-15, 15]))) -# raw epochs -fig = mtg_a_epochs['Montage_A/Event_1'].plot_image( +# ac epochs +fig = mtg_a_epochs_ac['Montage_A/Event_1'].plot_image( combine='mean', vmin=-20, vmax=20, picks = mtg_a, colorbar=True, title='Montage A Event 1') -fig = mtg_a_epochs['Montage_A/Event_2'].plot_image( +fig = mtg_a_epochs_ac['Montage_A/Event_2'].plot_image( combine='mean', vmin=-20, vmax=20, picks = mtg_a, colorbar=True, title='Montage A Event 2') +# ph epochs +fig = mtg_a_epochs_ph['Montage_A/Event_1'].plot_image( + combine='mean', vmin=-180, vmax=180, + picks = mtg_a, colorbar=True, + title='Montage A Event 1') + +fig = mtg_a_epochs_ph['Montage_A/Event_2'].plot_image( + combine='mean', vmin=-180, vmax=180, + picks = mtg_a, colorbar=True, + title='Montage A 
Event 2') + # Montage B hbo = [i_index for i_index,i_label @@ -361,17 +413,28 @@ ts_args=dict(ylim=dict(hbo=[-15, 15], hbr=[-15, 15]))) -# raw epochs -fig = mtg_b_epochs['Montage_B/Event_1'].plot_image( +# ac epochs +fig = mtg_b_epochs_ac['Montage_B/Event_1'].plot_image( combine='mean', vmin=-20, vmax=20, picks = mtg_b, colorbar=True, title='Montage B Event 1') -fig = mtg_b_epochs['Montage_B/Event_2'].plot_image( +fig = mtg_b_epochs_ac['Montage_B/Event_2'].plot_image( combine='mean', vmin=-20, vmax=20, picks = mtg_b, colorbar=True, title='Montage B Event 2') +# ph epochs +fig = mtg_b_epochs_ph['Montage_B/Event_1'].plot_image( + combine='mean', vmin=-180, vmax=180, + picks = mtg_b, colorbar=True, + title='Montage B Event 1') + +fig = mtg_b_epochs_ph['Montage_B/Event_2'].plot_image( + combine='mean', vmin=-180, vmax=180, + picks = mtg_b, colorbar=True, + title='Montage B Event 2') + # ############################################################################### # # View consistency of responses across channels # # --------------------------------------------- @@ -380,50 +443,93 @@ # # pairs that we selected. All the channels in this data are located over the # # motor cortex, and all channels show a similar pattern in the data. 
-# individual montages +# ac evoked fig, axes = plt.subplots(nrows=2, ncols=2, figsize=(15, 6)) clim=dict(fnirs_raw=[-20,20]) -mtg_a_1_evoked = mtg_a_epochs['Montage_A/Event_1'].average() -mtg_a_2_evoked = mtg_a_epochs['Montage_A/Event_2'].average() -mtg_b_1_evoked = mtg_b_epochs['Montage_B/Event_1'].average() -mtg_b_2_evoked = mtg_b_epochs['Montage_B/Event_2'].average() +mtg_a_1_evoked_ac = mtg_a_epochs_ac['Montage_A/Event_1'].average() +mtg_a_2_evoked_ac = mtg_a_epochs_ac['Montage_A/Event_2'].average() +mtg_b_1_evoked_ac = mtg_b_epochs_ac['Montage_B/Event_1'].average() +mtg_b_2_evoked_ac = mtg_b_epochs_ac['Montage_B/Event_2'].average() -mtg_a_1_evoked.plot_image(axes=axes[0, 0], picks = mtg_a, +mtg_a_1_evoked_ac.plot_image(axes=axes[0, 0], picks = mtg_a, titles='Montage A Event 1', clim=clim) -mtg_a_2_evoked.plot_image(axes=axes[1, 0], picks = mtg_a, +mtg_a_2_evoked_ac.plot_image(axes=axes[1, 0], picks = mtg_a, titles='Montage A Event 2', clim=clim) -mtg_b_1_evoked.plot_image(axes=axes[0, 1], picks = mtg_b, +mtg_b_1_evoked_ac.plot_image(axes=axes[0, 1], picks = mtg_b, titles='Montage B Event 1', clim=clim) -mtg_b_2_evoked.plot_image(axes=axes[1, 1], picks = mtg_b, +mtg_b_2_evoked_ac.plot_image(axes=axes[1, 1], picks = mtg_b, titles='Montage B Event 2', clim=clim) # Combine Montages -evoked_1 = mtg_a_epochs['Montage_A/Event_1'].average() -evoked_2 = mtg_a_epochs['Montage_A/Event_2'].average() -evoked_3 = mtg_b_epochs['Montage_B/Event_1'].average() -evoked_4 = mtg_b_epochs['Montage_B/Event_2'].average() +evoked_1_ac = mtg_a_epochs_ac['Montage_A/Event_1'].average() +evoked_2_ac = mtg_a_epochs_ac['Montage_A/Event_2'].average() +evoked_3_ac = mtg_b_epochs_ac['Montage_B/Event_1'].average() +evoked_4_ac = mtg_b_epochs_ac['Montage_B/Event_2'].average() -mtg_a_channels = [i_index for i_index,i_label in enumerate(evoked_1.info['ch_names']) +mtg_a_channels_ac = [i_index for i_index,i_label in enumerate(evoked_1_ac.info['ch_names']) if re.search(r'S[1-5]_', i_label)] 
-mtg_b_channels = [i_index for i_index,i_label in enumerate(evoked_3.info['ch_names']) +mtg_b_channels_ac = [i_index for i_index,i_label in enumerate(evoked_3_ac.info['ch_names']) if re.search(r'S([6-9]|10)_', i_label)] -evoked_1._data[mtg_b_channels,:] = 0 -evoked_2._data[mtg_b_channels,:] = 0 -evoked_3._data[mtg_a_channels,:] = 0 -evoked_4._data[mtg_a_channels,:] = 0 +evoked_1_ac._data[mtg_b_channels_ac,:] = 0 +evoked_2_ac._data[mtg_b_channels_ac,:] = 0 +evoked_3_ac._data[mtg_a_channels_ac,:] = 0 +evoked_4_ac._data[mtg_a_channels_ac,:] = 0 -evoked_event_1 = mne.combine_evoked([evoked_1,evoked_3],'equal') -evoked_event_2 = mne.combine_evoked([evoked_2,evoked_4],'equal') +evoked_event_1_ac = mne.combine_evoked([evoked_1_ac,evoked_3_ac],'equal') +evoked_event_2_ac = mne.combine_evoked([evoked_2_ac,evoked_4_ac],'equal') fig, axes = plt.subplots(nrows=1, ncols=2, figsize=(15, 6)) clim=dict(fnirs_raw=[-20,20]) -evoked_event_1.plot_image(axes=axes[0], titles='Event_1', clim=clim) -evoked_event_2.plot_image(axes=axes[1], titles='Event_2', clim=clim) +evoked_event_1_ac.plot_image(axes=axes[0], titles='Event_1', clim=clim) +evoked_event_2_ac.plot_image(axes=axes[1], titles='Event_2', clim=clim) + +# ph evoked +fig, axes = plt.subplots(nrows=2, ncols=2, figsize=(15, 6)) +clim=dict(fnirs_ph=[-180,180]) + +mtg_a_1_evoked_ph = mtg_a_epochs_ph['Montage_A/Event_1'].average() +mtg_a_2_evoked_ph = mtg_a_epochs_ph['Montage_A/Event_2'].average() +mtg_b_1_evoked_ph = mtg_b_epochs_ph['Montage_B/Event_1'].average() +mtg_b_2_evoked_ph = mtg_b_epochs_ph['Montage_B/Event_2'].average() + +mtg_a_1_evoked_ph.plot_image(axes=axes[0, 0], picks = mtg_a, + titles='Montage A Event 1', clim=clim) +mtg_a_2_evoked_ph.plot_image(axes=axes[1, 0], picks = mtg_a, + titles='Montage A Event 2', clim=clim) +mtg_b_1_evoked_ph.plot_image(axes=axes[0, 1], picks = mtg_b, + titles='Montage B Event 1', clim=clim) +mtg_b_2_evoked_ph.plot_image(axes=axes[1, 1], picks = mtg_b, + titles='Montage B Event 2', 
clim=clim) + +# Combine Montages +evoked_1_ph = mtg_a_epochs_ph['Montage_A/Event_1'].average() +evoked_2_ph = mtg_a_epochs_ph['Montage_A/Event_2'].average() +evoked_3_ph = mtg_b_epochs_ph['Montage_B/Event_1'].average() +evoked_4_ph = mtg_b_epochs_ph['Montage_B/Event_2'].average() + +mtg_a_channels_ph = [i_index for i_index,i_label in enumerate(evoked_1_ph.info['ch_names']) + if re.search(r'S[1-5]_', i_label)] + +mtg_b_channels_ph = [i_index for i_index,i_label in enumerate(evoked_3_ph.info['ch_names']) + if re.search(r'S([6-9]|10)_', i_label)] + +evoked_1_ph._data[mtg_b_channels_ph,:] = 0 +evoked_2_ph._data[mtg_b_channels_ph,:] = 0 +evoked_3_ph._data[mtg_a_channels_ph,:] = 0 +evoked_4_ph._data[mtg_a_channels_ph,:] = 0 + +evoked_event_1_ph = mne.combine_evoked([evoked_1_ph,evoked_3_ph],'equal') +evoked_event_2_ph = mne.combine_evoked([evoked_2_ph,evoked_4_ph],'equal') +fig, axes = plt.subplots(nrows=1, ncols=2, figsize=(15, 6)) +clim=dict(fnirs_ph=[-180,180]) + +evoked_event_1_ph.plot_image(axes=axes[0], titles='Event_1', clim=clim) +evoked_event_2_ph.plot_image(axes=axes[1], titles='Event_2', clim=clim) # ############################################################################### # # Plot standard fNIRS response image # # ---------------------------------- @@ -432,11 +538,20 @@ # # both the HbO and HbR on the same figure to illustrate the relation between # # the two signals. 
-evoked_dict = {'Event_1': evoked_event_1,'Event_2': evoked_event_2} +# ac +evoked_dict_ac = {'Event_1': evoked_event_1_ac,'Event_2': evoked_event_2_ac} + +color_dict = {'Event_1':'r','Event_2':'b'} + +mne.viz.plot_compare_evokeds(evoked_dict_ac, combine="mean", ci=0.95, + colors=color_dict) + +# ph +evoked_dict_ph = {'Event_1': evoked_event_1_ph,'Event_2': evoked_event_2_ph} color_dict = {'Event_1':'r','Event_2':'b'} -mne.viz.plot_compare_evokeds(evoked_dict, combine="mean", ci=0.95, +mne.viz.plot_compare_evokeds(evoked_dict_ph, combine="mean", ci=0.95, colors=color_dict) # ############################################################################### @@ -445,11 +560,19 @@ # # # # Next we view how the topographic activity changes throughout the response. +# ac times = np.arange(0.0, 2.0, 0.5) topomap_args = dict(extrapolate='local') -fig = evoked_event_1.plot_joint(times=times, topomap_args=topomap_args) -fig = evoked_event_2.plot_joint(times=times, topomap_args=topomap_args) +fig = evoked_event_1_ac.plot_joint(times=times, topomap_args=topomap_args) +fig = evoked_event_2_ac.plot_joint(times=times, topomap_args=topomap_args) + +# ph +times = np.arange(0.0, 2.0, 0.5) +topomap_args = dict(extrapolate='local') + +fig = evoked_event_1_ph.plot_joint(times=times, topomap_args=topomap_args) +fig = evoked_event_2_ph.plot_joint(times=times, topomap_args=topomap_args) # ############################################################################### # # Compare tapping of left and right hands @@ -458,27 +581,59 @@ # # Finally we generate topo maps for the left and right conditions to view # # the location of activity. First we visualise the HbO activity. 
+# ac +fig, axes = plt.subplots(nrows=2, ncols=4, figsize=(9, 5), + gridspec_kw=dict(width_ratios=[1, 1, 1, 0.1])) + +topomap_args = dict(extrapolate='local', size=3,res=256, sensors='k.') +times = 1.0 + +evoked_1_ac.copy().pick(mtg_a_channels_ac).plot_topomap(times=times, axes=axes[0,0], + colorbar=False,**topomap_args) + +evoked_2_ac.copy().pick(mtg_a_channels_ac).plot_topomap(times=times, axes=axes[1,0], + colorbar=False,**topomap_args) + +evoked_3_ac.copy().pick(mtg_b_channels_ac).plot_topomap(times=times, axes=axes[0,1], + colorbar=False,**topomap_args) + +evoked_4_ac.copy().pick(mtg_b_channels_ac).plot_topomap(times=times, axes=axes[1,1], + colorbar=False, **topomap_args) + +evoked_event_1_ac.plot_topomap(times=times, axes=axes[0,2:], colorbar=True, + **topomap_args) +evoked_event_2_ac.plot_topomap(times=times, axes=axes[1,2:], colorbar=True, + **topomap_args) + +for column, condition in enumerate( + ['Montage A', 'Montage B','Combined']): + for row, chroma in enumerate(['Event 1', 'Event 2']): + axes[row, column].set_title('{}: {}'.format(chroma, condition)) +fig.tight_layout() + + +# ph fig, axes = plt.subplots(nrows=2, ncols=4, figsize=(9, 5), gridspec_kw=dict(width_ratios=[1, 1, 1, 0.1])) topomap_args = dict(extrapolate='local', size=3,res=256, sensors='k.') times = 1.0 -evoked_1.copy().pick(mtg_a_channels).plot_topomap(times=times, axes=axes[0,0], +evoked_1_ph.copy().pick(mtg_a_channels_ph).plot_topomap(times=times, axes=axes[0,0], colorbar=False,**topomap_args) -evoked_2.copy().pick(mtg_a_channels).plot_topomap(times=times, axes=axes[1,0], +evoked_2_ph.copy().pick(mtg_a_channels_ph).plot_topomap(times=times, axes=axes[1,0], colorbar=False,**topomap_args) -evoked_3.copy().pick(mtg_b_channels).plot_topomap(times=times, axes=axes[0,1], +evoked_3_ph.copy().pick(mtg_b_channels_ph).plot_topomap(times=times, axes=axes[0,1], colorbar=False,**topomap_args) -evoked_4.copy().pick(mtg_b_channels).plot_topomap(times=times, axes=axes[1,1], 
+evoked_4_ph.copy().pick(mtg_b_channels_ph).plot_topomap(times=times, axes=axes[1,1], colorbar=False, **topomap_args) -evoked_event_1.plot_topomap(times=times, axes=axes[0,2:], colorbar=True, +evoked_event_1_ph.plot_topomap(times=times, axes=axes[0,2:], colorbar=True, **topomap_args) -evoked_event_2.plot_topomap(times=times, axes=axes[1,2:], colorbar=True, +evoked_event_2_ph.plot_topomap(times=times, axes=axes[1,2:], colorbar=True, **topomap_args) for column, condition in enumerate( @@ -490,22 +645,48 @@ # ############################################################################### # # And we can plot the comparison at a single time point for two conditions. +# ac fig, axes = plt.subplots(nrows=1, ncols=4, figsize=(9, 5), gridspec_kw=dict(width_ratios=[1, 1, 1, 0.1])) vmin, vmax, ts = -0.192, 0.992, 0.1 vmin = -20 vmax = 20 -evoked_1.plot_topomap(times=ts, axes=axes[0], vmin=vmin, vmax=vmax, +evoked_1_ac.plot_topomap(times=ts, axes=axes[0], vmin=vmin, vmax=vmax, colorbar=False,**topomap_args) -evoked_2.plot_topomap(times=ts, axes=axes[1], vmin=vmin, vmax=vmax, +evoked_2_ac.plot_topomap(times=ts, axes=axes[1], vmin=vmin, vmax=vmax, colorbar=False,**topomap_args) -evoked_diff = mne.combine_evoked([evoked_1, -evoked_2], +evoked_diff_ac = mne.combine_evoked([evoked_1_ac, -evoked_2_ac], weights='equal') -evoked_diff.plot_topomap(times=ts, axes=axes[2:],vmin=vmin, vmax=vmax, +evoked_diff_ac.plot_topomap(times=ts, axes=axes[2:],vmin=vmin, vmax=vmax, + colorbar=True,**topomap_args) + +for column, condition in enumerate( + ['Event 1', 'Event 2', 'Difference']): + axes[column].set_title('{}'.format(condition)) +fig.tight_layout() + + +# ph +fig, axes = plt.subplots(nrows=1, ncols=4, figsize=(9, 5), + gridspec_kw=dict(width_ratios=[1, 1, 1, 0.1])) +vmin, vmax, ts = -0.192, 0.992, 0.1 +vmin = -20 +vmax = 20 + +evoked_1_ph.plot_topomap(times=ts, axes=axes[0], vmin=vmin, vmax=vmax, + colorbar=False,**topomap_args) + +evoked_2_ph.plot_topomap(times=ts, axes=axes[1], vmin=vmin, 
vmax=vmax, + colorbar=False,**topomap_args) + +evoked_diff_ph = mne.combine_evoked([evoked_1_ph, -evoked_2_ph], + weights='equal') + +evoked_diff_ph.plot_topomap(times=ts, axes=axes[2:],vmin=vmin, vmax=vmax, colorbar=True,**topomap_args) for column, condition in enumerate( @@ -517,9 +698,21 @@ # # Lastly, we can also look at the individual waveforms to see what is # # driving the topographic plot above. +# ac +fig, axes = plt.subplots(nrows=1, ncols=1, figsize=(6, 4)) +mne.viz.plot_evoked_topo(evoked_1_ac, color='b', axes=axes, legend=False) +mne.viz.plot_evoked_topo(evoked_2_ac, color='r', axes=axes, legend=False) + +# Tidy the legend +leg_lines = [line for line in axes.lines if line.get_c() == 'b'][:1] +leg_lines.append([line for line in axes.lines if line.get_c() == 'r'][0]) +fig.legend(leg_lines, ['Event 1', 'Event 2'], loc='lower right') + + +# ph fig, axes = plt.subplots(nrows=1, ncols=1, figsize=(6, 4)) -mne.viz.plot_evoked_topo(evoked_1, color='b', axes=axes, legend=False) -mne.viz.plot_evoked_topo(evoked_2, color='r', axes=axes, legend=False) +mne.viz.plot_evoked_topo(evoked_1_ph, color='b', axes=axes, legend=False) +mne.viz.plot_evoked_topo(evoked_2_ph, color='r', axes=axes, legend=False) # Tidy the legend leg_lines = [line for line in axes.lines if line.get_c() == 'b'][:1] From bbbc0fe0ad18ef15b28efae05f635233ebc3efce Mon Sep 17 00:00:00 2001 From: Kyle Mathewson Date: Wed, 10 Jun 2020 11:52:02 -0700 Subject: [PATCH 109/167] changed time scale of ac data to 10 sec, replaced raw ac with haemo for plotting --- .../preprocessing/plot_80_boxy_processing.py | 69 +++---------------- 1 file changed, 10 insertions(+), 59 deletions(-) diff --git a/tutorials/preprocessing/plot_80_boxy_processing.py b/tutorials/preprocessing/plot_80_boxy_processing.py index 55f2f247cf9..92f8e9e72df 100644 --- a/tutorials/preprocessing/plot_80_boxy_processing.py +++ b/tutorials/preprocessing/plot_80_boxy_processing.py @@ -240,6 +240,7 @@ # reject_criteria = dict(hbo=80e-6) 
reject_criteria = None tmin, tmax = -0.2, 2 +tmin_AC, tmax_AC = -2, 10 # Montage A mtg_a = [i_index for i_index,i_label in enumerate(raw_haemo.info['ch_names']) @@ -248,20 +249,12 @@ # haemo epochs mtg_a_haemo_epochs = mne.Epochs(raw_haemo, mtg_a_events, event_id = mtg_a_event_dict, - tmin=tmin, tmax=tmax, + tmin=tmin_AC, tmax=tmax_AC, reject=reject_criteria, reject_by_annotation=False, proj=True, baseline=(None, 0), preload=True, detrend=None, verbose=True, event_repeated='drop') mtg_a_haemo_epochs.plot_drop_log() -#get epochs from the raw AC and Phase -mtg_a_epochs_ac = mne.Epochs(raw_intensity_ac, - mtg_a_events, event_id=mtg_a_event_dict, - tmin=tmin, tmax=tmax, - reject=None, reject_by_annotation=False, - proj=False, baseline=(-0.2, 0), preload=True, - detrend=None, verbose=True) - mtg_a_epochs_ph = mne.Epochs(raw_intensity_ph, mtg_a_events, event_id=mtg_a_event_dict, tmin=tmin, tmax=tmax, @@ -277,12 +270,6 @@ fig = mtg_a_haemo_epochs.plot(n_epochs=5,n_channels=5, scalings='auto', picks = mtg_a) -# ac epochs -fig = mne.viz.plot_epochs(mtg_a_epochs_ac,n_epochs=5,n_channels=5, - scalings='auto', picks = mtg_a) -fig = mtg_a_epochs_ac.plot(n_epochs=5,n_channels=5, scalings='auto', - picks = mtg_a) - # ph epochs fig = mne.viz.plot_epochs(mtg_a_epochs_ph,n_epochs=5,n_channels=5, scalings='auto', picks = mtg_a) @@ -303,14 +290,6 @@ detrend=None, verbose=True, event_repeated='drop') mtg_b_haemo_epochs.plot_drop_log() -#get epochs from the raw AC and Phase -mtg_b_epochs_ac = mne.Epochs(raw_intensity_ac, - mtg_b_events, event_id=mtg_b_event_dict, - tmin=tmin, tmax=tmax, - reject=None, reject_by_annotation=False, - proj=False, baseline=(-0.2, 0), preload=True, - detrend=None, verbose=True) - mtg_b_epochs_ph = mne.Epochs(raw_intensity_ph, mtg_b_events, event_id=mtg_b_event_dict, tmin=tmin, tmax=tmax, @@ -325,12 +304,6 @@ fig = mtg_b_haemo_epochs.plot(n_epochs=5,n_channels=5, scalings='auto', picks = mtg_b) -# ac epochs -fig = 
mne.viz.plot_epochs(mtg_b_epochs_ac,n_epochs=5,n_channels=5, - scalings='auto', picks = mtg_b) -fig = mtg_b_epochs_ac.plot(n_epochs=5,n_channels=5, scalings='auto', - picks = mtg_b) - # ph epochs fig = mne.viz.plot_epochs(mtg_b_epochs_ph,n_epochs=5,n_channels=5, scalings='auto', picks = mtg_b) @@ -369,17 +342,6 @@ ts_args=dict(ylim=dict(hbo=[-15, 15], hbr=[-15, 15]))) -# ac epochs -fig = mtg_a_epochs_ac['Montage_A/Event_1'].plot_image( - combine='mean', vmin=-20, vmax=20, - picks = mtg_a, colorbar=True, - title='Montage A Event 1') - -fig = mtg_a_epochs_ac['Montage_A/Event_2'].plot_image( - combine='mean', vmin=-20, vmax=20, - picks = mtg_a, colorbar=True, - title='Montage A Event 2') - # ph epochs fig = mtg_a_epochs_ph['Montage_A/Event_1'].plot_image( combine='mean', vmin=-180, vmax=180, @@ -413,17 +375,6 @@ ts_args=dict(ylim=dict(hbo=[-15, 15], hbr=[-15, 15]))) -# ac epochs -fig = mtg_b_epochs_ac['Montage_B/Event_1'].plot_image( - combine='mean', vmin=-20, vmax=20, - picks = mtg_b, colorbar=True, - title='Montage B Event 1') - -fig = mtg_b_epochs_ac['Montage_B/Event_2'].plot_image( - combine='mean', vmin=-20, vmax=20, - picks = mtg_b, colorbar=True, - title='Montage B Event 2') - # ph epochs fig = mtg_b_epochs_ph['Montage_B/Event_1'].plot_image( combine='mean', vmin=-180, vmax=180, @@ -447,10 +398,10 @@ fig, axes = plt.subplots(nrows=2, ncols=2, figsize=(15, 6)) clim=dict(fnirs_raw=[-20,20]) -mtg_a_1_evoked_ac = mtg_a_epochs_ac['Montage_A/Event_1'].average() -mtg_a_2_evoked_ac = mtg_a_epochs_ac['Montage_A/Event_2'].average() -mtg_b_1_evoked_ac = mtg_b_epochs_ac['Montage_B/Event_1'].average() -mtg_b_2_evoked_ac = mtg_b_epochs_ac['Montage_B/Event_2'].average() +mtg_a_1_evoked_ac = mtg_a_haemo_epochs['Montage_A/Event_1'].average() +mtg_a_2_evoked_ac = mtg_a_haemo_epochs['Montage_A/Event_2'].average() +mtg_b_1_evoked_ac = mtg_b_haemo_epochs['Montage_B/Event_1'].average() +mtg_b_2_evoked_ac = mtg_b_haemo_epochs['Montage_B/Event_2'].average() 
mtg_a_1_evoked_ac.plot_image(axes=axes[0, 0], picks = mtg_a, titles='Montage A Event 1', clim=clim) @@ -462,10 +413,10 @@ titles='Montage B Event 2', clim=clim) # Combine Montages -evoked_1_ac = mtg_a_epochs_ac['Montage_A/Event_1'].average() -evoked_2_ac = mtg_a_epochs_ac['Montage_A/Event_2'].average() -evoked_3_ac = mtg_b_epochs_ac['Montage_B/Event_1'].average() -evoked_4_ac = mtg_b_epochs_ac['Montage_B/Event_2'].average() +evoked_1_ac = mtg_a_haemo_epochs['Montage_A/Event_1'].average() +evoked_2_ac = mtg_a_haemo_epochs['Montage_A/Event_2'].average() +evoked_3_ac = mtg_b_haemo_epochs['Montage_B/Event_1'].average() +evoked_4_ac = mtg_b_haemo_epochs['Montage_B/Event_2'].average() mtg_a_channels_ac = [i_index for i_index,i_label in enumerate(evoked_1_ac.info['ch_names']) if re.search(r'S[1-5]_', i_label)] From a2042c7595e943c35959b00fb303ddd3f14fda70 Mon Sep 17 00:00:00 2001 From: Jonathan Kuziek Date: Wed, 10 Jun 2020 17:05:10 -0600 Subject: [PATCH 110/167] now only plots AC haemo and phase data. 
Cleaned up formatting --- .../preprocessing/plot_80_boxy_processing.py | 621 +++++++++++------- 1 file changed, 379 insertions(+), 242 deletions(-) diff --git a/tutorials/preprocessing/plot_80_boxy_processing.py b/tutorials/preprocessing/plot_80_boxy_processing.py index 92f8e9e72df..4f251e022b0 100644 --- a/tutorials/preprocessing/plot_80_boxy_processing.py +++ b/tutorials/preprocessing/plot_80_boxy_processing.py @@ -27,14 +27,18 @@ # load AC and Phase data boxy_data_folder = mne.datasets.boxy_example.data_path() boxy_raw_dir = os.path.join(boxy_data_folder, 'Participant-1') -raw_intensity_ac = mne.io.read_raw_boxy(boxy_raw_dir, 'AC', verbose=True).load_data() -raw_intensity_ph = mne.io.read_raw_boxy(boxy_raw_dir, 'Ph', verbose=True).load_data() +raw_intensity_ac = mne.io.read_raw_boxy(boxy_raw_dir, 'AC', + verbose=True).load_data() +raw_intensity_ph = mne.io.read_raw_boxy(boxy_raw_dir, 'Ph', + verbose=True).load_data() # get channel indices for our two montages -mtg_a = [raw_intensity_ac.ch_names[i_index] for i_index,i_label in enumerate(raw_intensity_ac.info['ch_names']) - if re.search(r'S[1-5]_', i_label)] -mtg_b = [raw_intensity_ac.ch_names[i_index] for i_index,i_label in enumerate(raw_intensity_ac.info['ch_names']) - if re.search(r'S([6-9]|10)_', i_label)] +mtg_a = [raw_intensity_ac.ch_names[i_index] for i_index, i_label + in enumerate(raw_intensity_ac.info['ch_names']) + if re.search(r'S[1-5]_', i_label)] +mtg_b = [raw_intensity_ac.ch_names[i_index] for i_index, i_label + in enumerate(raw_intensity_ac.info['ch_names']) + if re.search(r'S([6-9]|10)_', i_label)] # ############################################################################### # # View location of sensors over brain surface @@ -49,43 +53,43 @@ # plot all montages fig = mne.viz.create_3d_figure(size=(800, 600), bgcolor='white') -fig = mne.viz.plot_alignment(raw_intensity_ac.info, +fig = mne.viz.plot_alignment(raw_intensity_ac.info, show_axes=True, subject='fsaverage', - trans='fsaverage', + 
trans='fsaverage', surfaces=['head-dense', 'brain'], - fnirs=['sources','detectors', 'pairs'], + fnirs=['sources', 'detectors', 'pairs'], mri_fiducials=True, dig=True, - subjects_dir=subjects_dir, + subjects_dir=subjects_dir, fig=fig) mne.viz.set_3d_view(figure=fig, azimuth=20, elevation=55, distance=0.6) # montage A fig = mne.viz.create_3d_figure(size=(800, 600), bgcolor='white') -fig = mne.viz.plot_alignment(raw_intensity_ac.copy().pick_channels(mtg_a).info, +fig = mne.viz.plot_alignment(raw_intensity_ac.copy().pick_channels(mtg_a).info, show_axes=True, subject='fsaverage', - trans='fsaverage', + trans='fsaverage', surfaces=['head-dense', 'brain'], - fnirs=['sources','detectors', 'pairs'], + fnirs=['sources', 'detectors', 'pairs'], mri_fiducials=True, dig=True, - subjects_dir=subjects_dir, + subjects_dir=subjects_dir, fig=fig) mne.viz.set_3d_view(figure=fig, azimuth=20, elevation=55, distance=0.6) # montage B fig = mne.viz.create_3d_figure(size=(800, 600), bgcolor='white') -fig = mne.viz.plot_alignment(raw_intensity_ac.copy().pick_channels(mtg_b).info, +fig = mne.viz.plot_alignment(raw_intensity_ac.copy().pick_channels(mtg_b).info, show_axes=True, subject='fsaverage', - trans='fsaverage', + trans='fsaverage', surfaces=['head-dense', 'brain'], - fnirs=['sources','detectors', 'pairs'], + fnirs=['sources', 'detectors', 'pairs'], mri_fiducials=True, dig=True, - subjects_dir=subjects_dir, + subjects_dir=subjects_dir, fig=fig) mne.viz.set_3d_view(figure=fig, azimuth=20, elevation=55, distance=0.6) @@ -107,13 +111,13 @@ # AC scalings = dict(fnirs_raw=1e2) -raw_intensity_ac.plot(n_channels=5, - duration=20, scalings=scalings, show_scrollbars=True) +raw_intensity_ac.plot(n_channels=5, duration=20, scalings=scalings, + show_scrollbars=True) # Phase scalings = dict(fnirs_ph=1e4) -raw_intensity_ph.plot(n_channels=5, - duration=20, scalings=scalings, show_scrollbars=True) +raw_intensity_ph.plot(n_channels=5, duration=20, scalings=scalings, + show_scrollbars=True) # 
############################################################################### # # Converting from raw intensity to optical density @@ -171,7 +175,7 @@ raw_haemo = mne.preprocessing.nirs.beer_lambert_law(raw_od) raw_haemo.plot(n_channels=len(raw_haemo.ch_names), - duration=500, show_scrollbars=False) + duration=500, show_scrollbars=False) # ############################################################################### # # Removing heart rate from signal @@ -187,7 +191,7 @@ fig.suptitle('Before filtering', weight='bold', size='x-large') fig.subplots_adjust(top=0.88) raw_haemo = raw_haemo.filter(0.05, 0.7, h_trans_bandwidth=0.2, - l_trans_bandwidth=0.02) + l_trans_bandwidth=0.02) fig = raw_haemo.plot_psd(average=True) fig.suptitle('After filtering', weight='bold', size='x-large') fig.subplots_adjust(top=0.88) @@ -209,28 +213,39 @@ # Montage A Events mtg_a_events = mne.find_events(raw_intensity_ac, stim_channel=['Markers a']) -mtg_a_event_dict = {'Montage_A/Event_1': 1, 'Montage_A/Event_2': 2, - 'Montage A/Block 1 End': 1000, 'Montage A/Block 2 End': 2000} +mtg_a_event_dict = {'Montage_A/Event_1': 1, + 'Montage_A/Event_2': 2, + 'Montage A/Block 1 End': 1000, + 'Montage A/Block 2 End': 2000} fig = mne.viz.plot_events(mtg_a_events) fig.subplots_adjust(right=0.7) # make room for the legend -raw_intensity_ac.copy().pick_channels(mtg_a).plot( - events=mtg_a_events, start=0, duration=10,color='gray', - event_color={1: 'r', 2: 'b', 1000: 'k', 2000: 'k'}) +raw_intensity_ac.copy().pick_channels(mtg_a).plot(events=mtg_a_events, start=0, + duration=10, color='gray', + event_color={1: 'r', + 2: 'b', + 1000: 'k', + 2000: 'k'}) # Montage B Events mtg_b_events = mne.find_events(raw_intensity_ac, stim_channel=['Markers b']) -mtg_b_event_dict = {'Montage_B/Event_1': 1, 'Montage_B/Event_2': 2, - 'Montage B/Block 1 End': 1000, 'Montage B/Block 2 End': 2000} +mtg_b_event_dict = {'Montage_B/Event_1': 1, + 'Montage_B/Event_2': 2, + 'Montage B/Block 1 End': 1000, + 'Montage B/Block 2 
End': 2000} fig = mne.viz.plot_events(mtg_b_events) fig.subplots_adjust(right=0.7) # make room for the legend -raw_intensity_ac.copy().pick_channels(mtg_b).plot( - events=mtg_b_events, start=0, duration=10,color='gray', - event_color={1: 'r', 2: 'b', 1000: 'k', 2000: 'k'}) +raw_intensity_ac.copy().pick_channels(mtg_b).plot(events=mtg_b_events, + start=0, duration=10, + color='gray', + event_color={1: 'r', + 2: 'b', + 1000: 'k', + 2000: 'k'}) # ############################################################################### # # Next we define the range of our epochs, the rejection criteria, @@ -243,72 +258,72 @@ tmin_AC, tmax_AC = -2, 10 # Montage A -mtg_a = [i_index for i_index,i_label in enumerate(raw_haemo.info['ch_names']) - if re.search(r'S[1-5]_', i_label)] +mtg_a = [i_index for i_index, i_label + in enumerate(raw_haemo.info['ch_names']) + if re.search(r'S[1-5]_', i_label)] # haemo epochs -mtg_a_haemo_epochs = mne.Epochs(raw_haemo, - mtg_a_events, event_id = mtg_a_event_dict, - tmin=tmin_AC, tmax=tmax_AC, - reject=reject_criteria, reject_by_annotation=False, - proj=True, baseline=(None, 0), preload=True, - detrend=None, verbose=True, event_repeated='drop') +mtg_a_haemo_epochs = mne.Epochs(raw_haemo, mtg_a_events, + event_id=mtg_a_event_dict, tmin=tmin_AC, + tmax=tmax_AC, reject=reject_criteria, + reject_by_annotation=False, proj=True, + baseline=(None, 0), preload=True, detrend=None, + verbose=True, event_repeated='drop') mtg_a_haemo_epochs.plot_drop_log() -mtg_a_epochs_ph = mne.Epochs(raw_intensity_ph, - mtg_a_events, event_id=mtg_a_event_dict, - tmin=tmin, tmax=tmax, - reject=None, reject_by_annotation=False, - proj=False, baseline=(-0.2, 0), preload=True, - detrend=None, verbose=True) +mtg_a_epochs_ph = mne.Epochs(raw_intensity_ph, mtg_a_events, + event_id=mtg_a_event_dict, tmin=tmin, tmax=tmax, + reject=None, reject_by_annotation=False, + proj=False, baseline=(-0.2, 0), preload=True, + detrend=None, verbose=True) -#two ways to plot epochs, should be the 
same +# two ways to plot epochs, should be the same # haemo epochs -fig = mne.viz.plot_epochs(mtg_a_haemo_epochs,n_epochs=5,n_channels=5, - scalings='auto', picks = mtg_a) -fig = mtg_a_haemo_epochs.plot(n_epochs=5,n_channels=5, scalings='auto', - picks = mtg_a) +fig = mne.viz.plot_epochs(mtg_a_haemo_epochs, n_epochs=5, n_channels=5, + scalings='auto', picks=mtg_a) +fig = mtg_a_haemo_epochs.plot(n_epochs=5, n_channels=5, scalings='auto', + picks=mtg_a) # ph epochs -fig = mne.viz.plot_epochs(mtg_a_epochs_ph,n_epochs=5,n_channels=5, - scalings='auto', picks = mtg_a) -fig = mtg_a_epochs_ph.plot(n_epochs=5,n_channels=5, scalings='auto', - picks = mtg_a) +fig = mne.viz.plot_epochs(mtg_a_epochs_ph, n_epochs=5, n_channels=5, + scalings='auto', picks=mtg_a) +fig = mtg_a_epochs_ph.plot(n_epochs=5, n_channels=5, scalings='auto', + picks=mtg_a) # Montage B -mtg_b = [i_index for i_index,i_label in enumerate(raw_haemo.info['ch_names']) - if re.search(r'S([6-9]|10)_', i_label)] +mtg_b = [i_index for i_index, i_label + in enumerate(raw_haemo.info['ch_names']) + if re.search(r'S([6-9]|10)_', i_label)] # haemo epochs -mtg_b_haemo_epochs = mne.Epochs(raw_haemo, - mtg_b_events, event_id = mtg_b_event_dict, - tmin=tmin, tmax=tmax, - reject=reject_criteria, reject_by_annotation=False, - proj=True, baseline=(None, 0), preload=True, - detrend=None, verbose=True, event_repeated='drop') +mtg_b_haemo_epochs = mne.Epochs(raw_haemo, mtg_b_events, + event_id=mtg_b_event_dict, tmin=tmin_AC, + tmax=tmax_AC, reject=reject_criteria, + reject_by_annotation=False, proj=True, + baseline=(None, 0), preload=True, detrend=None, + verbose=True, event_repeated='drop') mtg_b_haemo_epochs.plot_drop_log() -mtg_b_epochs_ph = mne.Epochs(raw_intensity_ph, - mtg_b_events, event_id=mtg_b_event_dict, - tmin=tmin, tmax=tmax, - reject=None, reject_by_annotation=False, - proj=False, baseline=(-0.2, 0), preload=True, - detrend=None, verbose=True) +mtg_b_epochs_ph = mne.Epochs(raw_intensity_ph, mtg_b_events, + 
event_id=mtg_b_event_dict, tmin=tmin, tmax=tmax, + reject=None, reject_by_annotation=False, + proj=False, baseline=(-0.2, 0), preload=True, + detrend=None, verbose=True) -#two ways to plot epochs, should be the same +# two ways to plot epochs, should be the same # haemo epochs -fig = mne.viz.plot_epochs(mtg_b_haemo_epochs,n_epochs=5,n_channels=5, - scalings='auto', picks = mtg_b) -fig = mtg_b_haemo_epochs.plot(n_epochs=5,n_channels=5, scalings='auto', - picks = mtg_b) +fig = mne.viz.plot_epochs(mtg_b_haemo_epochs, n_epochs=5, n_channels=5, + scalings='auto', picks=mtg_b) +fig = mtg_b_haemo_epochs.plot(n_epochs=5, n_channels=5, scalings='auto', + picks=mtg_b) # ph epochs -fig = mne.viz.plot_epochs(mtg_b_epochs_ph,n_epochs=5,n_channels=5, - scalings='auto', picks = mtg_b) -fig = mtg_b_epochs_ph.plot(n_epochs=5,n_channels=5, scalings='auto', - picks = mtg_b) +fig = mne.viz.plot_epochs(mtg_b_epochs_ph, n_epochs=5, n_channels=5, + scalings='auto', picks=mtg_b) +fig = mtg_b_epochs_ph.plot(n_epochs=5, n_channels=5, scalings='auto', + picks=mtg_b) # ############################################################################### # # View consistency of responses across trials @@ -320,71 +335,59 @@ # # trials, and the consistent dip in HbR that is slightly delayed relative to # # the HbO peak. 
-#haemo plots +# haemo plots # Montage A -hbo = [i_index for i_index,i_label - in enumerate(mtg_a_haemo_epochs.info['ch_names']) +hbo = [i_index for i_index, i_label + in enumerate(mtg_a_haemo_epochs.info['ch_names']) if re.search(r'S[1-5]_D[0-9] hbo', i_label)] -hbr = [i_index for i_index,i_label - in enumerate(mtg_a_haemo_epochs.info['ch_names']) +hbr = [i_index for i_index, i_label + in enumerate(mtg_a_haemo_epochs.info['ch_names']) if re.search(r'S[1-5]_D[0-9] hbr', i_label)] mtg_a_haemo_epochs['Montage_A/Event_1'].plot_image( - combine='mean', vmin=-30, vmax=30, - group_by = {'Oxy':hbo,'De-Oxy':hbr}, - ts_args=dict(ylim=dict(hbo=[-15, 15], - hbr=[-15, 15]))) + combine='mean', vmin=-30, vmax=30, group_by={'Oxy': hbo, 'De-Oxy': hbr}, + ts_args=dict(ylim=dict(hbo=[-15, 15], hbr=[-15, 15]))) mtg_a_haemo_epochs['Montage_A/Event_2'].plot_image( - combine='mean', vmin=-30, vmax=30, - group_by = {'Oxy':hbo,'De-Oxy':hbr}, - ts_args=dict(ylim=dict(hbo=[-15, 15], - hbr=[-15, 15]))) + combine='mean', vmin=-30, vmax=30, group_by={'Oxy': hbo, 'De-Oxy': hbr}, + ts_args=dict(ylim=dict(hbo=[-15, 15], hbr=[-15, 15]))) # ph epochs fig = mtg_a_epochs_ph['Montage_A/Event_1'].plot_image( - combine='mean', vmin=-180, vmax=180, - picks = mtg_a, colorbar=True, - title='Montage A Event 1') + combine='mean', vmin=-180, vmax=180, picks=mtg_a, colorbar=True, + title='Montage A Event 1') fig = mtg_a_epochs_ph['Montage_A/Event_2'].plot_image( - combine='mean', vmin=-180, vmax=180, - picks = mtg_a, colorbar=True, - title='Montage A Event 2') + combine='mean', vmin=-180, vmax=180, picks=mtg_a, colorbar=True, + title='Montage A Event 2') # Montage B -hbo = [i_index for i_index,i_label - in enumerate(mtg_a_haemo_epochs.info['ch_names']) +hbo = [i_index for i_index, i_label + in enumerate(mtg_a_haemo_epochs.info['ch_names']) if re.search(r'S([6-9]|10)_D([0-9]|1[0-6]) hbo', i_label)] -hbr = [i_index for i_index,i_label - in enumerate(mtg_a_haemo_epochs.info['ch_names']) +hbr = [i_index for 
i_index, i_label + in enumerate(mtg_a_haemo_epochs.info['ch_names']) if re.search(r'S([6-9]|10)_D([0-9]|1[0-6]) hbr', i_label)] mtg_b_haemo_epochs['Montage_B/Event_1'].plot_image( - combine='mean', vmin=-30, vmax=30, - group_by = {'Oxy':hbo,'De-Oxy':hbr}, - ts_args=dict(ylim=dict(hbo=[-15, 15], - hbr=[-15, 15]))) + combine='mean', vmin=-30, vmax=30, group_by={'Oxy': hbo, 'De-Oxy': hbr}, + ts_args=dict(ylim=dict(hbo=[-15, 15], hbr=[-15, 15]))) mtg_b_haemo_epochs['Montage_B/Event_2'].plot_image( - combine='mean', vmin=-30, vmax=30, - group_by = {'Oxy':hbo,'De-Oxy':hbr}, - ts_args=dict(ylim=dict(hbo=[-15, 15], - hbr=[-15, 15]))) + combine='mean', vmin=-30, vmax=30, group_by={'Oxy': hbo, 'De-Oxy': hbr}, + ts_args=dict(ylim=dict(hbo=[-15, 15], hbr=[-15, 15]))) # ph epochs fig = mtg_b_epochs_ph['Montage_B/Event_1'].plot_image( - combine='mean', vmin=-180, vmax=180, - picks = mtg_b, colorbar=True, - title='Montage B Event 1') + combine='mean', vmin=-180, vmax=180, picks=mtg_b, colorbar=True, + title='Montage B Event 1') fig = mtg_b_epochs_ph['Montage_B/Event_2'].plot_image( - combine='mean', vmin=-180, vmax=180, - picks = mtg_b, colorbar=True, - title='Montage B Event 2') + combine='mean', vmin=-180, vmax=180, picks=mtg_b, colorbar=True, + title='Montage B Event 2') # ############################################################################### # # View consistency of responses across channels @@ -395,92 +398,103 @@ # # motor cortex, and all channels show a similar pattern in the data. 
# ac evoked -fig, axes = plt.subplots(nrows=2, ncols=2, figsize=(15, 6)) -clim=dict(fnirs_raw=[-20,20]) +fig, axes = plt.subplots(nrows=4, ncols=2, figsize=(15, 6)) +clim = dict(hbo=[-10, 10], hbr=[-10, 10]) mtg_a_1_evoked_ac = mtg_a_haemo_epochs['Montage_A/Event_1'].average() mtg_a_2_evoked_ac = mtg_a_haemo_epochs['Montage_A/Event_2'].average() mtg_b_1_evoked_ac = mtg_b_haemo_epochs['Montage_B/Event_1'].average() mtg_b_2_evoked_ac = mtg_b_haemo_epochs['Montage_B/Event_2'].average() -mtg_a_1_evoked_ac.plot_image(axes=axes[0, 0], picks = mtg_a, - titles='Montage A Event 1', clim=clim) -mtg_a_2_evoked_ac.plot_image(axes=axes[1, 0], picks = mtg_a, - titles='Montage A Event 2', clim=clim) -mtg_b_1_evoked_ac.plot_image(axes=axes[0, 1], picks = mtg_b, - titles='Montage B Event 1', clim=clim) -mtg_b_2_evoked_ac.plot_image(axes=axes[1, 1], picks = mtg_b, - titles='Montage B Event 2', clim=clim) +mtg_a_1_evoked_ac.plot_image(axes=axes[0, 0], picks=hbo, + titles='HBO Montage A Event 1', clim=clim) +mtg_a_1_evoked_ac.plot_image(axes=axes[0, 1], picks=hbr, + titles='HBR Montage A Event 1', clim=clim) +mtg_a_2_evoked_ac.plot_image(axes=axes[1, 0], picks=hbo, + titles='HBO Montage A Event 2', clim=clim) +mtg_a_2_evoked_ac.plot_image(axes=axes[1, 1], picks=hbr, + titles='HBR Montage A Event 2', clim=clim) +mtg_b_1_evoked_ac.plot_image(axes=axes[2, 0], picks=hbo, + titles='HBO Montage B Event 1', clim=clim) +mtg_b_1_evoked_ac.plot_image(axes=axes[2, 1], picks=hbr, + titles='HBR Montage B Event 1', clim=clim) +mtg_b_2_evoked_ac.plot_image(axes=axes[3, 0], picks=hbo, + titles='HBO Montage B Event 2', clim=clim) +mtg_b_2_evoked_ac.plot_image(axes=axes[3, 1], picks=hbr, + titles='HBR Montage B Event 2', clim=clim) # Combine Montages -evoked_1_ac = mtg_a_haemo_epochs['Montage_A/Event_1'].average() -evoked_2_ac = mtg_a_haemo_epochs['Montage_A/Event_2'].average() -evoked_3_ac = mtg_b_haemo_epochs['Montage_B/Event_1'].average() -evoked_4_ac = 
mtg_b_haemo_epochs['Montage_B/Event_2'].average() - -mtg_a_channels_ac = [i_index for i_index,i_label in enumerate(evoked_1_ac.info['ch_names']) - if re.search(r'S[1-5]_', i_label)] +mtg_a_channels_ac = [i_index for i_index, i_label + in enumerate(mtg_a_1_evoked_ac.info['ch_names']) + if re.search(r'S[1-5]_', i_label)] -mtg_b_channels_ac = [i_index for i_index,i_label in enumerate(evoked_3_ac.info['ch_names']) - if re.search(r'S([6-9]|10)_', i_label)] +mtg_b_channels_ac = [i_index for i_index, i_label + in enumerate(mtg_b_1_evoked_ac.info['ch_names']) + if re.search(r'S([6-9]|10)_', i_label)] -evoked_1_ac._data[mtg_b_channels_ac,:] = 0 -evoked_2_ac._data[mtg_b_channels_ac,:] = 0 -evoked_3_ac._data[mtg_a_channels_ac,:] = 0 -evoked_4_ac._data[mtg_a_channels_ac,:] = 0 +mtg_a_1_evoked_ac._data[mtg_b_channels_ac, :] = 0 +mtg_a_2_evoked_ac._data[mtg_b_channels_ac, :] = 0 +mtg_b_1_evoked_ac._data[mtg_a_channels_ac, :] = 0 +mtg_b_2_evoked_ac._data[mtg_a_channels_ac, :] = 0 -evoked_event_1_ac = mne.combine_evoked([evoked_1_ac,evoked_3_ac],'equal') -evoked_event_2_ac = mne.combine_evoked([evoked_2_ac,evoked_4_ac],'equal') +evoked_event_1_ac = mne.combine_evoked([mtg_a_1_evoked_ac, mtg_b_1_evoked_ac], + 'equal') +evoked_event_2_ac = mne.combine_evoked([mtg_a_2_evoked_ac, mtg_b_2_evoked_ac], + 'equal') -fig, axes = plt.subplots(nrows=1, ncols=2, figsize=(15, 6)) -clim=dict(fnirs_raw=[-20,20]) +fig, axes = plt.subplots(nrows=2, ncols=2, figsize=(15, 6)) +clim = dict(fnirs_raw=[-20, 20]) -evoked_event_1_ac.plot_image(axes=axes[0], titles='Event_1', clim=clim) -evoked_event_2_ac.plot_image(axes=axes[1], titles='Event_2', clim=clim) +evoked_event_1_ac.plot_image(axes=axes[:, 0], + titles=dict(hbo='HBO_Event_1', hbr='HBR_Event_1'), + clim=clim) +evoked_event_2_ac.plot_image(axes=axes[:, 1], + titles=dict(hbo='HBO_Event_2', hbr='HBR_Event_2'), + clim=clim) # ph evoked fig, axes = plt.subplots(nrows=2, ncols=2, figsize=(15, 6)) -clim=dict(fnirs_ph=[-180,180]) +clim = 
dict(fnirs_ph=[-180, 180]) mtg_a_1_evoked_ph = mtg_a_epochs_ph['Montage_A/Event_1'].average() mtg_a_2_evoked_ph = mtg_a_epochs_ph['Montage_A/Event_2'].average() mtg_b_1_evoked_ph = mtg_b_epochs_ph['Montage_B/Event_1'].average() mtg_b_2_evoked_ph = mtg_b_epochs_ph['Montage_B/Event_2'].average() -mtg_a_1_evoked_ph.plot_image(axes=axes[0, 0], picks = mtg_a, - titles='Montage A Event 1', clim=clim) -mtg_a_2_evoked_ph.plot_image(axes=axes[1, 0], picks = mtg_a, - titles='Montage A Event 2', clim=clim) -mtg_b_1_evoked_ph.plot_image(axes=axes[0, 1], picks = mtg_b, - titles='Montage B Event 1', clim=clim) -mtg_b_2_evoked_ph.plot_image(axes=axes[1, 1], picks = mtg_b, - titles='Montage B Event 2', clim=clim) +mtg_a_1_evoked_ph.plot_image(axes=axes[0, 0], picks=mtg_a, + titles='Montage A Event 1', clim=clim) +mtg_a_2_evoked_ph.plot_image(axes=axes[1, 0], picks=mtg_a, + titles='Montage A Event 2', clim=clim) +mtg_b_1_evoked_ph.plot_image(axes=axes[0, 1], picks=mtg_b, + titles='Montage B Event 1', clim=clim) +mtg_b_2_evoked_ph.plot_image(axes=axes[1, 1], picks=mtg_b, + titles='Montage B Event 2', clim=clim) # Combine Montages -evoked_1_ph = mtg_a_epochs_ph['Montage_A/Event_1'].average() -evoked_2_ph = mtg_a_epochs_ph['Montage_A/Event_2'].average() -evoked_3_ph = mtg_b_epochs_ph['Montage_B/Event_1'].average() -evoked_4_ph = mtg_b_epochs_ph['Montage_B/Event_2'].average() - -mtg_a_channels_ph = [i_index for i_index,i_label in enumerate(evoked_1_ph.info['ch_names']) - if re.search(r'S[1-5]_', i_label)] +mtg_a_channels_ph = [i_index for i_index, i_label + in enumerate(mtg_a_1_evoked_ph.info['ch_names']) + if re.search(r'S[1-5]_', i_label)] -mtg_b_channels_ph = [i_index for i_index,i_label in enumerate(evoked_3_ph.info['ch_names']) - if re.search(r'S([6-9]|10)_', i_label)] +mtg_b_channels_ph = [i_index for i_index, i_label + in enumerate(mtg_b_1_evoked_ph.info['ch_names']) + if re.search(r'S([6-9]|10)_', i_label)] -evoked_1_ph._data[mtg_b_channels_ph,:] = 0 
-evoked_2_ph._data[mtg_b_channels_ph,:] = 0 -evoked_3_ph._data[mtg_a_channels_ph,:] = 0 -evoked_4_ph._data[mtg_a_channels_ph,:] = 0 +mtg_a_1_evoked_ph._data[mtg_b_channels_ph, :] = 0 +mtg_a_2_evoked_ph._data[mtg_b_channels_ph, :] = 0 +mtg_b_1_evoked_ph._data[mtg_a_channels_ph, :] = 0 +mtg_b_2_evoked_ph._data[mtg_a_channels_ph, :] = 0 -evoked_event_1_ph = mne.combine_evoked([evoked_1_ph,evoked_3_ph],'equal') -evoked_event_2_ph = mne.combine_evoked([evoked_2_ph,evoked_4_ph],'equal') +evoked_event_1_ph = mne.combine_evoked([mtg_a_1_evoked_ph, mtg_b_1_evoked_ph], + 'equal') +evoked_event_2_ph = mne.combine_evoked([mtg_a_2_evoked_ph, mtg_b_2_evoked_ph], + 'equal') fig, axes = plt.subplots(nrows=1, ncols=2, figsize=(15, 6)) -clim=dict(fnirs_ph=[-180,180]) +clim = dict(fnirs_ph=[-180, 180]) evoked_event_1_ph.plot_image(axes=axes[0], titles='Event_1', clim=clim) evoked_event_2_ph.plot_image(axes=axes[1], titles='Event_2', clim=clim) + # ############################################################################### # # Plot standard fNIRS response image # # ---------------------------------- @@ -490,20 +504,20 @@ # # the two signals. 
# ac -evoked_dict_ac = {'Event_1': evoked_event_1_ac,'Event_2': evoked_event_2_ac} +evoked_dict_ac = {'Event_1': evoked_event_1_ac, 'Event_2': evoked_event_2_ac} -color_dict = {'Event_1':'r','Event_2':'b'} +color_dict = {'Event_1': 'r', 'Event_2': 'b'} mne.viz.plot_compare_evokeds(evoked_dict_ac, combine="mean", ci=0.95, - colors=color_dict) + colors=color_dict) # ph -evoked_dict_ph = {'Event_1': evoked_event_1_ph,'Event_2': evoked_event_2_ph} +evoked_dict_ph = {'Event_1': evoked_event_1_ph, 'Event_2': evoked_event_2_ph} -color_dict = {'Event_1':'r','Event_2':'b'} +color_dict = {'Event_1': 'r', 'Event_2': 'b'} mne.viz.plot_compare_evokeds(evoked_dict_ph, combine="mean", ci=0.95, - colors=color_dict) + colors=color_dict) # ############################################################################### # # View topographic representation of activity @@ -512,7 +526,7 @@ # # Next we view how the topographic activity changes throughout the response. # ac -times = np.arange(0.0, 2.0, 0.5) +times = np.arange(0.0, 10.0, 2.0) topomap_args = dict(extrapolate='local') fig = evoked_event_1_ac.plot_joint(times=times, topomap_args=topomap_args) @@ -532,33 +546,104 @@ # # Finally we generate topo maps for the left and right conditions to view # # the location of activity. First we visualise the HbO activity. 
-# ac +# ac HBO fig, axes = plt.subplots(nrows=2, ncols=4, figsize=(9, 5), gridspec_kw=dict(width_ratios=[1, 1, 1, 0.1])) -topomap_args = dict(extrapolate='local', size=3,res=256, sensors='k.') +topomap_args = dict(extrapolate='local', size=3, res=256, sensors='k.') times = 1.0 -evoked_1_ac.copy().pick(mtg_a_channels_ac).plot_topomap(times=times, axes=axes[0,0], - colorbar=False,**topomap_args) - -evoked_2_ac.copy().pick(mtg_a_channels_ac).plot_topomap(times=times, axes=axes[1,0], - colorbar=False,**topomap_args) +hbo_a = [i_index for i_index, i_label + in enumerate(mtg_a_1_evoked_ac.info['ch_names']) + if re.search(r'S[1-5]_D[0-9] hbo', i_label)] + +hbo_b = [i_index for i_index, i_label + in enumerate(mtg_b_1_evoked_ac.info['ch_names']) + if re.search(r'S([6-9]|10)_D([0-9]|1[0-6]) hbo', i_label)] + +evoked_event_1_ac.copy().pick(hbo_a).plot_topomap(times=times, + axes=axes[0, 0], + colorbar=False, + **topomap_args) + +evoked_event_2_ac.copy().pick(hbo_a).plot_topomap(times=times, + axes=axes[1, 0], + colorbar=False, + **topomap_args) + +evoked_event_1_ac.copy().pick(hbo_b).plot_topomap(times=times, + axes=axes[0, 1], + colorbar=False, + **topomap_args) + +evoked_event_2_ac.copy().pick(hbo_b).plot_topomap(times=times, + axes=axes[1, 1], + colorbar=False, + **topomap_args) + +evoked_event_1_ac.copy().pick(hbo_a+hbo_b).plot_topomap(times=times, + axes=axes[0, 2:], + colorbar=True, + **topomap_args) + +evoked_event_2_ac.copy().pick(hbo_a+hbo_b).plot_topomap(times=times, + axes=axes[1, 2:], + colorbar=True, + **topomap_args) + +for column, condition in enumerate(['Montage A', 'Montage B', 'Combined']): + for row, chroma in enumerate(['HBO Event 1', 'HBO Event 2']): + axes[row, column].set_title('{}: {}'.format(chroma, condition)) +fig.tight_layout() -evoked_3_ac.copy().pick(mtg_b_channels_ac).plot_topomap(times=times, axes=axes[0,1], - colorbar=False,**topomap_args) -evoked_4_ac.copy().pick(mtg_b_channels_ac).plot_topomap(times=times, axes=axes[1,1], - colorbar=False, 
**topomap_args) +# ac HBR +fig, axes = plt.subplots(nrows=2, ncols=4, figsize=(9, 5), + gridspec_kw=dict(width_ratios=[1, 1, 1, 0.1])) -evoked_event_1_ac.plot_topomap(times=times, axes=axes[0,2:], colorbar=True, - **topomap_args) -evoked_event_2_ac.plot_topomap(times=times, axes=axes[1,2:], colorbar=True, - **topomap_args) +topomap_args = dict(extrapolate='local', size=3, res=256, sensors='k.') +times = 1.0 -for column, condition in enumerate( - ['Montage A', 'Montage B','Combined']): - for row, chroma in enumerate(['Event 1', 'Event 2']): +hbr_a = [i_index for i_index, i_label + in enumerate(mtg_a_1_evoked_ac.info['ch_names']) + if re.search(r'S[1-5]_D[0-9] hbr', i_label)] + +hbr_b = [i_index for i_index, i_label + in enumerate(mtg_b_1_evoked_ac.info['ch_names']) + if re.search(r'S([6-9]|10)_D([0-9]|1[0-6]) hbr', i_label)] + + +evoked_event_1_ac.copy().pick(hbr_a).plot_topomap(times=times, + axes=axes[0, 0], + colorbar=False, + **topomap_args) + +evoked_event_2_ac.copy().pick(hbr_a).plot_topomap(times=times, + axes=axes[1, 0], + colorbar=False, + **topomap_args) + +evoked_event_1_ac.copy().pick(hbr_b).plot_topomap(times=times, + axes=axes[0, 1], + colorbar=False, + **topomap_args) + +evoked_event_2_ac.copy().pick(hbr_b).plot_topomap(times=times, + axes=axes[1, 1], + colorbar=False, + **topomap_args) + +evoked_event_1_ac.copy().pick(hbr_a+hbr_b).plot_topomap(times=times, + axes=axes[0, 2:], + colorbar=True, + **topomap_args) +evoked_event_2_ac.copy().pick(hbr_a+hbr_b).plot_topomap(times=times, + axes=axes[1, 2:], + colorbar=True, + **topomap_args) + +for column, condition in enumerate(['Montage A', 'Montage B', 'Combined']): + for row, chroma in enumerate(['HBR Event 1', 'HBR Event 2']): axes[row, column].set_title('{}: {}'.format(chroma, condition)) fig.tight_layout() @@ -567,28 +652,35 @@ fig, axes = plt.subplots(nrows=2, ncols=4, figsize=(9, 5), gridspec_kw=dict(width_ratios=[1, 1, 1, 0.1])) -topomap_args = dict(extrapolate='local', size=3,res=256, sensors='k.') 
+topomap_args = dict(extrapolate='local', size=3, res=256, sensors='k.') times = 1.0 -evoked_1_ph.copy().pick(mtg_a_channels_ph).plot_topomap(times=times, axes=axes[0,0], - colorbar=False,**topomap_args) +evoked_event_1_ph.copy().pick(mtg_a_channels_ph).plot_topomap(times=times, + axes=axes[0, 0], + colorbar=False, + **topomap_args) -evoked_2_ph.copy().pick(mtg_a_channels_ph).plot_topomap(times=times, axes=axes[1,0], - colorbar=False,**topomap_args) +evoked_event_2_ph.copy().pick(mtg_a_channels_ph).plot_topomap(times=times, + axes=axes[1, 0], + colorbar=False, + **topomap_args) -evoked_3_ph.copy().pick(mtg_b_channels_ph).plot_topomap(times=times, axes=axes[0,1], - colorbar=False,**topomap_args) +evoked_event_1_ph.copy().pick(mtg_b_channels_ph).plot_topomap(times=times, + axes=axes[0, 1], + colorbar=False, + **topomap_args) -evoked_4_ph.copy().pick(mtg_b_channels_ph).plot_topomap(times=times, axes=axes[1,1], - colorbar=False, **topomap_args) +evoked_event_2_ph.copy().pick(mtg_b_channels_ph).plot_topomap(times=times, + axes=axes[1, 1], + colorbar=False, + **topomap_args) -evoked_event_1_ph.plot_topomap(times=times, axes=axes[0,2:], colorbar=True, - **topomap_args) -evoked_event_2_ph.plot_topomap(times=times, axes=axes[1,2:], colorbar=True, - **topomap_args) +evoked_event_1_ph.plot_topomap(times=times, axes=axes[0, 2:], colorbar=True, + **topomap_args) +evoked_event_2_ph.plot_topomap(times=times, axes=axes[1, 2:], colorbar=True, + **topomap_args) -for column, condition in enumerate( - ['Montage A', 'Montage B','Combined']): +for column, condition in enumerate(['Montage A', 'Montage B', 'Combined']): for row, chroma in enumerate(['Event 1', 'Event 2']): axes[row, column].set_title('{}: {}'.format(chroma, condition)) fig.tight_layout() @@ -596,28 +688,59 @@ # ############################################################################### # # And we can plot the comparison at a single time point for two conditions. 
-# ac +# ac HBO fig, axes = plt.subplots(nrows=1, ncols=4, figsize=(9, 5), gridspec_kw=dict(width_ratios=[1, 1, 1, 0.1])) vmin, vmax, ts = -0.192, 0.992, 0.1 -vmin = -20 -vmax = 20 +vmin = -5 +vmax = 5 -evoked_1_ac.plot_topomap(times=ts, axes=axes[0], vmin=vmin, vmax=vmax, - colorbar=False,**topomap_args) +evoked_event_1_ac.plot_topomap(ch_type='hbo', times=ts, axes=axes[0], + vmin=vmin, vmax=vmax, + colorbar=False, **topomap_args) -evoked_2_ac.plot_topomap(times=ts, axes=axes[1], vmin=vmin, vmax=vmax, - colorbar=False,**topomap_args) +evoked_event_2_ac.plot_topomap(ch_type='hbo', times=ts, axes=axes[1], + vmin=vmin, vmax=vmax, + colorbar=False, **topomap_args) -evoked_diff_ac = mne.combine_evoked([evoked_1_ac, -evoked_2_ac], - weights='equal') +evoked_diff_ac = mne.combine_evoked([evoked_event_1_ac, -evoked_event_2_ac], + weights='equal') -evoked_diff_ac.plot_topomap(times=ts, axes=axes[2:],vmin=vmin, vmax=vmax, - colorbar=True,**topomap_args) +evoked_diff_ac.plot_topomap(ch_type='hbo', times=ts, axes=axes[2:], + vmin=vmin, vmax=vmax, + colorbar=True, **topomap_args) for column, condition in enumerate( - ['Event 1', 'Event 2', 'Difference']): - axes[column].set_title('{}'.format(condition)) + ['HBO Event 1', 'HBO Event 2', 'HBO Difference']): + axes[column].set_title('{}'.format(condition)) +fig.tight_layout() + + +# ac HBR +fig, axes = plt.subplots(nrows=1, ncols=4, figsize=(9, 5), + gridspec_kw=dict(width_ratios=[1, 1, 1, 0.1])) +vmin, vmax, ts = -0.192, 0.992, 0.1 +vmin = -5 +vmax = 5 + +evoked_event_1_ac.plot_topomap(ch_type='hbr', times=ts, axes=axes[0], + vmin=vmin, vmax=vmax, + colorbar=False, **topomap_args) + +evoked_event_2_ac.plot_topomap(ch_type='hbr', times=ts, axes=axes[1], + vmin=vmin, vmax=vmax, + colorbar=False, **topomap_args) + +evoked_diff_ac = mne.combine_evoked([evoked_event_1_ac, -evoked_event_2_ac], + weights='equal') + +evoked_diff_ac.plot_topomap(ch_type='hbr', times=ts, axes=axes[2:], + vmin=vmin, vmax=vmax, + colorbar=True, 
**topomap_args) + +for column, condition in enumerate( + ['HBR Event 1', 'HBR Event 2', 'HBR Difference']): + axes[column].set_title('{}'.format(condition)) fig.tight_layout() @@ -628,42 +751,56 @@ vmin = -20 vmax = 20 -evoked_1_ph.plot_topomap(times=ts, axes=axes[0], vmin=vmin, vmax=vmax, - colorbar=False,**topomap_args) +evoked_event_1_ph.plot_topomap(times=ts, axes=axes[0], vmin=vmin, vmax=vmax, + colorbar=False, **topomap_args) -evoked_2_ph.plot_topomap(times=ts, axes=axes[1], vmin=vmin, vmax=vmax, - colorbar=False,**topomap_args) +evoked_event_2_ph.plot_topomap(times=ts, axes=axes[1], vmin=vmin, vmax=vmax, + colorbar=False, **topomap_args) -evoked_diff_ph = mne.combine_evoked([evoked_1_ph, -evoked_2_ph], - weights='equal') +evoked_diff_ph = mne.combine_evoked([evoked_event_1_ph, -evoked_event_2_ph], + weights='equal') -evoked_diff_ph.plot_topomap(times=ts, axes=axes[2:],vmin=vmin, vmax=vmax, - colorbar=True,**topomap_args) +evoked_diff_ph.plot_topomap(times=ts, axes=axes[2:], vmin=vmin, vmax=vmax, + colorbar=True, **topomap_args) -for column, condition in enumerate( - ['Event 1', 'Event 2', 'Difference']): - axes[column].set_title('{}'.format(condition)) +for column, condition in enumerate(['Event 1', 'Event 2', 'Difference']): + axes[column].set_title('{}'.format(condition)) fig.tight_layout() -# ############################################################################### +# ############################################################################# # # Lastly, we can also look at the individual waveforms to see what is # # driving the topographic plot above. 
-# ac +# ac HBO fig, axes = plt.subplots(nrows=1, ncols=1, figsize=(6, 4)) -mne.viz.plot_evoked_topo(evoked_1_ac, color='b', axes=axes, legend=False) -mne.viz.plot_evoked_topo(evoked_2_ac, color='r', axes=axes, legend=False) +mne.viz.plot_evoked_topo(evoked_event_1_ac.copy().pick('hbo'), + color='b', axes=axes, legend=False) +mne.viz.plot_evoked_topo(evoked_event_2_ac.copy().pick('hbo'), + color='r', axes=axes, legend=False) # Tidy the legend leg_lines = [line for line in axes.lines if line.get_c() == 'b'][:1] leg_lines.append([line for line in axes.lines if line.get_c() == 'r'][0]) -fig.legend(leg_lines, ['Event 1', 'Event 2'], loc='lower right') +fig.legend(leg_lines, ['HBO Event 1', 'HBO Event 2'], loc='lower right') + + +# ac HBR +fig, axes = plt.subplots(nrows=1, ncols=1, figsize=(6, 4)) +mne.viz.plot_evoked_topo(evoked_event_1_ac.copy().pick('hbr'), + color='b', axes=axes, legend=False) +mne.viz.plot_evoked_topo(evoked_event_2_ac.copy().pick('hbr'), + color='r', axes=axes, legend=False) + +# Tidy the legend +leg_lines = [line for line in axes.lines if line.get_c() == 'b'][:1] +leg_lines.append([line for line in axes.lines if line.get_c() == 'r'][0]) +fig.legend(leg_lines, ['HBR Event 1', 'HBR Event 2'], loc='lower right') # ph fig, axes = plt.subplots(nrows=1, ncols=1, figsize=(6, 4)) -mne.viz.plot_evoked_topo(evoked_1_ph, color='b', axes=axes, legend=False) -mne.viz.plot_evoked_topo(evoked_2_ph, color='r', axes=axes, legend=False) +mne.viz.plot_evoked_topo(evoked_event_1_ph, color='b', axes=axes, legend=False) +mne.viz.plot_evoked_topo(evoked_event_2_ph, color='r', axes=axes, legend=False) # Tidy the legend leg_lines = [line for line in axes.lines if line.get_c() == 'b'][:1] From 75fb2d27314c678404c47cc5b6b78cbf547eef13 Mon Sep 17 00:00:00 2001 From: Jonathan Kuziek Date: Mon, 15 Jun 2020 16:39:15 -0600 Subject: [PATCH 111/167] plot_80 and boxy.py should now hopefully conform to pep8 --- mne/io/boxy/boxy.py | 466 +++++++++--------- 
.../preprocessing/plot_80_boxy_processing.py | 159 +++--- 2 files changed, 316 insertions(+), 309 deletions(-) diff --git a/mne/io/boxy/boxy.py b/mne/io/boxy/boxy.py index 66773813013..c1c2066d929 100644 --- a/mne/io/boxy/boxy.py +++ b/mne/io/boxy/boxy.py @@ -13,7 +13,6 @@ from ...transforms import apply_trans, get_ras_to_neuromag_trans from ...utils import logger, verbose, fill_doc from ...channels.montage import make_dig_montage -from ...annotations import Annotations @fill_doc @@ -58,7 +57,7 @@ class RawBOXY(BaseRaw): def __init__(self, fname, datatype='AC', preload=False, verbose=None): logger.info('Loading %s' % fname) - # Check if required files exist and store names for later use + # Check if required files exist and store names for later use. files = dict() keys = ('mtg', 'elp', '*.[000-999]*') print(fname) @@ -72,31 +71,24 @@ def __init__(self, fname, datatype='AC', preload=False, verbose=None): (key, len(files[key]),)) files[key] = files[key][0] - # determine which data type to return### + # Determine which data type to return. if datatype in ['AC', 'DC', 'Ph']: data_types = [datatype] else: raise RuntimeError('Expect AC, DC, or Ph, got %s' % datatype) - # determine how many blocks we have per montage + # Determine how many blocks we have per montage. blk_names = [] mtg_names = [] - mtgs = re.findall('\w\.\d+', str(files['*.[000-999]*'])) + mtgs = re.findall(r'\w\.\d+', str(files['*.[000-999]*'])) [mtg_names.append(i_mtg[0]) for i_mtg in mtgs if i_mtg[0] not in mtg_names] for i_mtg in mtg_names: temp = [] [temp.append(ii_mtg[2:]) for ii_mtg in mtgs if ii_mtg[0] == i_mtg] blk_names.append(temp) - - # Read header file - # Parse required header fields - # this keeps track of the line we're on - # mostly to know the start and stop of data (probably an easier way) - # load and read data to get some meta information - # there is alot of information at the beginning of a file - # but this only grabs some of it + # Read header file and grab some info. 
detect_num = [] source_num = [] aux_num = [] @@ -109,7 +101,7 @@ def __init__(self, fname, datatype='AC', preload=False, verbose=None): with open(i_file, 'r') as data: for line_num, i_line in enumerate(data, 1): if '#DATA ENDS' in i_line: - #data ends just before this + # Data ends just before this. end_line.append(line_num - 1) break if 'Detector Channels' in i_line: @@ -125,13 +117,12 @@ def __init__(self, fname, datatype='AC', preload=False, verbose=None): elif 'Updata Rate (Hz)' in i_line: srate.append(float(i_line.rsplit(' ')[0])) elif '#DATA BEGINS' in i_line: - #data starts a couple lines later + # Data should start a couple lines later. start_line.append(line_num + 2) elif 'exmux' in i_line: filetype[file_num] = 'non-parsed' - # Extract source-detectors - # set up some variables + # Extract source-detectors. chan_num_1 = [] chan_num_2 = [] source_label = [] @@ -139,13 +130,14 @@ def __init__(self, fname, datatype='AC', preload=False, verbose=None): chan_wavelength = [] chan_modulation = [] - # load and read each line of the .mtg file + # Load and read each line of the .mtg file. with open(files['mtg'], 'r') as data: for line_num, i_line in enumerate(data, 1): if line_num == 2: mtg_chan_num = [int(num) for num in i_line.split()] elif line_num > 2: - chan1, chan2, source, detector, wavelength, modulation = i_line.split() + (chan1, chan2, source, detector, + wavelength, modulation) = i_line.split() chan_num_1.append(chan1) chan_num_2.append(chan2) source_label.append(source) @@ -153,7 +145,7 @@ def __init__(self, fname, datatype='AC', preload=False, verbose=None): chan_wavelength.append(wavelength) chan_modulation.append(modulation) - # Read information about probe/montage/optodes + # Read information about probe/montage/optodes. 
# A word on terminology used here: # Sources produce light # Detectors measure light @@ -161,24 +153,23 @@ def __init__(self, fname, datatype='AC', preload=False, verbose=None): # Each source - detector pair produces a channel # Channels are defined as the midpoint between source and detector - # check if we are given .elp file + # Load and read .elp file. all_labels = [] all_coords = [] fiducial_coords = [] get_label = 0 get_coords = 0 - # load and read .elp file with open(files['elp'], 'r') as data: for i_line in data: - # first let's get our fiducial coordinates + # First let's get our fiducial coordinates. if '%F' in i_line: fiducial_coords.append(i_line.split()[1:]) - # check where sensor info starts + # Check where sensor info starts. if '//Sensor name' in i_line: get_label = 1 elif get_label == 1: - # grab the part after '%N' for the label + # Grab the part after '%N' for the label. label = i_line.split()[1] all_labels.append(label) get_label = 0 @@ -192,22 +183,21 @@ def __init__(self, fname, datatype='AC', preload=False, verbose=None): for x in fiducial_coords[i_index]]) - # get coordinates for sources in .mtg file from .elp file + # Get coordinates from .elp file, for sources in .mtg file. source_coords = [] for i_chan in source_label: if i_chan in all_labels: chan_index = all_labels.index(i_chan) source_coords.append(all_coords[chan_index]) - # get coordinates for detectors in .mtg file from .elp file + # get coordinates from .elp file, for detectors in .mtg file. detect_coords = [] for i_chan in detect_label: if i_chan in all_labels: chan_index = all_labels.index(i_chan) detect_coords.append(all_coords[chan_index]) - # Generate meaningful channel names for each montage - # get our unique labels for sources and detectors for each montage + # Generate meaningful channel names for each montage. 
unique_source_labels = [] unique_detect_labels = [] for mtg_num, i_mtg in enumerate(mtg_chan_num, 0): @@ -220,20 +210,15 @@ def __init__(self, fname, datatype='AC', preload=False, verbose=None): for label in detect_label[start:end] if label not in unique_detect_labels] - # swap order to have lower wavelength first + # Swap order to have lower wavelength first. for i_chan in range(0, len(chan_wavelength), 2): chan_wavelength[i_chan], chan_wavelength[i_chan + 1] = ( chan_wavelength[i_chan + 1], chan_wavelength[i_chan]) - # now let's label each channel in our data - # data is channels X timepoint where the first source_num rows - # correspond to the first detector, and each row within that - # group is a different source should note that - # current .mtg files contain channels for multiple - # data files going to move to have a single .mtg file - # per participant, condition, and montage - # combine coordinates and label our channels - # will label them based on ac, dc, and ph data + # Label each channel in our data. + # Data is organised by channels x timepoint, where the first + # 'source_num' rows correspond to the first detector, the next + # 'source_num' rows correspond to the second detector, and so on. boxy_coords = [] boxy_labels = [] mrk_coords = [] @@ -247,49 +232,45 @@ def __init__(self, fname, datatype='AC', preload=False, verbose=None): for mtg_num, i_mtg in enumerate(mtg_chan_num, 0): start = int(np.sum(mtg_chan_num[:mtg_num])) end = int(np.sum(mtg_chan_num[:mtg_num + 1])) - # we will also organise some data for each montage + # Organise some data for each montage. start_blk = int(np.sum(blk_num[:mtg_num])) - # get stop and stop lines for each montage + # Get stop and stop lines for each montage. mtg_start.append(start_line[start_blk]) mtg_end.append(end_line[start_blk]) - # get source and detector numbers for each montage + # Get source and detector numbers for each montage. 
mtg_src_num.append(source_num[start_blk]) mtg_det_num.append(detect_num[start_blk]) - # get modulation frequency for each channel and montage - # assuming modulation freq in MHz - mtg_mdf.append([int(chan_mdf)*1e6 for chan_mdf in chan_modulation[start:end]]) + # Get modulation frequency for each channel and montage. + # Assuming modulation freq in MHz. + mtg_mdf.append([int(chan_mdf)*1e6 for chan_mdf + in chan_modulation[start:end]]) for i_type in data_types: for i_coord in range(start, end): - boxy_coords.append(np.mean( - np.vstack((source_coords[i_coord], - detect_coords[i_coord])), - axis=0).tolist() + source_coords[i_coord] + - detect_coords[i_coord] + - [chan_wavelength[i_coord]] + - [0] + [0]) - boxy_labels.append('S' + str( - unique_source_labels.index( - source_label[i_coord]) + 1) + '_D' + - str(unique_detect_labels.index( - detect_label[i_coord]) + 1) + - ' ' + chan_wavelength[i_coord]) - - # add extra column for triggers - mrk_labels.append('Markers' + ' ' + - mtg_names[mtg_num]) + boxy_coords.append( + np.mean(np.vstack((source_coords[i_coord], + detect_coords[i_coord])), + axis=0).tolist() + source_coords[i_coord] + + detect_coords[i_coord] + [chan_wavelength[i_coord]] + + [0] + [0]) + boxy_labels.append('S' + str(unique_source_labels.index( + source_label[i_coord]) + 1) + '_D' + + str(unique_detect_labels.index(detect_label[i_coord]) + + 1) + ' ' + chan_wavelength[i_coord]) + + # Add extra column for triggers. + mrk_labels.append('Markers' + ' ' + mtg_names[mtg_num]) mrk_coords.append(np.zeros((12,))) - - # add triggers to the end of our data + + # Add triggers to the end of our data. boxy_labels.extend(mrk_labels) boxy_coords.extend(mrk_coords) - # convert to floats + # Convert to floats. 
boxy_coords = np.array(boxy_coords, float) all_coords = np.array(all_coords, float) - # make our montage - # montage only wants channel coords, so need to grab those, convert to - # array, then make a dict with labels + # Montage only wants channel coords, so need to grab those, + # convert to array, then make a dict with labels. all_chan_dict = dict(zip(all_labels, all_coords)) my_dig_montage = make_dig_montage(ch_pos=all_chan_dict, @@ -298,7 +279,7 @@ def __init__(self, fname, datatype='AC', preload=False, verbose=None): lpa=fiducial_coords[1], rpa=fiducial_coords[2]) - # create info structure + # Create info structure. if datatype == 'Ph': chan_type = 'fnirs_ph' else: @@ -308,36 +289,34 @@ def __init__(self, fname, datatype='AC', preload=False, verbose=None): for i_chan, _ in enumerate(boxy_labels)]) info = create_info(boxy_labels, srate[0], ch_types=ch_types) - # add dig info - # this also applies a transform to the data into neuromag space - # based on fiducials + # Add dig to info. info.set_montage(my_dig_montage) - # Store channel, source, and detector locations + # Store channel, source, and detector locations. # The channel location is stored in the first 3 entries of loc. # The source location is stored in the second 3 entries of loc. # The detector location is stored in the third 3 entries of loc. # Also encode the light frequency in the structure. - # place our coordinates and wavelengths for each channel # These are all in actual 3d individual coordinates, - # so let's transform them to the Neuromag head coordinate frame + # so let's transform them to the Neuromag head coordinate frame. 
native_head_t = get_ras_to_neuromag_trans(fiducial_coords[0], fiducial_coords[1], fiducial_coords[2]) for i_chan in range(len(boxy_labels)): if i_chan < np.sum(mtg_chan_num): - temp_ch_src_det = apply_trans(native_head_t, - boxy_coords[i_chan][:9].reshape(3, 3) - ).ravel() + temp_ch_src_det = apply_trans( + native_head_t, + boxy_coords[i_chan][:9].reshape(3, 3)).ravel() else: - temp_ch_src_det = np.zeros(9,)#don't want to transform markers - # add wavelength and placeholders + # Don't want to transform markers. + temp_ch_src_det = np.zeros(9,) + # Add wavelength and placeholders. temp_other = np.asarray(boxy_coords[i_chan][9:], dtype=np.float64) info['chs'][i_chan]['loc'] = np.concatenate((temp_ch_src_det, temp_other), axis=0) - + raw_extras = {'source_num': source_num, 'detect_num': detect_num, 'start_line': start_line, @@ -349,26 +328,22 @@ def __init__(self, fname, datatype='AC', preload=False, verbose=None): 'data_types': data_types, 'mtg_mdf': mtg_mdf, } - - ###check to make sure data is the same length for each file - ###boxy can be set to only record so many sample points per recording - ###so start and stop lines may differ between files for a given - ###participant/experiment, but amount of data should be the same - ###check start lines - (print('Start lines the same!') if len(set(start_line)) == 1 else + + # Check data start lines. + (print('Start lines the same!') if len(set(start_line)) == 1 else print('Start lines different!')) - - ###check end lines - (print('End lines the same!') if len(set(end_line)) == 1 else + + # Check data end lines. + (print('End lines the same!') if len(set(end_line)) == 1 else print('End lines different!')) - - ###now make sure data lengths are the same - data_length = ([end_line[i_line] - start_line[i_line] for i_line, + + # Make sure data lengths are the same. 
+ data_length = ([end_line[i_line] - start_line[i_line] for i_line, line_num in enumerate(start_line)]) - - (print('Data sizes are the same!') if len(set(data_length)) == 1 else + + (print('Data sizes are the same!') if len(set(data_length)) == 1 else print('Data sizes are different!')) - + print('Start Line: ', start_line[0]) print('End Line: ', end_line[0]) print('Original Difference: ', end_line[0] - start_line[0]) @@ -376,14 +351,13 @@ def __init__(self, fname, datatype='AC', preload=False, verbose=None): print('New first_samps: ', first_samps) diff = end_line[0] - (start_line[0]) - # input file has rows for each source, - # output variable rearranges as columns and does not + # Number if rows in data file depends on data file type. if filetype[0] == 'non-parsed': last_samps = ((diff*len(blk_names[0])) // (source_num[0])) elif filetype[0] == 'parsed': last_samps = diff*len(blk_names[0]) - # first sample is technically sample 0, not the start line in the file + # First sample is technically sample 0, not the start line in the file. first_samps = 0 print('New last_samps: ', last_samps) @@ -408,10 +382,10 @@ def _read_segment_file(self, data, idx, fi, start, stop, cals, mult): mtg_mdf = self._raw_extras[fi]['mtg_mdf'] boxy_files = self._raw_extras[fi]['files']['*.[000-999]*'] event_fname = os.path.join(self._filenames[fi], 'evt') - - # Check if event files are available - # mostly for older boxy files since we'll be using the digaux channel - # for markers in further recordings + + # Check if event files are available. + # Mostly for older boxy files since we'll be using the digaux channel + # for markers in further recordings. 
try: event_files = dict() key = ('*.[000-999]*') @@ -419,23 +393,25 @@ def _read_segment_file(self, data, idx, fi, start, stop, cals, mult): event_files[key] = [glob.glob('%s/*%s' % (event_fname, key))] event_files[key] = event_files[key][0] event_data = [] - + for file_num, i_file in enumerate(event_files[key]): event_data.append(scipy.io.loadmat( event_files[key][file_num])['event']) - if event_data != []: print('Event file found!') - else: print('No event file found. Using digaux!') - - except: + if event_data != []: + print('Event file found!') + else: + print('No event file found. Using digaux!') + + except Exception: print('No event file found. Using digaux!') pass - # detectors, sources, and data types + # Possible detector names. detectors = ['A', 'B', 'C', 'D', 'E', 'F', 'G', 'H', 'I', 'J', 'K', 'L', 'M', 'N', 'O', 'P', 'Q', 'R', 'S', 'T', 'U', 'V', 'W', 'X', 'Y', 'Z'] - # load our data + # Load our optical data. all_data = [] all_markers = [] for i_mtg, mtg_name in enumerate(montages): @@ -447,212 +423,226 @@ def _read_segment_file(self, data, idx, fi, start, stop, cals, mult): boxy_data = [] with open(boxy_file, 'r') as data_file: for line_num, i_line in enumerate(data_file, 1): - if line_num == (start_line[i_blk] - 1):# grab column names - col_names = np.asarray(re.findall('\w+\-\w+|\w+\-\d+|\w+', - i_line.rsplit(' ')[0])) - if line_num > start_line[file_num] and line_num <= end_line[file_num]: + if line_num == (start_line[i_blk] - 1): + # Grab column names. + col_names = np.asarray( + re.findall(r'\w+\-\w+|\w+\-\d+|\w+', + i_line.rsplit(' ')[0])) + if (line_num > start_line[file_num] and + line_num <= end_line[file_num]): boxy_data.append(i_line.rsplit(' ')) - + sources = np.arange(1, source_num[file_num] + 1, 1) - - # grab the individual data points for each column - boxy_data = [re.findall('[-+]?\d*\.?\d+', i_row[0]) + + # Grab the individual data points for each column. 
+ boxy_data = [re.findall(r'[-+]?\d*\.?\d+', i_row[0]) for i_row in boxy_data] - - # make variable to store our data as an array - # rather than list of strings + + # Make variable to store our data as an array + # rather than list of strings. boxy_length = len(col_names) boxy_array = np.full((len(boxy_data), boxy_length), np.nan) for ii, i_data in enumerate(boxy_data): - # need to make sure our rows are the same length - # this is done by padding the shorter ones + # Need to make sure our rows are the same length. + # This is done by padding the shorter ones. padding = boxy_length - len(i_data) boxy_array[ii] = np.pad(np.asarray(i_data, dtype=float), (0, padding), mode='empty') - - # grab data from the other columns - # that don't pertain to AC, DC, or Ph + + # Grab data from the other columns that aren't AC, DC, or Ph. meta_data = dict() keys = ['time', 'record', 'group', 'exmux', 'step', 'mark', 'flag', 'aux1', 'digaux'] for i_detect in detectors[0:detect_num[file_num]]: keys.append('bias-' + i_detect) - - # data that isn't in our boxy file will be an empty list + + # Data that isn't in our boxy file will be an empty list. for key in keys: meta_data[key] = (boxy_array[:, np.where(col_names == key)[0][0]] if key in col_names else []) - - # make some empty variables to store our data + + # Make some empty variables to store our data. 
if filetype[file_num] == 'non-parsed': - data_ = np.zeros(((((detect_num[file_num] * - source_num[file_num]) * len(data_types))), - int(len(boxy_data) / source_num[file_num]))) + data_ = np.zeros(((((detect_num[file_num] + * source_num[file_num]) + * len(data_types))), + int(len(boxy_data) + / source_num[file_num]))) elif filetype[file_num] == 'parsed': - data_ = np.zeros(((((detect_num[file_num] * - source_num[file_num]) * len(data_types))), - int(len(boxy_data)))) - - # loop through data types + data_ = np.zeros(((((detect_num[file_num] + * source_num[file_num]) + * len(data_types))), + int(len(boxy_data)))) + + # Loop through data types. for i_data in data_types: - - # loop through detectors + + # Loop through detectors. for i_detect in detectors[0:detect_num[file_num]]: - - # loop through sources + + # Loop through sources. for i_source in sources: - - # determine where to store our data - index_loc = (detectors.index(i_detect) * - source_num[file_num] + - (i_source - 1) + - (data_types.index(i_data) * - (source_num[file_num] * - detect_num[file_num]))) - - # need to treat our filetypes differently + + # Determine where to store our data. + index_loc = (detectors.index(i_detect) + * source_num[file_num] + + (i_source - 1) + + (data_types.index(i_data) + * (source_num[file_num] + * detect_num[file_num]))) + + # Need to treat our filetypes differently. if filetype[file_num] == 'non-parsed': - - # non-parsed saves timepoints in groups - # this should account for that - time_points = np.arange(i_source - 1, - int( - meta_data['record'][-1] - ) * source_num[file_num], - source_num[file_num]) - - # determine which channel to look for in boxy_array - channel = np.where(col_names == i_detect + - '-' + i_data)[0][0] - - # save our data based on data type + + # Non-parsed saves timepoints in groups and + # this should account for that. 
+ time_points = np.arange( + i_source - 1, + int(meta_data['record'][-1]) + * source_num[file_num], + source_num[file_num]) + + # Determine which channel to + # look for in boxy_array. + channel = np.where(col_names == i_detect + + '-' + i_data)[0][0] + + # Save our data based on data type. data_[index_loc, :] = boxy_array[time_points, channel] - + elif filetype[file_num] == 'parsed': - - # determine which channel to look for in boxy_array - channel = np.where(col_names == i_detect + '-' + - i_data + str(i_source))[0][0] - - # save our data based on data type + + # Which channel to look for in boxy_array. + channel = np.where(col_names == i_detect + + '-' + i_data + + str(i_source))[0][0] + + # Save our data based on data type. data_[index_loc, :] = boxy_array[:, channel] - - ###phase unwrapping### + + # Phase unwrapping. if i_data == 'Ph': print('Fixing phase wrap') - # accounts for sharp, sudden changes in phase - # such as crossing over from 0/360 degrees - # estimate mean phase of first 50 points - # if a point differs more than 90 degrees from the mean - # add or subtract 360 degress from that point + # Accounts for sharp, sudden changes in phase + # such as crossing over from 0/360 degrees. + # Estimate mean phase of first 50 points. + # If a point differs more than 90 degrees from the + # mean, add or subtract 360 degress from that point. for i_chan in range(np.size(data_, axis=0)): - if np.mean(data_[i_chan,:50]) < 180: + if np.mean(data_[i_chan, :50]) < 180: wrapped_points = data_[i_chan, :] > 270 data_[i_chan, wrapped_points] -= 360 else: - wrapped_points = data_[i_chan,:] < 90 + wrapped_points = data_[i_chan, :] < 90 data_[i_chan, wrapped_points] += 360 - + print('Detrending phase data') - # remove trends and drifts in data that occur over time - + # Remove trends and drifts that occur over time. 
y = np.linspace(0, np.size(data_, axis=1)-1, np.size(data_, axis=1)) x = np.transpose(y) for i_chan in range(np.size(data_, axis=0)): - poly_coeffs = np.polyfit(x,data_[i_chan, :] ,3) - tmp_ph = data_[i_chan, :] - np.polyval(poly_coeffs,x) + poly_coeffs = np.polyfit(x, data_[i_chan, :], 3) + tmp_ph = (data_[i_chan, :] + - np.polyval(poly_coeffs, x)) data_[i_chan, :] = tmp_ph - + print('Removing phase mean') - # subtract mean to better detect outliers using SD - - mrph = np.mean(data_,axis=1); + # Subtract mean to better detect outliers using SD. + + mrph = np.mean(data_, axis=1) for i_chan in range(np.size(data_, axis=0)): - data_[i_chan,:]=(data_[i_chan,:]-mrph[i_chan]) - + data_[i_chan, :] = (data_[i_chan, :] + - mrph[i_chan]) + print('Removing phase outliers') - # remove data points that are larger than three SDs - - ph_out_thr=3; - sdph=np.std(data_,1, ddof = 1); #set ddof to 1 to mimic matlab - n_ph_out = np.zeros(np.size(data_, axis=0), dtype= np.int8) - + # Remove data points that are larger than three SDs. + ph_out_thr = 3 + + # Set ddof to 1 to mimic matlab. + sdph = np.std(data_, 1, ddof=1) + n_ph_out = np.zeros(np.size(data_, axis=0), + dtype=np.int8) + for i_chan in range(np.size(data_, axis=0)): - outliers = np.where(np.abs(data_[i_chan,:]) > - (ph_out_thr*sdph[i_chan])) + outliers = np.where(np.abs(data_[i_chan, :]) > + (ph_out_thr*sdph[i_chan])) outliers = outliers[0] if len(outliers) > 0: if outliers[0] == 0: outliers = outliers[1:] if len(outliers) > 0: - if outliers[-1] == np.size(data_, axis=1) - 1: + if (outliers[-1] == np.size(data_, + axis=1) - 1): outliers = outliers[:-1] n_ph_out[i_chan] = int(len(outliers)) for i_pt in range(n_ph_out[i_chan]): j_pt = outliers[i_pt] - data_[i_chan,j_pt] = ( - (data_[i_chan,j_pt-1] + - data_[i_chan,j_pt+1])/2) - - #convert phase to pico seconds + data_[i_chan, j_pt] = ( + (data_[i_chan, j_pt-1] + + data_[i_chan, j_pt+1])/2) + + # Convert phase to pico seconds. 
for i_chan in range(np.size(data_, axis=0)): - data_[i_chan,:] = ((1e12*data_[i_chan,:])/ - (360*mtg_mdf[i_mtg][i_chan])) - - # swap channels to match new wavelength order + data_[i_chan, :] = ((1e12*data_[i_chan, :]) + / (360*mtg_mdf[i_mtg][i_chan])) + + # Swap channels to match new wavelength order. for i_chan in range(0, len(data_), 2): data_[[i_chan, i_chan + 1]] = data_[[i_chan + 1, i_chan]] - - # If there was an event file, place those events in our data - # If no, use digaux for our events + + # If there was an event file, place those events in our data. + # If no, use digaux for our events. try: temp_markers = np.zeros((len(data_[0, :]),)) - for event_num, event_info in enumerate(event_data[file_num]): + for event_num, event_info in enumerate( + event_data[file_num]): temp_markers[event_info[0]-1] = event_info[1] block_markers.append(temp_markers) - except: - # add our markers to the data array based on filetype### + except Exception: + # Add our markers to the data array based on filetype. if type(meta_data['digaux']) is not list: if filetype[file_num] == 'non-parsed': - block_markers.append(meta_data['digaux'][np.arange(0, - len(meta_data['digaux']), + block_markers.append( + meta_data['digaux'] + [np.arange(0, len(meta_data['digaux']), source_num[file_num])]) elif filetype[file_num] == 'parsed': block_markers.append(meta_data['digaux']) else: block_markers.append(np.zeros((len(data_[0, :]),))) - - ###check our markers to see if anything is actually in there### - if (all(i_mrk == 0 for i_mrk in block_markers[i_blk]) or - all(i_mrk == 255 for i_mrk in block_markers[i_blk])): - print('No markers for montage ' + mtg_name + - ' and block ' + blk_name) + + # Check our markers to see if anything is actually in there. 
+ if (all(i_mrk == 0 for i_mrk in block_markers[i_blk]) or + all(i_mrk == 255 for i_mrk in block_markers[i_blk])): + print('No markers for montage ' + mtg_name + + ' and block ' + blk_name) else: - print('Found markers for montage ' + mtg_name + - ' and block ' + blk_name + '!') - - #change marker for last timepoint to indicate end of block - #we'll be using digaux to send markers, which is a serial port - #so we can send values between 1-255 - #we'll multiply our block start/end markers by 1000 to ensure - #we aren't within the 1-255 range + print('Found markers for montage ' + mtg_name + + ' and block ' + blk_name + '!') + + # Change marker for last timepoint to indicate end of block + # We'll be using digaux to send markers, a serial port, + # so we can send values between 1-255. + # We'll multiply our block start/end markers by 1000 to ensure + # we aren't within the 1-255 range. block_markers[i_blk][-1] = int(blk_name) * 1000 - + all_blocks.append(data_) - + all_data.extend(np.hstack(all_blocks)) all_markers.append(np.hstack(block_markers)) - # add markers to our data + # Add markers to our data. all_data.extend(all_markers) all_data = np.asarray(all_data) print('Blank Data shape: ', data.shape) print('Input Data shape: ', all_data.shape) - # place our data into the data object in place + + # Place our data into the data object in place. data[:] = all_data return data diff --git a/tutorials/preprocessing/plot_80_boxy_processing.py b/tutorials/preprocessing/plot_80_boxy_processing.py index 4f251e022b0..193f6bfb7ec 100644 --- a/tutorials/preprocessing/plot_80_boxy_processing.py +++ b/tutorials/preprocessing/plot_80_boxy_processing.py @@ -1,18 +1,18 @@ """ .. 
_tut-fnirs-processing: -Preprocessing functional near-infrared spectroscopy (fNIRS) data +Preprocessing optical imaging data from the Imagent hardware/boxy software ================================================================ -This tutorial covers how to convert functional near-infrared spectroscopy -(fNIRS) data from raw measurements to relative oxyhaemoglobin (HbO) and -deoxyhaemoglobin (HbR) concentration. +This tutorial covers how to convert optical imaging data from raw measurements +to relative oxyhaemoglobin (HbO) and deoxyhaemoglobin (HbR) concentration. +Phase data from the recording is also processed and plotted in several ways. -.. contents:: Page contents - :local: - :depth: 2 + .. contents:: Page contents + :local: + :depth: 2 -Here we will work with the :ref:`fNIRS motor data `. + Here we will work with the :ref:`fNIRS motor data `. """ # sphinx_gallery_thumbnail_number = 1 @@ -24,11 +24,14 @@ import mne -# load AC and Phase data +# get our data boxy_data_folder = mne.datasets.boxy_example.data_path() boxy_raw_dir = os.path.join(boxy_data_folder, 'Participant-1') + +# load AC and Phase data raw_intensity_ac = mne.io.read_raw_boxy(boxy_raw_dir, 'AC', verbose=True).load_data() + raw_intensity_ph = mne.io.read_raw_boxy(boxy_raw_dir, 'Ph', verbose=True).load_data() @@ -36,22 +39,33 @@ mtg_a = [raw_intensity_ac.ch_names[i_index] for i_index, i_label in enumerate(raw_intensity_ac.info['ch_names']) if re.search(r'S[1-5]_', i_label)] + mtg_b = [raw_intensity_ac.ch_names[i_index] for i_index, i_label in enumerate(raw_intensity_ac.info['ch_names']) if re.search(r'S([6-9]|10)_', i_label)] +# plot the raw data for each data type +# AC +scalings = dict(fnirs_raw=1e2) +raw_intensity_ac.plot(n_channels=5, duration=20, scalings=scalings, + show_scrollbars=True) + +# Phase +scalings = dict(fnirs_ph=1e4) +raw_intensity_ph.plot(n_channels=5, duration=20, scalings=scalings, + show_scrollbars=True) + # 
############################################################################### # # View location of sensors over brain surface # # ------------------------------------------- # # # # Here we validate that the location of sources-detector pairs and channels -# # are in the expected locations. Source-detector pairs are shown as lines -# # between the optodes, channels (the mid point of source-detector pairs) are -# # shown as dots. +# # are in the expected locations. Sources are bright red dots, detectors are +# # dark red dots, with source-detector pairs connected by white lines. subjects_dir = os.path.dirname(mne.datasets.fetch_fsaverage()) -# plot all montages +# plot both montages together fig = mne.viz.create_3d_figure(size=(800, 600), bgcolor='white') fig = mne.viz.plot_alignment(raw_intensity_ac.info, show_axes=True, @@ -65,7 +79,7 @@ fig=fig) mne.viz.set_3d_view(figure=fig, azimuth=20, elevation=55, distance=0.6) -# montage A +# plot montage A only fig = mne.viz.create_3d_figure(size=(800, 600), bgcolor='white') fig = mne.viz.plot_alignment(raw_intensity_ac.copy().pick_channels(mtg_a).info, show_axes=True, @@ -79,7 +93,7 @@ fig=fig) mne.viz.set_3d_view(figure=fig, azimuth=20, elevation=55, distance=0.6) -# montage B +# plot montage B only fig = mne.viz.create_3d_figure(size=(800, 600), bgcolor='white') fig = mne.viz.plot_alignment(raw_intensity_ac.copy().pick_channels(mtg_b).info, show_axes=True, @@ -98,7 +112,7 @@ # # ------------------------------------------------------------- # # # # First we remove channels that are too close together (short channels) to -# # detect a neural response (less than 1 cm distance between optodes). +# # detect a neural response (less than 3 cm distance between optodes). # # These short channels can be seen in the figure above. # # To achieve this we pick all the channels that are not considered to be short. 
@@ -107,24 +121,16 @@ dists = mne.preprocessing.nirs.source_detector_distances( raw_intensity_ac.info, picks=picks) -raw_intensity_ac.pick(picks[dists < 0.08]) +raw_intensity_ac.pick(picks[dists < 0.03]) -# AC -scalings = dict(fnirs_raw=1e2) -raw_intensity_ac.plot(n_channels=5, duration=20, scalings=scalings, - show_scrollbars=True) - -# Phase -scalings = dict(fnirs_ph=1e4) -raw_intensity_ph.plot(n_channels=5, duration=20, scalings=scalings, - show_scrollbars=True) # ############################################################################### # # Converting from raw intensity to optical density # # ------------------------------------------------ # # # # The raw intensity values are then converted to optical density. -# # We will only do this for either DC or AC data, since they are intensity data +# # We will only do this for either DC or AC data since they are measures of +# # light intensity. raw_od = mne.preprocessing.nirs.optical_density(raw_intensity_ac) @@ -190,8 +196,10 @@ fig = raw_haemo.plot_psd(average=True) fig.suptitle('Before filtering', weight='bold', size='x-large') fig.subplots_adjust(top=0.88) + raw_haemo = raw_haemo.filter(0.05, 0.7, h_trans_bandwidth=0.2, l_trans_bandwidth=0.02) + fig = raw_haemo.plot_psd(average=True) fig.suptitle('After filtering', weight='bold', size='x-large') fig.subplots_adjust(top=0.88) @@ -252,6 +260,9 @@ # # baseline correction, and extract the epochs. We visualise the log of which # # epochs were dropped. +# # We will make epochs from the ac-derived heamo data and the phase data +# # separately. + # reject_criteria = dict(hbo=80e-6) reject_criteria = None tmin, tmax = -0.2, 2 @@ -329,28 +340,26 @@ # # View consistency of responses across trials # # ------------------------------------------- # # -# # Now we can view the haemodynamic response for our tapping condition. 
-# # We visualise the response for both the oxy- and deoxyhaemoglobin, and -# # observe the expected peak in HbO at around 6 seconds consistently across -# # trials, and the consistent dip in HbR that is slightly delayed relative to -# # the HbO peak. +# # Now we can view the haemodynamic response for our different events. # haemo plots # Montage A -hbo = [i_index for i_index, i_label - in enumerate(mtg_a_haemo_epochs.info['ch_names']) - if re.search(r'S[1-5]_D[0-9] hbo', i_label)] +hbo_a = [i_index for i_index, i_label + in enumerate(mtg_a_haemo_epochs.info['ch_names']) + if re.search(r'S[1-5]_D[0-9] hbo', i_label)] -hbr = [i_index for i_index, i_label - in enumerate(mtg_a_haemo_epochs.info['ch_names']) - if re.search(r'S[1-5]_D[0-9] hbr', i_label)] +hbr_a = [i_index for i_index, i_label + in enumerate(mtg_a_haemo_epochs.info['ch_names']) + if re.search(r'S[1-5]_D[0-9] hbr', i_label)] mtg_a_haemo_epochs['Montage_A/Event_1'].plot_image( - combine='mean', vmin=-30, vmax=30, group_by={'Oxy': hbo, 'De-Oxy': hbr}, + combine='mean', vmin=-30, vmax=30, + group_by={'Mtg A, Event 1, Oxy': hbo_a, 'Mtg A, Event 1, De-Oxy': hbr_a}, ts_args=dict(ylim=dict(hbo=[-15, 15], hbr=[-15, 15]))) mtg_a_haemo_epochs['Montage_A/Event_2'].plot_image( - combine='mean', vmin=-30, vmax=30, group_by={'Oxy': hbo, 'De-Oxy': hbr}, + combine='mean', vmin=-30, vmax=30, + group_by={'Mtg A, Event 2, Oxy': hbo_a, 'Mtg A, Event 2, De-Oxy': hbr_a}, ts_args=dict(ylim=dict(hbo=[-15, 15], hbr=[-15, 15]))) # ph epochs @@ -364,20 +373,22 @@ # Montage B -hbo = [i_index for i_index, i_label - in enumerate(mtg_a_haemo_epochs.info['ch_names']) - if re.search(r'S([6-9]|10)_D([0-9]|1[0-6]) hbo', i_label)] +hbo_b = [i_index for i_index, i_label + in enumerate(mtg_a_haemo_epochs.info['ch_names']) + if re.search(r'S([6-9]|10)_D([0-9]|1[0-6]) hbo', i_label)] -hbr = [i_index for i_index, i_label - in enumerate(mtg_a_haemo_epochs.info['ch_names']) - if re.search(r'S([6-9]|10)_D([0-9]|1[0-6]) hbr', i_label)] +hbr_b = 
[i_index for i_index, i_label + in enumerate(mtg_a_haemo_epochs.info['ch_names']) + if re.search(r'S([6-9]|10)_D([0-9]|1[0-6]) hbr', i_label)] mtg_b_haemo_epochs['Montage_B/Event_1'].plot_image( - combine='mean', vmin=-30, vmax=30, group_by={'Oxy': hbo, 'De-Oxy': hbr}, + combine='mean', vmin=-30, vmax=30, + group_by={'Mtg B, Event 1, Oxy': hbo_b, 'Mtg B, Event 1, De-Oxy': hbr_b}, ts_args=dict(ylim=dict(hbo=[-15, 15], hbr=[-15, 15]))) mtg_b_haemo_epochs['Montage_B/Event_2'].plot_image( - combine='mean', vmin=-30, vmax=30, group_by={'Oxy': hbo, 'De-Oxy': hbr}, + combine='mean', vmin=-30, vmax=30, + group_by={'Mtg B, Event 2, Oxy': hbo_b, 'Mtg B, Event 2, De-Oxy': hbr_b}, ts_args=dict(ylim=dict(hbo=[-15, 15], hbr=[-15, 15]))) # ph epochs @@ -397,7 +408,7 @@ # # pairs that we selected. All the channels in this data are located over the # # motor cortex, and all channels show a similar pattern in the data. -# ac evoked +# haemo evoked fig, axes = plt.subplots(nrows=4, ncols=2, figsize=(15, 6)) clim = dict(hbo=[-10, 10], hbr=[-10, 10]) @@ -406,21 +417,21 @@ mtg_b_1_evoked_ac = mtg_b_haemo_epochs['Montage_B/Event_1'].average() mtg_b_2_evoked_ac = mtg_b_haemo_epochs['Montage_B/Event_2'].average() -mtg_a_1_evoked_ac.plot_image(axes=axes[0, 0], picks=hbo, +mtg_a_1_evoked_ac.plot_image(axes=axes[0, 0], picks=hbo_a, titles='HBO Montage A Event 1', clim=clim) -mtg_a_1_evoked_ac.plot_image(axes=axes[0, 1], picks=hbr, +mtg_a_1_evoked_ac.plot_image(axes=axes[0, 1], picks=hbr_a, titles='HBR Montage A Event 1', clim=clim) -mtg_a_2_evoked_ac.plot_image(axes=axes[1, 0], picks=hbo, +mtg_a_2_evoked_ac.plot_image(axes=axes[1, 0], picks=hbo_a, titles='HBO Montage A Event 2', clim=clim) -mtg_a_2_evoked_ac.plot_image(axes=axes[1, 1], picks=hbr, +mtg_a_2_evoked_ac.plot_image(axes=axes[1, 1], picks=hbr_a, titles='HBR Montage A Event 2', clim=clim) -mtg_b_1_evoked_ac.plot_image(axes=axes[2, 0], picks=hbo, +mtg_b_1_evoked_ac.plot_image(axes=axes[2, 0], picks=hbo_b, titles='HBO Montage B Event 
1', clim=clim) -mtg_b_1_evoked_ac.plot_image(axes=axes[2, 1], picks=hbr, +mtg_b_1_evoked_ac.plot_image(axes=axes[2, 1], picks=hbr_b, titles='HBR Montage B Event 1', clim=clim) -mtg_b_2_evoked_ac.plot_image(axes=axes[3, 0], picks=hbo, +mtg_b_2_evoked_ac.plot_image(axes=axes[3, 0], picks=hbo_b, titles='HBO Montage B Event 2', clim=clim) -mtg_b_2_evoked_ac.plot_image(axes=axes[3, 1], picks=hbr, +mtg_b_2_evoked_ac.plot_image(axes=axes[3, 1], picks=hbr_b, titles='HBR Montage B Event 2', clim=clim) # Combine Montages @@ -432,6 +443,7 @@ in enumerate(mtg_b_1_evoked_ac.info['ch_names']) if re.search(r'S([6-9]|10)_', i_label)] +# zero channels that don't correspond to montage A/B mtg_a_1_evoked_ac._data[mtg_b_channels_ac, :] = 0 mtg_a_2_evoked_ac._data[mtg_b_channels_ac, :] = 0 mtg_b_1_evoked_ac._data[mtg_a_channels_ac, :] = 0 @@ -479,6 +491,7 @@ in enumerate(mtg_b_1_evoked_ph.info['ch_names']) if re.search(r'S([6-9]|10)_', i_label)] +# zero channels that don't correspond to montage A/B mtg_a_1_evoked_ph._data[mtg_b_channels_ph, :] = 0 mtg_a_2_evoked_ph._data[mtg_b_channels_ph, :] = 0 mtg_b_1_evoked_ph._data[mtg_a_channels_ph, :] = 0 @@ -496,14 +509,15 @@ evoked_event_2_ph.plot_image(axes=axes[1], titles='Event_2', clim=clim) # ############################################################################### -# # Plot standard fNIRS response image +# # Plot standard haemodynamic response image # # ---------------------------------- # # -# # Next we generate the most common visualisation of fNIRS data: plotting -# # both the HbO and HbR on the same figure to illustrate the relation between -# # the two signals. +# # Plot both the HbO and HbR on the same figure to illustrate the relation +# # between the two signals. -# ac +# # We can also plot a similat figure for phase data. 
+ +# haemo evoked_dict_ac = {'Event_1': evoked_event_1_ac, 'Event_2': evoked_event_2_ac} color_dict = {'Event_1': 'r', 'Event_2': 'b'} @@ -523,7 +537,8 @@ # # View topographic representation of activity # # ------------------------------------------- # # -# # Next we view how the topographic activity changes throughout the response. +# # Next we view how the topographic activity changes throughout the +# # haemodynamic and phase response. # ac times = np.arange(0.0, 10.0, 2.0) @@ -536,15 +551,17 @@ times = np.arange(0.0, 2.0, 0.5) topomap_args = dict(extrapolate='local') -fig = evoked_event_1_ph.plot_joint(times=times, topomap_args=topomap_args) -fig = evoked_event_2_ph.plot_joint(times=times, topomap_args=topomap_args) +fig = evoked_event_1_ph.plot_joint(times=times, topomap_args=topomap_args, + title='Event 1 Phase') +fig = evoked_event_2_ph.plot_joint(times=times, topomap_args=topomap_args, + title='Event 2 Phase') # ############################################################################### -# # Compare tapping of left and right hands +# # Compare Events 1 and 2 # # --------------------------------------- # # -# # Finally we generate topo maps for the left and right conditions to view -# # the location of activity. First we visualise the HbO activity. +# # We generate topo maps for events 1 and 2 to view the location of activity. +# # First we visualise the HbO activity. 
# ac HBO fig, axes = plt.subplots(nrows=2, ncols=4, figsize=(9, 5), @@ -748,8 +765,8 @@ fig, axes = plt.subplots(nrows=1, ncols=4, figsize=(9, 5), gridspec_kw=dict(width_ratios=[1, 1, 1, 0.1])) vmin, vmax, ts = -0.192, 0.992, 0.1 -vmin = -20 -vmax = 20 +vmin = -180 +vmax = 180 evoked_event_1_ph.plot_topomap(times=ts, axes=axes[0], vmin=vmin, vmax=vmax, colorbar=False, **topomap_args) @@ -805,4 +822,4 @@ # Tidy the legend leg_lines = [line for line in axes.lines if line.get_c() == 'b'][:1] leg_lines.append([line for line in axes.lines if line.get_c() == 'r'][0]) -fig.legend(leg_lines, ['Event 1', 'Event 2'], loc='lower right') +fig.legend(leg_lines, ['Phase Event 1', 'Phase Event 2'], loc='lower right') From d68528c15cfad278d2fb66a629c898c626870d65 Mon Sep 17 00:00:00 2001 From: Kyle Mathewson Date: Tue, 16 Jun 2020 01:09:27 -0700 Subject: [PATCH 112/167] fixed flake8 on boxy.py --- mne/io/boxy/boxy.py | 106 +++++++++++++++++++++++--------------------- 1 file changed, 56 insertions(+), 50 deletions(-) diff --git a/mne/io/boxy/boxy.py b/mne/io/boxy/boxy.py index c1c2066d929..2ea42cc87e9 100644 --- a/mne/io/boxy/boxy.py +++ b/mne/io/boxy/boxy.py @@ -242,20 +242,20 @@ def __init__(self, fname, datatype='AC', preload=False, verbose=None): mtg_det_num.append(detect_num[start_blk]) # Get modulation frequency for each channel and montage. # Assuming modulation freq in MHz. 
- mtg_mdf.append([int(chan_mdf)*1e6 for chan_mdf + mtg_mdf.append([int(chan_mdf) * 1e6 for chan_mdf in chan_modulation[start:end]]) for i_type in data_types: for i_coord in range(start, end): boxy_coords.append( np.mean(np.vstack((source_coords[i_coord], detect_coords[i_coord])), - axis=0).tolist() + source_coords[i_coord] - + detect_coords[i_coord] + [chan_wavelength[i_coord]] - + [0] + [0]) + axis=0).tolist() + source_coords[i_coord] + + detect_coords[i_coord] + [chan_wavelength[i_coord]] + + [0] + [0]) boxy_labels.append('S' + str(unique_source_labels.index( - source_label[i_coord]) + 1) + '_D' - + str(unique_detect_labels.index(detect_label[i_coord]) - + 1) + ' ' + chan_wavelength[i_coord]) + source_label[i_coord]) + 1) + '_D' + + str(unique_detect_labels.index(detect_label[i_coord]) + + 1) + ' ' + chan_wavelength[i_coord]) # Add extra column for triggers. mrk_labels.append('Markers' + ' ' + mtg_names[mtg_num]) @@ -330,19 +330,25 @@ def __init__(self, fname, datatype='AC', preload=False, verbose=None): } # Check data start lines. - (print('Start lines the same!') if len(set(start_line)) == 1 else - print('Start lines different!')) + if len(set(start_line)) == 1: + print('Start lines the same!') + else: + print('Start lines different!') # Check data end lines. - (print('End lines the same!') if len(set(end_line)) == 1 else - print('End lines different!')) + if len(set(end_line)) == 1: + print('End lines the same!') + else: + print('End lines different!') # Make sure data lengths are the same. 
data_length = ([end_line[i_line] - start_line[i_line] for i_line, line_num in enumerate(start_line)]) - (print('Data sizes are the same!') if len(set(data_length)) == 1 else - print('Data sizes are different!')) + if len(set(data_length)) == 1: + print('Data sizes are the same!') + else: + print('Data sizes are different!') print('Start Line: ', start_line[0]) print('End Line: ', end_line[0]) @@ -353,9 +359,9 @@ def __init__(self, fname, datatype='AC', preload=False, verbose=None): # Number if rows in data file depends on data file type. if filetype[0] == 'non-parsed': - last_samps = ((diff*len(blk_names[0])) // (source_num[0])) + last_samps = ((diff * len(blk_names[0])) // (source_num[0])) elif filetype[0] == 'parsed': - last_samps = diff*len(blk_names[0]) + last_samps = diff * len(blk_names[0]) # First sample is technically sample 0, not the start line in the file. first_samps = 0 @@ -365,7 +371,7 @@ def __init__(self, fname, datatype='AC', preload=False, verbose=None): super(RawBOXY, self).__init__( info, preload, filenames=[fname], first_samps=[first_samps], - last_samps=[last_samps-1], + last_samps=[last_samps - 1], raw_extras=[raw_extras], verbose=verbose) def _read_segment_file(self, data, idx, fi, start, stop, cals, mult): @@ -418,7 +424,7 @@ def _read_segment_file(self, data, idx, fi, start, stop, cals, mult): all_blocks = [] block_markers = [] for i_blk, blk_name in enumerate(blocks[i_mtg]): - file_num = i_blk + (i_mtg*len(blocks[i_mtg])) + file_num = i_blk + (i_mtg * len(blocks[i_mtg])) boxy_file = boxy_files[file_num] boxy_data = [] with open(boxy_file, 'r') as data_file: @@ -464,15 +470,15 @@ def _read_segment_file(self, data, idx, fi, start, stop, cals, mult): # Make some empty variables to store our data. 
if filetype[file_num] == 'non-parsed': - data_ = np.zeros(((((detect_num[file_num] - * source_num[file_num]) - * len(data_types))), - int(len(boxy_data) - / source_num[file_num]))) + data_ = np.zeros(((((detect_num[file_num] * + source_num[file_num]) * + len(data_types))), + int(len(boxy_data) / + source_num[file_num]))) elif filetype[file_num] == 'parsed': - data_ = np.zeros(((((detect_num[file_num] - * source_num[file_num]) - * len(data_types))), + data_ = np.zeros(((((detect_num[file_num] * + source_num[file_num]) * + len(data_types))), int(len(boxy_data)))) # Loop through data types. @@ -485,12 +491,12 @@ def _read_segment_file(self, data, idx, fi, start, stop, cals, mult): for i_source in sources: # Determine where to store our data. - index_loc = (detectors.index(i_detect) - * source_num[file_num] - + (i_source - 1) - + (data_types.index(i_data) - * (source_num[file_num] - * detect_num[file_num]))) + index_loc = (detectors.index(i_detect) * + source_num[file_num] + + (i_source - 1) + + (data_types.index(i_data) * + (source_num[file_num] * + detect_num[file_num]))) # Need to treat our filetypes differently. if filetype[file_num] == 'non-parsed': @@ -499,14 +505,14 @@ def _read_segment_file(self, data, idx, fi, start, stop, cals, mult): # this should account for that. time_points = np.arange( i_source - 1, - int(meta_data['record'][-1]) - * source_num[file_num], + int(meta_data['record'][-1]) * + source_num[file_num], source_num[file_num]) # Determine which channel to # look for in boxy_array. - channel = np.where(col_names == i_detect - + '-' + i_data)[0][0] + channel = np.where(col_names == i_detect + + '-' + i_data)[0][0] # Save our data based on data type. data_[index_loc, :] = boxy_array[time_points, @@ -515,9 +521,9 @@ def _read_segment_file(self, data, idx, fi, start, stop, cals, mult): elif filetype[file_num] == 'parsed': # Which channel to look for in boxy_array. 
- channel = np.where(col_names == i_detect - + '-' + i_data - + str(i_source))[0][0] + channel = np.where(col_names == i_detect + + '-' + i_data + + str(i_source))[0][0] # Save our data based on data type. data_[index_loc, :] = boxy_array[:, channel] @@ -545,8 +551,8 @@ def _read_segment_file(self, data, idx, fi, start, stop, cals, mult): x = np.transpose(y) for i_chan in range(np.size(data_, axis=0)): poly_coeffs = np.polyfit(x, data_[i_chan, :], 3) - tmp_ph = (data_[i_chan, :] - - np.polyval(poly_coeffs, x)) + tmp_ph = (data_[i_chan, :] - + np.polyval(poly_coeffs, x)) data_[i_chan, :] = tmp_ph print('Removing phase mean') @@ -554,8 +560,8 @@ def _read_segment_file(self, data, idx, fi, start, stop, cals, mult): mrph = np.mean(data_, axis=1) for i_chan in range(np.size(data_, axis=0)): - data_[i_chan, :] = (data_[i_chan, :] - - mrph[i_chan]) + data_[i_chan, :] = (data_[i_chan, :] - + mrph[i_chan]) print('Removing phase outliers') # Remove data points that are larger than three SDs. @@ -581,13 +587,13 @@ def _read_segment_file(self, data, idx, fi, start, stop, cals, mult): for i_pt in range(n_ph_out[i_chan]): j_pt = outliers[i_pt] data_[i_chan, j_pt] = ( - (data_[i_chan, j_pt-1] - + data_[i_chan, j_pt+1])/2) + (data_[i_chan, j_pt - 1] + + data_[i_chan, j_pt + 1]) / 2) # Convert phase to pico seconds. for i_chan in range(np.size(data_, axis=0)): - data_[i_chan, :] = ((1e12*data_[i_chan, :]) - / (360*mtg_mdf[i_mtg][i_chan])) + data_[i_chan, :] = ((1e12 * data_[i_chan, :]) / + (360 * mtg_mdf[i_mtg][i_chan])) # Swap channels to match new wavelength order. for i_chan in range(0, len(data_), 2): @@ -617,11 +623,11 @@ def _read_segment_file(self, data, idx, fi, start, stop, cals, mult): # Check our markers to see if anything is actually in there. 
if (all(i_mrk == 0 for i_mrk in block_markers[i_blk]) or all(i_mrk == 255 for i_mrk in block_markers[i_blk])): - print('No markers for montage ' + mtg_name - + ' and block ' + blk_name) + print('No markers for montage ' + mtg_name + + ' and block ' + blk_name) else: - print('Found markers for montage ' + mtg_name - + ' and block ' + blk_name + '!') + print('Found markers for montage ' + mtg_name + + ' and block ' + blk_name + '!') # Change marker for last timepoint to indicate end of block # We'll be using digaux to send markers, a serial port, From f746b97016ab57a7ce83e8794e495dda21b4b548 Mon Sep 17 00:00:00 2001 From: Kyle Mathewson Date: Tue, 16 Jun 2020 01:11:48 -0700 Subject: [PATCH 113/167] flake8 on other files --- mne/defaults.py | 2 +- .../preprocessing/plot_80_boxy_processing.py | 34 +++++++++---------- 2 files changed, 18 insertions(+), 18 deletions(-) diff --git a/mne/defaults.py b/mne/defaults.py index 972eb60c885..6289f9a4a36 100644 --- a/mne/defaults.py +++ b/mne/defaults.py @@ -42,7 +42,7 @@ dipole='Dipole', ecog='ECoG', hbo='Oxyhemoglobin', ref_meg='Reference Magnetometers', fnirs_raw='fNIRS (raw)', fnirs_od='fNIRS (OD)', fnirs_ph='fNIRS (Ph)', - hbr='Deoxyhemoglobin', gof='Goodness of fit', + hbr='Deoxyhemoglobin', gof='Goodness of fit', csd='Current source density'), mask_params=dict(marker='o', markerfacecolor='w', diff --git a/tutorials/preprocessing/plot_80_boxy_processing.py b/tutorials/preprocessing/plot_80_boxy_processing.py index 193f6bfb7ec..c87573cfaa2 100644 --- a/tutorials/preprocessing/plot_80_boxy_processing.py +++ b/tutorials/preprocessing/plot_80_boxy_processing.py @@ -114,7 +114,7 @@ # # First we remove channels that are too close together (short channels) to # # detect a neural response (less than 3 cm distance between optodes). # # These short channels can be seen in the figure above. -# # To achieve this we pick all the channels that are not considered to be short. 
+# # To achieve this we pick all the channels not considered to be short. picks = mne.pick_types(raw_intensity_ac.info, meg=False, fnirs=True, stim=True) @@ -598,15 +598,15 @@ colorbar=False, **topomap_args) -evoked_event_1_ac.copy().pick(hbo_a+hbo_b).plot_topomap(times=times, - axes=axes[0, 2:], - colorbar=True, - **topomap_args) +evoked_event_1_ac.copy().pick(hbo_a + hbo_b).plot_topomap(times=times, + axes=axes[0, 2:], + colorbar=True, + **topomap_args) -evoked_event_2_ac.copy().pick(hbo_a+hbo_b).plot_topomap(times=times, - axes=axes[1, 2:], - colorbar=True, - **topomap_args) +evoked_event_2_ac.copy().pick(hbo_a + hbo_b).plot_topomap(times=times, + axes=axes[1, 2:], + colorbar=True, + **topomap_args) for column, condition in enumerate(['Montage A', 'Montage B', 'Combined']): for row, chroma in enumerate(['HBO Event 1', 'HBO Event 2']): @@ -650,14 +650,14 @@ colorbar=False, **topomap_args) -evoked_event_1_ac.copy().pick(hbr_a+hbr_b).plot_topomap(times=times, - axes=axes[0, 2:], - colorbar=True, - **topomap_args) -evoked_event_2_ac.copy().pick(hbr_a+hbr_b).plot_topomap(times=times, - axes=axes[1, 2:], - colorbar=True, - **topomap_args) +evoked_event_1_ac.copy().pick(hbr_a + hbr_b).plot_topomap(times=times, + axes=axes[0, 2:], + colorbar=True, + **topomap_args) +evoked_event_2_ac.copy().pick(hbr_a + hbr_b).plot_topomap(times=times, + axes=axes[1, 2:], + colorbar=True, + **topomap_args) for column, condition in enumerate(['Montage A', 'Montage B', 'Combined']): for row, chroma in enumerate(['HBR Event 1', 'HBR Event 2']): From 070db84b5940035862fad5ed90bc63521239ecb0 Mon Sep 17 00:00:00 2001 From: Kyle Mathewson Date: Tue, 16 Jun 2020 01:12:57 -0700 Subject: [PATCH 114/167] removed dev folder --- dev/plot_test.py | 14 -------------- 1 file changed, 14 deletions(-) delete mode 100644 dev/plot_test.py diff --git a/dev/plot_test.py b/dev/plot_test.py deleted file mode 100644 index 54de0eb4cc8..00000000000 --- a/dev/plot_test.py +++ /dev/null @@ -1,14 +0,0 @@ - - -import 
os -import matplotlib.pyplot as plt - -import mne - - -boxy_data_folder = mne.datasets.boxy_example.data_path() -boxy_raw_dir = os.path.join(boxy_data_folder, 'Participant-1') -raw_intensity = mne.io.read_raw_boxy(boxy_raw_dir, 'AC', verbose=True).load_data() - -### plot the raw data ### -raw_intensity.plot(n_channels=10) From 2d7a2bb3879f21d4b8144ae7ddf1a9c3dbc33b99 Mon Sep 17 00:00:00 2001 From: Kyle Mathewson Date: Tue, 16 Jun 2020 01:21:28 -0700 Subject: [PATCH 115/167] a few more --- mne/channels/channels.py | 2 +- mne/datasets/boxy_example/boxy_example.py | 3 ++- mne/utils/_bunch.py | 3 +++ 3 files changed, 6 insertions(+), 2 deletions(-) diff --git a/mne/channels/channels.py b/mne/channels/channels.py index 993a563d45a..78231817417 100644 --- a/mne/channels/channels.py +++ b/mne/channels/channels.py @@ -73,7 +73,7 @@ def _get_ch_type(inst, ch_type, allow_ref_meg=False): """ if ch_type is None: allowed_types = ['mag', 'grad', 'planar1', 'planar2', 'eeg', 'csd', - 'fnirs_raw', 'fnirs_od', 'fnirs_ph', + 'fnirs_raw', 'fnirs_od', 'fnirs_ph', 'hbo', 'hbr', 'ecog', 'seeg'] allowed_types += ['ref_meg'] if allow_ref_meg else [] for type_ in allowed_types: diff --git a/mne/datasets/boxy_example/boxy_example.py b/mne/datasets/boxy_example/boxy_example.py index 3aa114aa3eb..eb73ce6a8cb 100644 --- a/mne/datasets/boxy_example/boxy_example.py +++ b/mne/datasets/boxy_example/boxy_example.py @@ -20,7 +20,8 @@ def data_path(path=None, force_update=False, update_path=True, download=True, data_path.__doc__ = _data_path_doc.format(name='boxy_example', - conf='MNE_DATASETS_BOXY_EXAMPLE_PATH') + conf='MNE_DATASETS_BOXY_EXAMPLE_PATH' + ) def get_version(): # noqa: D103 diff --git a/mne/utils/_bunch.py b/mne/utils/_bunch.py index 3659116110f..3db11a4390c 100644 --- a/mne/utils/_bunch.py +++ b/mne/utils/_bunch.py @@ -14,6 +14,7 @@ class Bunch(dict): """Dictionary-like object that exposes its keys as attributes.""" + def __init__(self, **kwargs): # noqa: D102 dict.__init__(self, 
kwargs) self.__dict__ = self @@ -24,6 +25,7 @@ def __init__(self, **kwargs): # noqa: D102 class BunchConst(Bunch): """Class to prevent us from re-defining constants (DRY).""" + def __setattr__(self, attr, val): # noqa: D105 if attr != '__dict__' and hasattr(self, attr): raise AttributeError('Attribute "%s" already set' % attr) @@ -38,6 +40,7 @@ class BunchConstNamed(BunchConst): Only supports string keys and int or float values. """ + def __setattr__(self, attr, val): # noqa: D105 assert isinstance(attr, str) if isinstance(val, int): From 47e5ac3e28976ea79448f360be700e7a16e6d8f2 Mon Sep 17 00:00:00 2001 From: Jonathan Kuziek Date: Tue, 16 Jun 2020 15:04:08 -0600 Subject: [PATCH 116/167] only combined montages plotted, scales have been adjusted, fixed change in optical density --- mne/preprocessing/nirs/_optical_density.py | 6 +- .../preprocessing/plot_80_boxy_processing.py | 567 ++++-------------- 2 files changed, 135 insertions(+), 438 deletions(-) diff --git a/mne/preprocessing/nirs/_optical_density.py b/mne/preprocessing/nirs/_optical_density.py index 0c58a987508..dc43d2f81d5 100644 --- a/mne/preprocessing/nirs/_optical_density.py +++ b/mne/preprocessing/nirs/_optical_density.py @@ -27,10 +27,8 @@ def optical_density(raw): """ raw = raw.copy().load_data() _validate_type(raw, BaseRaw, 'raw') - try: - picks = _picks_to_idx(raw.info, 'fnirs_raw') - except: - picks = _picks_to_idx(raw.info, 'fnirs_ph') + picks = _picks_to_idx(raw.info, 'fnirs_raw') + data_means = np.mean(raw.get_data(), axis=1) # The devices measure light intensity. 
Negative light intensities should diff --git a/tutorials/preprocessing/plot_80_boxy_processing.py b/tutorials/preprocessing/plot_80_boxy_processing.py index 193f6bfb7ec..282fade7b82 100644 --- a/tutorials/preprocessing/plot_80_boxy_processing.py +++ b/tutorials/preprocessing/plot_80_boxy_processing.py @@ -46,13 +46,14 @@ # plot the raw data for each data type # AC -scalings = dict(fnirs_raw=1e2) -raw_intensity_ac.plot(n_channels=5, duration=20, scalings=scalings, +scalings = dict(fnirs_raw=2e2, fnirs_ph=4e3, fnirs_od=2, + hbo=2e-3, hbr=2e-3) + +raw_intensity_ac.plot(n_channels=10, duration=20, scalings=scalings, show_scrollbars=True) # Phase -scalings = dict(fnirs_ph=1e4) -raw_intensity_ph.plot(n_channels=5, duration=20, scalings=scalings, +raw_intensity_ph.plot(n_channels=10, duration=20, scalings=scalings, show_scrollbars=True) # ############################################################################### @@ -79,34 +80,6 @@ fig=fig) mne.viz.set_3d_view(figure=fig, azimuth=20, elevation=55, distance=0.6) -# plot montage A only -fig = mne.viz.create_3d_figure(size=(800, 600), bgcolor='white') -fig = mne.viz.plot_alignment(raw_intensity_ac.copy().pick_channels(mtg_a).info, - show_axes=True, - subject='fsaverage', - trans='fsaverage', - surfaces=['head-dense', 'brain'], - fnirs=['sources', 'detectors', 'pairs'], - mri_fiducials=True, - dig=True, - subjects_dir=subjects_dir, - fig=fig) -mne.viz.set_3d_view(figure=fig, azimuth=20, elevation=55, distance=0.6) - -# plot montage B only -fig = mne.viz.create_3d_figure(size=(800, 600), bgcolor='white') -fig = mne.viz.plot_alignment(raw_intensity_ac.copy().pick_channels(mtg_b).info, - show_axes=True, - subject='fsaverage', - trans='fsaverage', - surfaces=['head-dense', 'brain'], - fnirs=['sources', 'detectors', 'pairs'], - mri_fiducials=True, - dig=True, - subjects_dir=subjects_dir, - fig=fig) -mne.viz.set_3d_view(figure=fig, azimuth=20, elevation=55, distance=0.6) - # 
############################################################################### # # Selecting channels appropriate for detecting neural responses # # ------------------------------------------------------------- @@ -123,7 +96,6 @@ raw_intensity_ac.pick(picks[dists < 0.03]) - # ############################################################################### # # Converting from raw intensity to optical density # # ------------------------------------------------ @@ -135,7 +107,7 @@ raw_od = mne.preprocessing.nirs.optical_density(raw_intensity_ac) raw_od.plot(n_channels=len(raw_od.ch_names), - duration=500, show_scrollbars=False) + duration=500, show_scrollbars=False, scalings=scalings) # ############################################################################### # # Evaluating the quality of the data @@ -180,8 +152,8 @@ raw_haemo = mne.preprocessing.nirs.beer_lambert_law(raw_od) -raw_haemo.plot(n_channels=len(raw_haemo.ch_names), - duration=500, show_scrollbars=False) +raw_haemo.plot(n_channels=len(raw_haemo.ch_names), duration=500, + show_scrollbars=False, scalings=scalings) # ############################################################################### # # Removing heart rate from signal @@ -193,14 +165,17 @@ # # remove this. A high pass filter is also included to remove slow drifts # # in the data. 
-fig = raw_haemo.plot_psd(average=True) +fig, axes = plt.subplots(nrows=1, ncols=2, figsize=(15, 6)) + +fig = raw_haemo.plot_psd(average=True, ax=axes) fig.suptitle('Before filtering', weight='bold', size='x-large') fig.subplots_adjust(top=0.88) raw_haemo = raw_haemo.filter(0.05, 0.7, h_trans_bandwidth=0.2, l_trans_bandwidth=0.02) -fig = raw_haemo.plot_psd(average=True) +fig, axes = plt.subplots(nrows=1, ncols=2, figsize=(15, 6)) +fig = raw_haemo.plot_psd(average=True, ax=axes) fig.suptitle('After filtering', weight='bold', size='x-large') fig.subplots_adjust(top=0.88) @@ -218,42 +193,21 @@ # Since our events and timings for this data set are the same across montages, # we are going to find events for each montage separately and combine them later -# Montage A Events -mtg_a_events = mne.find_events(raw_intensity_ac, stim_channel=['Markers a']) +# All events +all_events = mne.find_events(raw_intensity_ac, stim_channel=['Markers a', + 'Markers b']) -mtg_a_event_dict = {'Montage_A/Event_1': 1, - 'Montage_A/Event_2': 2, - 'Montage A/Block 1 End': 1000, - 'Montage A/Block 2 End': 2000} +all_event_dict = {'Event_1': 1, + 'Event_2': 2, + 'Block 1 End': 1000, + 'Block 2 End': 2000} -fig = mne.viz.plot_events(mtg_a_events) +fig = mne.viz.plot_events(all_events) fig.subplots_adjust(right=0.7) # make room for the legend -raw_intensity_ac.copy().pick_channels(mtg_a).plot(events=mtg_a_events, start=0, - duration=10, color='gray', - event_color={1: 'r', - 2: 'b', - 1000: 'k', - 2000: 'k'}) - -# Montage B Events -mtg_b_events = mne.find_events(raw_intensity_ac, stim_channel=['Markers b']) - -mtg_b_event_dict = {'Montage_B/Event_1': 1, - 'Montage_B/Event_2': 2, - 'Montage B/Block 1 End': 1000, - 'Montage B/Block 2 End': 2000} - -fig = mne.viz.plot_events(mtg_b_events) -fig.subplots_adjust(right=0.7) # make room for the legend - -raw_intensity_ac.copy().pick_channels(mtg_b).plot(events=mtg_b_events, - start=0, duration=10, - color='gray', - event_color={1: 'r', - 2: 'b', - 1000: 
'k', - 2000: 'k'}) +raw_intensity_ac.plot(events=all_events, start=0, duration=10, color='gray', + event_color={1: 'r', 2: 'b', 1000: 'k', 2000: 'k'}, + scalings=scalings) # ############################################################################### # # Next we define the range of our epochs, the rejection criteria, @@ -263,78 +217,30 @@ # # We will make epochs from the ac-derived heamo data and the phase data # # separately. -# reject_criteria = dict(hbo=80e-6) reject_criteria = None -tmin, tmax = -0.2, 2 -tmin_AC, tmax_AC = -2, 10 - -# Montage A -mtg_a = [i_index for i_index, i_label - in enumerate(raw_haemo.info['ch_names']) - if re.search(r'S[1-5]_', i_label)] - -# haemo epochs -mtg_a_haemo_epochs = mne.Epochs(raw_haemo, mtg_a_events, - event_id=mtg_a_event_dict, tmin=tmin_AC, - tmax=tmax_AC, reject=reject_criteria, - reject_by_annotation=False, proj=True, - baseline=(None, 0), preload=True, detrend=None, - verbose=True, event_repeated='drop') -mtg_a_haemo_epochs.plot_drop_log() - -mtg_a_epochs_ph = mne.Epochs(raw_intensity_ph, mtg_a_events, - event_id=mtg_a_event_dict, tmin=tmin, tmax=tmax, - reject=None, reject_by_annotation=False, - proj=False, baseline=(-0.2, 0), preload=True, - detrend=None, verbose=True) - -# two ways to plot epochs, should be the same - -# haemo epochs -fig = mne.viz.plot_epochs(mtg_a_haemo_epochs, n_epochs=5, n_channels=5, - scalings='auto', picks=mtg_a) -fig = mtg_a_haemo_epochs.plot(n_epochs=5, n_channels=5, scalings='auto', - picks=mtg_a) - -# ph epochs -fig = mne.viz.plot_epochs(mtg_a_epochs_ph, n_epochs=5, n_channels=5, - scalings='auto', picks=mtg_a) -fig = mtg_a_epochs_ph.plot(n_epochs=5, n_channels=5, scalings='auto', - picks=mtg_a) - - -# Montage B -mtg_b = [i_index for i_index, i_label - in enumerate(raw_haemo.info['ch_names']) - if re.search(r'S([6-9]|10)_', i_label)] - -# haemo epochs -mtg_b_haemo_epochs = mne.Epochs(raw_haemo, mtg_b_events, - event_id=mtg_b_event_dict, tmin=tmin_AC, - tmax=tmax_AC, 
reject=reject_criteria, - reject_by_annotation=False, proj=True, - baseline=(None, 0), preload=True, detrend=None, - verbose=True, event_repeated='drop') -mtg_b_haemo_epochs.plot_drop_log() - -mtg_b_epochs_ph = mne.Epochs(raw_intensity_ph, mtg_b_events, - event_id=mtg_b_event_dict, tmin=tmin, tmax=tmax, - reject=None, reject_by_annotation=False, - proj=False, baseline=(-0.2, 0), preload=True, - detrend=None, verbose=True) - -# two ways to plot epochs, should be the same -# haemo epochs -fig = mne.viz.plot_epochs(mtg_b_haemo_epochs, n_epochs=5, n_channels=5, - scalings='auto', picks=mtg_b) -fig = mtg_b_haemo_epochs.plot(n_epochs=5, n_channels=5, scalings='auto', - picks=mtg_b) - -# ph epochs -fig = mne.viz.plot_epochs(mtg_b_epochs_ph, n_epochs=5, n_channels=5, - scalings='auto', picks=mtg_b) -fig = mtg_b_epochs_ph.plot(n_epochs=5, n_channels=5, scalings='auto', - picks=mtg_b) +tmin_ph, tmax_ph = -0.2, 2 +tmin_ac, tmax_ac = -2, 10 + +all_haemo_epochs = mne.Epochs(raw_haemo, all_events, + event_id=all_event_dict, tmin=tmin_ac, + tmax=tmax_ac, reject=reject_criteria, + reject_by_annotation=False, proj=True, + baseline=(None, 0), preload=True, detrend=None, + verbose=True, event_repeated='drop') +all_haemo_epochs.plot_drop_log() + +all_phase_epochs = mne.Epochs(raw_intensity_ph, all_events, + event_id=all_event_dict, tmin=tmin_ph, + tmax=tmax_ph, reject=None, + reject_by_annotation=False, proj=False, + baseline=(-0.2, 0), preload=True, + detrend=None, verbose=True, + event_repeated='drop') +all_phase_epochs.plot_drop_log() + +# plot epochs +fig = all_haemo_epochs.plot(scalings=scalings) +fig = all_phase_epochs.plot(scalings=scalings) # ############################################################################### # # View consistency of responses across trials @@ -342,63 +248,31 @@ # # # # Now we can view the haemodynamic response for our different events. 
-# haemo plots -# Montage A -hbo_a = [i_index for i_index, i_label - in enumerate(mtg_a_haemo_epochs.info['ch_names']) - if re.search(r'S[1-5]_D[0-9] hbo', i_label)] - -hbr_a = [i_index for i_index, i_label - in enumerate(mtg_a_haemo_epochs.info['ch_names']) - if re.search(r'S[1-5]_D[0-9] hbr', i_label)] - -mtg_a_haemo_epochs['Montage_A/Event_1'].plot_image( - combine='mean', vmin=-30, vmax=30, - group_by={'Mtg A, Event 1, Oxy': hbo_a, 'Mtg A, Event 1, De-Oxy': hbr_a}, - ts_args=dict(ylim=dict(hbo=[-15, 15], hbr=[-15, 15]))) - -mtg_a_haemo_epochs['Montage_A/Event_2'].plot_image( - combine='mean', vmin=-30, vmax=30, - group_by={'Mtg A, Event 2, Oxy': hbo_a, 'Mtg A, Event 2, De-Oxy': hbr_a}, - ts_args=dict(ylim=dict(hbo=[-15, 15], hbr=[-15, 15]))) - -# ph epochs -fig = mtg_a_epochs_ph['Montage_A/Event_1'].plot_image( - combine='mean', vmin=-180, vmax=180, picks=mtg_a, colorbar=True, - title='Montage A Event 1') +# Haemo plots +vmin_ac = -60 +vmax_ac = 60 -fig = mtg_a_epochs_ph['Montage_A/Event_2'].plot_image( - combine='mean', vmin=-180, vmax=180, picks=mtg_a, colorbar=True, - title='Montage A Event 2') +all_haemo_epochs['Event_1'].plot_image(combine='mean', vmin=vmin_ac, + vmax=vmax_ac, ts_args=dict( + ylim=dict(hbo=[vmin_ac, vmax_ac], + hbr=[vmin_ac, vmax_ac])), + title='Haemo Event 1') +all_haemo_epochs['Event_2'].plot_image(combine='mean', vmin=vmin_ac, + vmax=vmax_ac, ts_args=dict( + ylim=dict(hbo=[vmin_ac, vmax_ac], + hbr=[vmin_ac, vmax_ac])), + title='Haemo Event 2') -# Montage B -hbo_b = [i_index for i_index, i_label - in enumerate(mtg_a_haemo_epochs.info['ch_names']) - if re.search(r'S([6-9]|10)_D([0-9]|1[0-6]) hbo', i_label)] - -hbr_b = [i_index for i_index, i_label - in enumerate(mtg_a_haemo_epochs.info['ch_names']) - if re.search(r'S([6-9]|10)_D([0-9]|1[0-6]) hbr', i_label)] - -mtg_b_haemo_epochs['Montage_B/Event_1'].plot_image( - combine='mean', vmin=-30, vmax=30, - group_by={'Mtg B, Event 1, Oxy': hbo_b, 'Mtg B, Event 1, De-Oxy': hbr_b}, - 
ts_args=dict(ylim=dict(hbo=[-15, 15], hbr=[-15, 15]))) - -mtg_b_haemo_epochs['Montage_B/Event_2'].plot_image( - combine='mean', vmin=-30, vmax=30, - group_by={'Mtg B, Event 2, Oxy': hbo_b, 'Mtg B, Event 2, De-Oxy': hbr_b}, - ts_args=dict(ylim=dict(hbo=[-15, 15], hbr=[-15, 15]))) +# Phase +vmin_ph = -180 +vmax_ph = 180 -# ph epochs -fig = mtg_b_epochs_ph['Montage_B/Event_1'].plot_image( - combine='mean', vmin=-180, vmax=180, picks=mtg_b, colorbar=True, - title='Montage B Event 1') +all_phase_epochs['Event_1'].plot_image(combine='mean', vmin=vmin_ph, + vmax=vmax_ph, title='Phase Event 1') -fig = mtg_b_epochs_ph['Montage_B/Event_2'].plot_image( - combine='mean', vmin=-180, vmax=180, picks=mtg_b, colorbar=True, - title='Montage B Event 2') +all_phase_epochs['Event_2'].plot_image(combine='mean', vmin=vmin_ph, + vmax=vmax_ph, title='Phase Event 2') # ############################################################################### # # View consistency of responses across channels @@ -408,54 +282,12 @@ # # pairs that we selected. All the channels in this data are located over the # # motor cortex, and all channels show a similar pattern in the data. 
-# haemo evoked -fig, axes = plt.subplots(nrows=4, ncols=2, figsize=(15, 6)) -clim = dict(hbo=[-10, 10], hbr=[-10, 10]) - -mtg_a_1_evoked_ac = mtg_a_haemo_epochs['Montage_A/Event_1'].average() -mtg_a_2_evoked_ac = mtg_a_haemo_epochs['Montage_A/Event_2'].average() -mtg_b_1_evoked_ac = mtg_b_haemo_epochs['Montage_B/Event_1'].average() -mtg_b_2_evoked_ac = mtg_b_haemo_epochs['Montage_B/Event_2'].average() - -mtg_a_1_evoked_ac.plot_image(axes=axes[0, 0], picks=hbo_a, - titles='HBO Montage A Event 1', clim=clim) -mtg_a_1_evoked_ac.plot_image(axes=axes[0, 1], picks=hbr_a, - titles='HBR Montage A Event 1', clim=clim) -mtg_a_2_evoked_ac.plot_image(axes=axes[1, 0], picks=hbo_a, - titles='HBO Montage A Event 2', clim=clim) -mtg_a_2_evoked_ac.plot_image(axes=axes[1, 1], picks=hbr_a, - titles='HBR Montage A Event 2', clim=clim) -mtg_b_1_evoked_ac.plot_image(axes=axes[2, 0], picks=hbo_b, - titles='HBO Montage B Event 1', clim=clim) -mtg_b_1_evoked_ac.plot_image(axes=axes[2, 1], picks=hbr_b, - titles='HBR Montage B Event 1', clim=clim) -mtg_b_2_evoked_ac.plot_image(axes=axes[3, 0], picks=hbo_b, - titles='HBO Montage B Event 2', clim=clim) -mtg_b_2_evoked_ac.plot_image(axes=axes[3, 1], picks=hbr_b, - titles='HBR Montage B Event 2', clim=clim) - -# Combine Montages -mtg_a_channels_ac = [i_index for i_index, i_label - in enumerate(mtg_a_1_evoked_ac.info['ch_names']) - if re.search(r'S[1-5]_', i_label)] - -mtg_b_channels_ac = [i_index for i_index, i_label - in enumerate(mtg_b_1_evoked_ac.info['ch_names']) - if re.search(r'S([6-9]|10)_', i_label)] - -# zero channels that don't correspond to montage A/B -mtg_a_1_evoked_ac._data[mtg_b_channels_ac, :] = 0 -mtg_a_2_evoked_ac._data[mtg_b_channels_ac, :] = 0 -mtg_b_1_evoked_ac._data[mtg_a_channels_ac, :] = 0 -mtg_b_2_evoked_ac._data[mtg_a_channels_ac, :] = 0 - -evoked_event_1_ac = mne.combine_evoked([mtg_a_1_evoked_ac, mtg_b_1_evoked_ac], - 'equal') -evoked_event_2_ac = mne.combine_evoked([mtg_a_2_evoked_ac, mtg_b_2_evoked_ac], - 'equal') 
+# Haemo +evoked_event_1_ac = all_haemo_epochs['Event_1'].average() +evoked_event_2_ac = all_haemo_epochs['Event_2'].average() fig, axes = plt.subplots(nrows=2, ncols=2, figsize=(15, 6)) -clim = dict(fnirs_raw=[-20, 20]) +clim = dict(hbo=[-30, 30], hbr=[-30, 30]) evoked_event_1_ac.plot_image(axes=axes[:, 0], titles=dict(hbo='HBO_Event_1', hbr='HBR_Event_1'), @@ -464,43 +296,9 @@ titles=dict(hbo='HBO_Event_2', hbr='HBR_Event_2'), clim=clim) -# ph evoked -fig, axes = plt.subplots(nrows=2, ncols=2, figsize=(15, 6)) -clim = dict(fnirs_ph=[-180, 180]) - -mtg_a_1_evoked_ph = mtg_a_epochs_ph['Montage_A/Event_1'].average() -mtg_a_2_evoked_ph = mtg_a_epochs_ph['Montage_A/Event_2'].average() -mtg_b_1_evoked_ph = mtg_b_epochs_ph['Montage_B/Event_1'].average() -mtg_b_2_evoked_ph = mtg_b_epochs_ph['Montage_B/Event_2'].average() - -mtg_a_1_evoked_ph.plot_image(axes=axes[0, 0], picks=mtg_a, - titles='Montage A Event 1', clim=clim) -mtg_a_2_evoked_ph.plot_image(axes=axes[1, 0], picks=mtg_a, - titles='Montage A Event 2', clim=clim) -mtg_b_1_evoked_ph.plot_image(axes=axes[0, 1], picks=mtg_b, - titles='Montage B Event 1', clim=clim) -mtg_b_2_evoked_ph.plot_image(axes=axes[1, 1], picks=mtg_b, - titles='Montage B Event 2', clim=clim) - -# Combine Montages -mtg_a_channels_ph = [i_index for i_index, i_label - in enumerate(mtg_a_1_evoked_ph.info['ch_names']) - if re.search(r'S[1-5]_', i_label)] - -mtg_b_channels_ph = [i_index for i_index, i_label - in enumerate(mtg_b_1_evoked_ph.info['ch_names']) - if re.search(r'S([6-9]|10)_', i_label)] - -# zero channels that don't correspond to montage A/B -mtg_a_1_evoked_ph._data[mtg_b_channels_ph, :] = 0 -mtg_a_2_evoked_ph._data[mtg_b_channels_ph, :] = 0 -mtg_b_1_evoked_ph._data[mtg_a_channels_ph, :] = 0 -mtg_b_2_evoked_ph._data[mtg_a_channels_ph, :] = 0 - -evoked_event_1_ph = mne.combine_evoked([mtg_a_1_evoked_ph, mtg_b_1_evoked_ph], - 'equal') -evoked_event_2_ph = mne.combine_evoked([mtg_a_2_evoked_ph, mtg_b_2_evoked_ph], - 'equal') +# Phase 
+evoked_event_1_ph = all_phase_epochs['Event_1'].average() +evoked_event_2_ph = all_phase_epochs['Event_2'].average() fig, axes = plt.subplots(nrows=1, ncols=2, figsize=(15, 6)) clim = dict(fnirs_ph=[-180, 180]) @@ -517,21 +315,23 @@ # # We can also plot a similat figure for phase data. -# haemo +# Haemo +fig, axes = plt.subplots(nrows=1, ncols=2, figsize=(15, 6)) + evoked_dict_ac = {'Event_1': evoked_event_1_ac, 'Event_2': evoked_event_2_ac} color_dict = {'Event_1': 'r', 'Event_2': 'b'} mne.viz.plot_compare_evokeds(evoked_dict_ac, combine="mean", ci=0.95, - colors=color_dict) + colors=color_dict, axes=axes.tolist()) -# ph +# Phase evoked_dict_ph = {'Event_1': evoked_event_1_ph, 'Event_2': evoked_event_2_ph} color_dict = {'Event_1': 'r', 'Event_2': 'b'} mne.viz.plot_compare_evokeds(evoked_dict_ph, combine="mean", ci=0.95, - colors=color_dict) + colors=color_dict, title='Phase') # ############################################################################### # # View topographic representation of activity @@ -540,14 +340,14 @@ # # Next we view how the topographic activity changes throughout the # # haemodynamic and phase response. -# ac +# Haemo times = np.arange(0.0, 10.0, 2.0) topomap_args = dict(extrapolate='local') fig = evoked_event_1_ac.plot_joint(times=times, topomap_args=topomap_args) fig = evoked_event_2_ac.plot_joint(times=times, topomap_args=topomap_args) -# ph +# Phase times = np.arange(0.0, 2.0, 0.5) topomap_args = dict(extrapolate='local') @@ -563,205 +363,104 @@ # # We generate topo maps for events 1 and 2 to view the location of activity. # # First we visualise the HbO activity. 
-# ac HBO -fig, axes = plt.subplots(nrows=2, ncols=4, figsize=(9, 5), - gridspec_kw=dict(width_ratios=[1, 1, 1, 0.1])) +# Haemo +fig, axes = plt.subplots(nrows=2, ncols=3, figsize=(9, 5), + gridspec_kw=dict(width_ratios=[1, 1, 0.1])) topomap_args = dict(extrapolate='local', size=3, res=256, sensors='k.') times = 1.0 -hbo_a = [i_index for i_index, i_label - in enumerate(mtg_a_1_evoked_ac.info['ch_names']) - if re.search(r'S[1-5]_D[0-9] hbo', i_label)] - -hbo_b = [i_index for i_index, i_label - in enumerate(mtg_b_1_evoked_ac.info['ch_names']) - if re.search(r'S([6-9]|10)_D([0-9]|1[0-6]) hbo', i_label)] - -evoked_event_1_ac.copy().pick(hbo_a).plot_topomap(times=times, - axes=axes[0, 0], - colorbar=False, - **topomap_args) - -evoked_event_2_ac.copy().pick(hbo_a).plot_topomap(times=times, - axes=axes[1, 0], - colorbar=False, - **topomap_args) - -evoked_event_1_ac.copy().pick(hbo_b).plot_topomap(times=times, - axes=axes[0, 1], - colorbar=False, - **topomap_args) - -evoked_event_2_ac.copy().pick(hbo_b).plot_topomap(times=times, - axes=axes[1, 1], - colorbar=False, - **topomap_args) - -evoked_event_1_ac.copy().pick(hbo_a+hbo_b).plot_topomap(times=times, - axes=axes[0, 2:], - colorbar=True, - **topomap_args) - -evoked_event_2_ac.copy().pick(hbo_a+hbo_b).plot_topomap(times=times, - axes=axes[1, 2:], - colorbar=True, - **topomap_args) - -for column, condition in enumerate(['Montage A', 'Montage B', 'Combined']): - for row, chroma in enumerate(['HBO Event 1', 'HBO Event 2']): - axes[row, column].set_title('{}: {}'.format(chroma, condition)) -fig.tight_layout() +all_haemo_epochs['Event_1'].average(picks='hbo').plot_topomap(times=times, + axes=axes[0, 0], + colorbar=False, + **topomap_args) -# ac HBR -fig, axes = plt.subplots(nrows=2, ncols=4, figsize=(9, 5), - gridspec_kw=dict(width_ratios=[1, 1, 1, 0.1])) +all_haemo_epochs['Event_2'].average(picks='hbo').plot_topomap(times=times, + axes=axes[0, 1:], + colorbar=True, + **topomap_args) -topomap_args = dict(extrapolate='local', 
size=3, res=256, sensors='k.') -times = 1.0 +all_haemo_epochs['Event_1'].average(picks='hbr').plot_topomap(times=times, + axes=axes[1, 0], + colorbar=False, + **topomap_args) + +all_haemo_epochs['Event_2'].average(picks='hbr').plot_topomap(times=times, + axes=axes[1, 1:], + colorbar=True, + **topomap_args) -hbr_a = [i_index for i_index, i_label - in enumerate(mtg_a_1_evoked_ac.info['ch_names']) - if re.search(r'S[1-5]_D[0-9] hbr', i_label)] - -hbr_b = [i_index for i_index, i_label - in enumerate(mtg_b_1_evoked_ac.info['ch_names']) - if re.search(r'S([6-9]|10)_D([0-9]|1[0-6]) hbr', i_label)] - - -evoked_event_1_ac.copy().pick(hbr_a).plot_topomap(times=times, - axes=axes[0, 0], - colorbar=False, - **topomap_args) - -evoked_event_2_ac.copy().pick(hbr_a).plot_topomap(times=times, - axes=axes[1, 0], - colorbar=False, - **topomap_args) - -evoked_event_1_ac.copy().pick(hbr_b).plot_topomap(times=times, - axes=axes[0, 1], - colorbar=False, - **topomap_args) - -evoked_event_2_ac.copy().pick(hbr_b).plot_topomap(times=times, - axes=axes[1, 1], - colorbar=False, - **topomap_args) - -evoked_event_1_ac.copy().pick(hbr_a+hbr_b).plot_topomap(times=times, - axes=axes[0, 2:], - colorbar=True, - **topomap_args) -evoked_event_2_ac.copy().pick(hbr_a+hbr_b).plot_topomap(times=times, - axes=axes[1, 2:], - colorbar=True, - **topomap_args) - -for column, condition in enumerate(['Montage A', 'Montage B', 'Combined']): - for row, chroma in enumerate(['HBR Event 1', 'HBR Event 2']): +for column, condition in enumerate(['Event 1', 'Event 2']): + for row, chroma in enumerate(['HBO', 'HBR']): axes[row, column].set_title('{}: {}'.format(chroma, condition)) fig.tight_layout() -# ph -fig, axes = plt.subplots(nrows=2, ncols=4, figsize=(9, 5), - gridspec_kw=dict(width_ratios=[1, 1, 1, 0.1])) +# Phase +fig, axes = plt.subplots(nrows=1, ncols=3, figsize=(9, 5), + gridspec_kw=dict(width_ratios=[1, 1, 0.1])) topomap_args = dict(extrapolate='local', size=3, res=256, sensors='k.') times = 1.0 
-evoked_event_1_ph.copy().pick(mtg_a_channels_ph).plot_topomap(times=times, - axes=axes[0, 0], - colorbar=False, - **topomap_args) - -evoked_event_2_ph.copy().pick(mtg_a_channels_ph).plot_topomap(times=times, - axes=axes[1, 0], - colorbar=False, - **topomap_args) -evoked_event_1_ph.copy().pick(mtg_b_channels_ph).plot_topomap(times=times, - axes=axes[0, 1], - colorbar=False, - **topomap_args) +all_phase_epochs['Event_1'].average().plot_topomap(times=times, axes=axes[0], + colorbar=False, + **topomap_args) -evoked_event_2_ph.copy().pick(mtg_b_channels_ph).plot_topomap(times=times, - axes=axes[1, 1], - colorbar=False, - **topomap_args) +all_phase_epochs['Event_2'].average().plot_topomap(times=times, axes=axes[1:], + colorbar=True, + **topomap_args) -evoked_event_1_ph.plot_topomap(times=times, axes=axes[0, 2:], colorbar=True, - **topomap_args) -evoked_event_2_ph.plot_topomap(times=times, axes=axes[1, 2:], colorbar=True, - **topomap_args) - -for column, condition in enumerate(['Montage A', 'Montage B', 'Combined']): - for row, chroma in enumerate(['Event 1', 'Event 2']): - axes[row, column].set_title('{}: {}'.format(chroma, condition)) +for column, condition in enumerate(['Event 1', 'Event 2']): + axes[column].set_title('{}: {}'.format(chroma, condition)) fig.tight_layout() # ############################################################################### # # And we can plot the comparison at a single time point for two conditions. 
-# ac HBO -fig, axes = plt.subplots(nrows=1, ncols=4, figsize=(9, 5), +# Haemo +fig, axes = plt.subplots(nrows=2, ncols=4, figsize=(9, 5), gridspec_kw=dict(width_ratios=[1, 1, 1, 0.1])) vmin, vmax, ts = -0.192, 0.992, 0.1 vmin = -5 vmax = 5 -evoked_event_1_ac.plot_topomap(ch_type='hbo', times=ts, axes=axes[0], +evoked_diff_ac = mne.combine_evoked([evoked_event_1_ac, -evoked_event_2_ac], + weights='equal') + +evoked_event_1_ac.plot_topomap(ch_type='hbo', times=ts, axes=axes[0, 0], vmin=vmin, vmax=vmax, colorbar=False, **topomap_args) -evoked_event_2_ac.plot_topomap(ch_type='hbo', times=ts, axes=axes[1], +evoked_event_2_ac.plot_topomap(ch_type='hbo', times=ts, axes=axes[0, 1], vmin=vmin, vmax=vmax, colorbar=False, **topomap_args) -evoked_diff_ac = mne.combine_evoked([evoked_event_1_ac, -evoked_event_2_ac], - weights='equal') - -evoked_diff_ac.plot_topomap(ch_type='hbo', times=ts, axes=axes[2:], +evoked_diff_ac.plot_topomap(ch_type='hbo', times=ts, axes=axes[0, 2:], vmin=vmin, vmax=vmax, colorbar=True, **topomap_args) -for column, condition in enumerate( - ['HBO Event 1', 'HBO Event 2', 'HBO Difference']): - axes[column].set_title('{}'.format(condition)) -fig.tight_layout() - - -# ac HBR -fig, axes = plt.subplots(nrows=1, ncols=4, figsize=(9, 5), - gridspec_kw=dict(width_ratios=[1, 1, 1, 0.1])) -vmin, vmax, ts = -0.192, 0.992, 0.1 -vmin = -5 -vmax = 5 - -evoked_event_1_ac.plot_topomap(ch_type='hbr', times=ts, axes=axes[0], +evoked_event_1_ac.plot_topomap(ch_type='hbr', times=ts, axes=axes[1, 0], vmin=vmin, vmax=vmax, colorbar=False, **topomap_args) -evoked_event_2_ac.plot_topomap(ch_type='hbr', times=ts, axes=axes[1], +evoked_event_2_ac.plot_topomap(ch_type='hbr', times=ts, axes=axes[1, 1], vmin=vmin, vmax=vmax, colorbar=False, **topomap_args) -evoked_diff_ac = mne.combine_evoked([evoked_event_1_ac, -evoked_event_2_ac], - weights='equal') - -evoked_diff_ac.plot_topomap(ch_type='hbr', times=ts, axes=axes[2:], +evoked_diff_ac.plot_topomap(ch_type='hbr', times=ts, 
axes=axes[1, 2:], vmin=vmin, vmax=vmax, colorbar=True, **topomap_args) -for column, condition in enumerate( - ['HBR Event 1', 'HBR Event 2', 'HBR Difference']): - axes[column].set_title('{}'.format(condition)) +for column, condition in enumerate(['Event 1', 'Event 2', 'Difference']): + for row, chroma in enumerate(['HBO', 'HBR']): + axes[row, column].set_title('{}: {}'.format(chroma, condition)) fig.tight_layout() -# ph +# Phase fig, axes = plt.subplots(nrows=1, ncols=4, figsize=(9, 5), gridspec_kw=dict(width_ratios=[1, 1, 1, 0.1])) vmin, vmax, ts = -0.192, 0.992, 0.1 @@ -788,7 +487,7 @@ # # Lastly, we can also look at the individual waveforms to see what is # # driving the topographic plot above. -# ac HBO +# HBO fig, axes = plt.subplots(nrows=1, ncols=1, figsize=(6, 4)) mne.viz.plot_evoked_topo(evoked_event_1_ac.copy().pick('hbo'), color='b', axes=axes, legend=False) @@ -801,7 +500,7 @@ fig.legend(leg_lines, ['HBO Event 1', 'HBO Event 2'], loc='lower right') -# ac HBR +# HBR fig, axes = plt.subplots(nrows=1, ncols=1, figsize=(6, 4)) mne.viz.plot_evoked_topo(evoked_event_1_ac.copy().pick('hbr'), color='b', axes=axes, legend=False) @@ -814,7 +513,7 @@ fig.legend(leg_lines, ['HBR Event 1', 'HBR Event 2'], loc='lower right') -# ph +# Phase fig, axes = plt.subplots(nrows=1, ncols=1, figsize=(6, 4)) mne.viz.plot_evoked_topo(evoked_event_1_ph, color='b', axes=axes, legend=False) mne.viz.plot_evoked_topo(evoked_event_2_ph, color='r', axes=axes, legend=False) From 3d15b5c24fc93caf6c0ab5aadfb62f0ece3dd973 Mon Sep 17 00:00:00 2001 From: Jonathan Kuziek Date: Wed, 17 Jun 2020 14:35:50 -0600 Subject: [PATCH 117/167] in progress changes for testing --- mne/cov.py | 10 +++-- mne/io/boxy/boxy.py | 38 ++++++++++++------- mne/io/tests/test_constants.py | 2 + mne/tests/test_defaults.py | 6 ++- .../preprocessing/plot_80_boxy_processing.py | 20 +++++----- 5 files changed, 49 insertions(+), 27 deletions(-) diff --git a/mne/cov.py b/mne/cov.py index 69f7530f225..3fe8d9e601f 100644 --- 
a/mne/cov.py +++ b/mne/cov.py @@ -1183,7 +1183,7 @@ class _RegCovariance(BaseEstimator): """Aux class.""" def __init__(self, info, grad=0.1, mag=0.1, eeg=0.1, seeg=0.1, ecog=0.1, - hbo=0.1, hbr=0.1, fnirs_raw=0.1, fnirs_od=0.1, + hbo=0.1, hbr=0.1, fnirs_raw=0.1, fnirs_od=0.1, fnirs_ph=0.1, csd=0.1, store_precision=False, assume_centered=False): self.info = info # For sklearn compat, these cannot (easily?) be combined into @@ -1197,6 +1197,7 @@ def __init__(self, info, grad=0.1, mag=0.1, eeg=0.1, seeg=0.1, ecog=0.1, self.hbr = hbr self.fnirs_raw = fnirs_raw self.fnirs_od = fnirs_od + self.fnirs_ph = fnirs_ph self.csd = csd self.store_precision = store_precision self.assume_centered = assume_centered @@ -1473,7 +1474,7 @@ def _smart_eigh(C, info, rank, scalings=None, projs=None, @verbose def regularize(cov, info, mag=0.1, grad=0.1, eeg=0.1, exclude='bads', proj=True, seeg=0.1, ecog=0.1, hbo=0.1, hbr=0.1, - fnirs_raw=0.1, fnirs_od=0.1, csd=0.1, + fnirs_raw=0.1, fnirs_od=0.1, fnirs_ph=0.1, csd=0.1, rank=None, scalings=None, verbose=None): """Regularize noise covariance matrix. @@ -1518,6 +1519,8 @@ def regularize(cov, info, mag=0.1, grad=0.1, eeg=0.1, exclude='bads', Regularization factor for fNIRS raw signals. fnirs_od : float (default 0.1) Regularization factor for fNIRS optical density signals. + fnirs_ph : float (default 0.1) + Regularization factor for fNIRS phase. csd : float (default 0.1) Regularization factor for EEG-CSD signals. 
%(rank_None)s @@ -1546,7 +1549,8 @@ def regularize(cov, info, mag=0.1, grad=0.1, eeg=0.1, exclude='bads', info._check_consistency() scalings = _handle_default('scalings_cov_rank', scalings) regs = dict(eeg=eeg, seeg=seeg, ecog=ecog, hbo=hbo, hbr=hbr, - fnirs_raw=fnirs_raw, fnirs_od=fnirs_od, csd=csd) + fnirs_raw=fnirs_raw, fnirs_od=fnirs_od, fnirs_ph=fnirs_ph, + csd=csd) if exclude is None: raise ValueError('exclude must be a list of strings or "bads"') diff --git a/mne/io/boxy/boxy.py b/mne/io/boxy/boxy.py index c1c2066d929..a0483407a19 100644 --- a/mne/io/boxy/boxy.py +++ b/mne/io/boxy/boxy.py @@ -5,7 +5,7 @@ import glob as glob import re as re import numpy as np -import scipy.io +import scipy.io as spio import os from ..base import BaseRaw @@ -18,6 +18,7 @@ @fill_doc def read_raw_boxy(fname, datatype='AC', preload=False, verbose=None): """Reader for a BOXY optical imaging recording. + Parameters ---------- fname : str @@ -26,10 +27,12 @@ def read_raw_boxy(fname, datatype='AC', preload=False, verbose=None): Type of data to return (AC, DC, or Ph) %(preload)s %(verbose)s + Returns ------- raw : instance of RawBOXY A Raw object containing BOXY data. + See Also -------- mne.io.Raw : Documentation of attribute and methods. @@ -40,6 +43,7 @@ def read_raw_boxy(fname, datatype='AC', preload=False, verbose=None): @fill_doc class RawBOXY(BaseRaw): """Raw object from a BOXY optical imaging file. + Parameters ---------- fname : str @@ -48,6 +52,7 @@ class RawBOXY(BaseRaw): Type of data to return (AC, DC, or Ph) %(preload)s %(verbose)s + See Also -------- mne.io.Raw : Documentation of attribute and methods. @@ -242,7 +247,7 @@ def __init__(self, fname, datatype='AC', preload=False, verbose=None): mtg_det_num.append(detect_num[start_blk]) # Get modulation frequency for each channel and montage. # Assuming modulation freq in MHz. 
- mtg_mdf.append([int(chan_mdf)*1e6 for chan_mdf + mtg_mdf.append([int(chan_mdf) * 1e6 for chan_mdf in chan_modulation[start:end]]) for i_type in data_types: for i_coord in range(start, end): @@ -353,9 +358,9 @@ def __init__(self, fname, datatype='AC', preload=False, verbose=None): # Number if rows in data file depends on data file type. if filetype[0] == 'non-parsed': - last_samps = ((diff*len(blk_names[0])) // (source_num[0])) + last_samps = ((diff * len(blk_names[0])) // (source_num[0])) elif filetype[0] == 'parsed': - last_samps = diff*len(blk_names[0]) + last_samps = diff * len(blk_names[0]) # First sample is technically sample 0, not the start line in the file. first_samps = 0 @@ -365,11 +370,15 @@ def __init__(self, fname, datatype='AC', preload=False, verbose=None): super(RawBOXY, self).__init__( info, preload, filenames=[fname], first_samps=[first_samps], - last_samps=[last_samps-1], + last_samps=[last_samps - 1], raw_extras=[raw_extras], verbose=verbose) def _read_segment_file(self, data, idx, fi, start, stop, cals, mult): """Read a segment of data from a file. + + Boxy file organises data in two ways, parsed or un-parsed. + Regardless of type, output has (n_montages x n_sources x n_detectors + + n_marker_channels) rows, and (n_timepoints x n_blocks) columns. 
""" source_num = self._raw_extras[fi]['source_num'] detect_num = self._raw_extras[fi]['detect_num'] @@ -395,7 +404,7 @@ def _read_segment_file(self, data, idx, fi, start, stop, cals, mult): event_data = [] for file_num, i_file in enumerate(event_files[key]): - event_data.append(scipy.io.loadmat( + event_data.append(spio.loadmat( event_files[key][file_num])['event']) if event_data != []: print('Event file found!') @@ -418,7 +427,7 @@ def _read_segment_file(self, data, idx, fi, start, stop, cals, mult): all_blocks = [] block_markers = [] for i_blk, blk_name in enumerate(blocks[i_mtg]): - file_num = i_blk + (i_mtg*len(blocks[i_mtg])) + file_num = i_blk + (i_mtg * len(blocks[i_mtg])) boxy_file = boxy_files[file_num] boxy_data = [] with open(boxy_file, 'r') as data_file: @@ -540,7 +549,7 @@ def _read_segment_file(self, data, idx, fi, start, stop, cals, mult): print('Detrending phase data') # Remove trends and drifts that occur over time. - y = np.linspace(0, np.size(data_, axis=1)-1, + y = np.linspace(0, np.size(data_, axis=1) - 1, np.size(data_, axis=1)) x = np.transpose(y) for i_chan in range(np.size(data_, axis=0)): @@ -568,7 +577,7 @@ def _read_segment_file(self, data, idx, fi, start, stop, cals, mult): for i_chan in range(np.size(data_, axis=0)): outliers = np.where(np.abs(data_[i_chan, :]) > - (ph_out_thr*sdph[i_chan])) + (ph_out_thr * sdph[i_chan])) outliers = outliers[0] if len(outliers) > 0: if outliers[0] == 0: @@ -581,13 +590,14 @@ def _read_segment_file(self, data, idx, fi, start, stop, cals, mult): for i_pt in range(n_ph_out[i_chan]): j_pt = outliers[i_pt] data_[i_chan, j_pt] = ( - (data_[i_chan, j_pt-1] - + data_[i_chan, j_pt+1])/2) + (data_[i_chan, j_pt - 1] + + data_[i_chan, j_pt + 1]) / 2) # Convert phase to pico seconds. 
for i_chan in range(np.size(data_, axis=0)): - data_[i_chan, :] = ((1e12*data_[i_chan, :]) - / (360*mtg_mdf[i_mtg][i_chan])) + data_[i_chan, :] = ((1e12 * data_[i_chan, :]) + / (360 + * mtg_mdf[i_mtg][i_chan])) # Swap channels to match new wavelength order. for i_chan in range(0, len(data_), 2): @@ -599,7 +609,7 @@ def _read_segment_file(self, data, idx, fi, start, stop, cals, mult): temp_markers = np.zeros((len(data_[0, :]),)) for event_num, event_info in enumerate( event_data[file_num]): - temp_markers[event_info[0]-1] = event_info[1] + temp_markers[event_info[0] - 1] = event_info[1] block_markers.append(temp_markers) except Exception: # Add our markers to the data array based on filetype. diff --git a/mne/io/tests/test_constants.py b/mne/io/tests/test_constants.py index 5b6c84fe96b..cbf51ec0610 100644 --- a/mne/io/tests/test_constants.py +++ b/mne/io/tests/test_constants.py @@ -47,6 +47,7 @@ 301, # FNIRS deoxyhemoglobin 302, # FNIRS raw data 303, # FNIRS optical density + 304, # FNIRS phase data 1000, # For testing the MCG software 2001, # Generic axial gradiometer 3011, # VV prototype wirewound planar sensor @@ -73,6 +74,7 @@ def test_constants(tmpdir): """Test compensation.""" tmpdir = str(tmpdir) # old pytest... 
+ print('ASDASDASDSADASDSADSADSADSA ' + str(tmpdir)) dest = op.join(tmpdir, 'fiff.zip') _fetch_file('https://codeload.github.com/mne-tools/fiff-constants/zip/' + commit, dest) diff --git a/mne/tests/test_defaults.py b/mne/tests/test_defaults.py index aa3eee4af7c..2a5fb2c0f6a 100644 --- a/mne/tests/test_defaults.py +++ b/mne/tests/test_defaults.py @@ -39,7 +39,7 @@ def test_si_units(): 'n': 1e-9, 'f': 1e-15, } - known_SI = {'V', 'T', 'Am', 'm', 'M', + known_SI = {'V', 'T', 'Am', 'm', 'M', u'\N{DEGREE SIGN}', 'AU', 'GOF'} # not really SI but we tolerate them powers = '²' @@ -79,4 +79,8 @@ def _split_si(x): if key == 'csd_bad': assert not np.isclose(scale, want_scale, rtol=10) else: + print('DASDASDASDASDSA') + print(key) + print(scale) + print(want_scale) assert_allclose(scale, want_scale, rtol=1e-12) diff --git a/tutorials/preprocessing/plot_80_boxy_processing.py b/tutorials/preprocessing/plot_80_boxy_processing.py index 282fade7b82..bd5e3771197 100644 --- a/tutorials/preprocessing/plot_80_boxy_processing.py +++ b/tutorials/preprocessing/plot_80_boxy_processing.py @@ -87,7 +87,8 @@ # # First we remove channels that are too close together (short channels) to # # detect a neural response (less than 3 cm distance between optodes). # # These short channels can be seen in the figure above. -# # To achieve this we pick all the channels that are not considered to be short. +# # To achieve this we pick all the channels that are not +# # considered to be short. picks = mne.pick_types(raw_intensity_ac.info, meg=False, fnirs=True, stim=True) @@ -183,15 +184,16 @@ # # Extract epochs # # -------------- # # -# # Now that the signal has been converted to relative haemoglobin concentration, -# # and the unwanted heart rate component has been removed, we can extract epochs -# # related to each of the experimental conditions. 
+# # Now that the signal has been converted to relative haemoglobin +# # concentration, and the unwanted heart rate component has been removed, +# # we can extract epochs related to each of the experimental conditions. # # -# # First we extract the events of interest and visualise them to ensure they are -# # correct. +# # First we extract the events of interest and visualise them to +# # ensure they are correct. -# Since our events and timings for this data set are the same across montages, -# we are going to find events for each montage separately and combine them later +# # Since our events and timings for this data set are the same +# # across montages, we are going to find events for each montage separately +# # and combine them later # All events all_events = mne.find_events(raw_intensity_ac, stim_channel=['Markers a', @@ -313,7 +315,7 @@ # # Plot both the HbO and HbR on the same figure to illustrate the relation # # between the two signals. -# # We can also plot a similat figure for phase data. +# # We can also plot a similar figure for phase data. 
# Haemo fig, axes = plt.subplots(nrows=1, ncols=2, figsize=(15, 6)) From 84f9425223774bb5a5e930ebfa7f2f66b576fb3d Mon Sep 17 00:00:00 2001 From: Jonathan Kuziek Date: Wed, 17 Jun 2020 15:48:07 -0600 Subject: [PATCH 118/167] further changes for tests --- doc/python_reference.rst | 1 + 1 file changed, 1 insertion(+) diff --git a/doc/python_reference.rst b/doc/python_reference.rst index 2f4909f843b..23a3731c945 100644 --- a/doc/python_reference.rst +++ b/doc/python_reference.rst @@ -71,6 +71,7 @@ Reading raw data read_raw_fif read_raw_eximia read_raw_fieldtrip + read_raw_boxy Base class: From 4ffd552af5775136084eef9e131e9d98fbd64fb9 Mon Sep 17 00:00:00 2001 From: Jonathan Kuziek Date: Thu, 18 Jun 2020 10:01:40 -0600 Subject: [PATCH 119/167] fixed some apparent style issues, removed print statements added previously --- mne/io/boxy/boxy.py | 11 +++++------ mne/io/tests/test_constants.py | 1 - 2 files changed, 5 insertions(+), 7 deletions(-) diff --git a/mne/io/boxy/boxy.py b/mne/io/boxy/boxy.py index ddd1b79b2bc..6a602d71ec7 100644 --- a/mne/io/boxy/boxy.py +++ b/mne/io/boxy/boxy.py @@ -544,7 +544,7 @@ def _read_segment_file(self, data, idx, fi, start, stop, cals, mult): # such as crossing over from 0/360 degrees. # Estimate mean phase of first 50 points. # If a point differs more than 90 degrees from the - # mean, add or subtract 360 degress from that point. + # mean, add or subtract 360 degrees from that point. for i_chan in range(np.size(data_, axis=0)): if np.mean(data_[i_chan, :50]) < 180: wrapped_points = data_[i_chan, :] > 270 @@ -596,14 +596,13 @@ def _read_segment_file(self, data, idx, fi, start, stop, cals, mult): for i_pt in range(n_ph_out[i_chan]): j_pt = outliers[i_pt] data_[i_chan, j_pt] = ( - (data_[i_chan, j_pt - 1] - + data_[i_chan, j_pt + 1]) / 2) + (data_[i_chan, j_pt - 1] + + data_[i_chan, j_pt + 1]) / 2) # Convert phase to pico seconds. 
for i_chan in range(np.size(data_, axis=0)): - data_[i_chan, :] = ((1e12 * data_[i_chan, :]) - / (360 - * mtg_mdf[i_mtg][i_chan])) + data_[i_chan, :] = ((1e12 * data_[i_chan, :]) / + (360 * mtg_mdf[i_mtg][i_chan])) # Swap channels to match new wavelength order. for i_chan in range(0, len(data_), 2): diff --git a/mne/io/tests/test_constants.py b/mne/io/tests/test_constants.py index cbf51ec0610..0859969327b 100644 --- a/mne/io/tests/test_constants.py +++ b/mne/io/tests/test_constants.py @@ -74,7 +74,6 @@ def test_constants(tmpdir): """Test compensation.""" tmpdir = str(tmpdir) # old pytest... - print('ASDASDASDSADASDSADSADSADSA ' + str(tmpdir)) dest = op.join(tmpdir, 'fiff.zip') _fetch_file('https://codeload.github.com/mne-tools/fiff-constants/zip/' + commit, dest) From 1b2ae92d46bb7458d4b76391896321f2ebb6b480 Mon Sep 17 00:00:00 2001 From: Jonathan Kuziek Date: Thu, 18 Jun 2020 14:28:31 -0600 Subject: [PATCH 120/167] more fixes, and removed forgotten print commands --- mne/io/boxy/boxy.py | 3 ++- mne/tests/test_defaults.py | 4 ---- 2 files changed, 2 insertions(+), 5 deletions(-) diff --git a/mne/io/boxy/boxy.py b/mne/io/boxy/boxy.py index 6a602d71ec7..3249301a075 100644 --- a/mne/io/boxy/boxy.py +++ b/mne/io/boxy/boxy.py @@ -5,7 +5,6 @@ import glob as glob import re as re import numpy as np -import scipy.io as spio import os from ..base import BaseRaw @@ -386,6 +385,8 @@ def _read_segment_file(self, data, idx, fi, start, stop, cals, mult): Regardless of type, output has (n_montages x n_sources x n_detectors + n_marker_channels) rows, and (n_timepoints x n_blocks) columns. 
""" + import scipy.io as spio + source_num = self._raw_extras[fi]['source_num'] detect_num = self._raw_extras[fi]['detect_num'] start_line = self._raw_extras[fi]['start_line'] diff --git a/mne/tests/test_defaults.py b/mne/tests/test_defaults.py index 2a5fb2c0f6a..fc603a20dd7 100644 --- a/mne/tests/test_defaults.py +++ b/mne/tests/test_defaults.py @@ -79,8 +79,4 @@ def _split_si(x): if key == 'csd_bad': assert not np.isclose(scale, want_scale, rtol=10) else: - print('DASDASDASDASDSA') - print(key) - print(scale) - print(want_scale) assert_allclose(scale, want_scale, rtol=1e-12) From b49868995ceb6a099900fffa7bd0cdf23a7b9cb2 Mon Sep 17 00:00:00 2001 From: Jonathan Kuziek Date: Mon, 22 Jun 2020 12:00:57 -0600 Subject: [PATCH 121/167] fixes for style and CI tests --- mne/io/boxy/boxy.py | 4 ++-- .../preprocessing/plot_80_boxy_processing.py | 16 +++++++--------- 2 files changed, 9 insertions(+), 11 deletions(-) diff --git a/mne/io/boxy/boxy.py b/mne/io/boxy/boxy.py index 3249301a075..a82f5f61f92 100644 --- a/mne/io/boxy/boxy.py +++ b/mne/io/boxy/boxy.py @@ -23,7 +23,7 @@ def read_raw_boxy(fname, datatype='AC', preload=False, verbose=None): fname : str Path to the BOXY data folder. datatype : str - Type of data to return (AC, DC, or Ph) + Type of data to return (AC, DC, or Ph). %(preload)s %(verbose)s @@ -48,7 +48,7 @@ class RawBOXY(BaseRaw): fname : str Path to the BOXY data folder. datatype : str - Type of data to return (AC, DC, or Ph) + Type of data to return (AC, DC, or Ph). 
%(preload)s %(verbose)s diff --git a/tutorials/preprocessing/plot_80_boxy_processing.py b/tutorials/preprocessing/plot_80_boxy_processing.py index 1c7ae234839..4321fc6b128 100644 --- a/tutorials/preprocessing/plot_80_boxy_processing.py +++ b/tutorials/preprocessing/plot_80_boxy_processing.py @@ -38,11 +38,11 @@ # get channel indices for our two montages mtg_a = [raw_intensity_ac.ch_names[i_index] for i_index, i_label in enumerate(raw_intensity_ac.info['ch_names']) - if re.search(r'S[1-5]_', i_label)] + if re.search(r'_D[1-8] ', i_label)] mtg_b = [raw_intensity_ac.ch_names[i_index] for i_index, i_label in enumerate(raw_intensity_ac.info['ch_names']) - if re.search(r'S([6-9]|10)_', i_label)] + if re.search(r'_D(9|1[0-6]) ', i_label)] # plot the raw data for each data type # AC @@ -171,8 +171,7 @@ fig.suptitle('Before filtering', weight='bold', size='x-large') fig.subplots_adjust(top=0.88) -raw_haemo = raw_haemo.filter(0.05, 0.7, h_trans_bandwidth=0.2, - l_trans_bandwidth=0.02) +raw_haemo = raw_haemo.filter(0.05, 0.7) fig, axes = plt.subplots(nrows=1, ncols=2, figsize=(15, 6)) fig = raw_haemo.plot_psd(average=True, ax=axes) @@ -195,8 +194,7 @@ # # and combine them later # All events -all_events = mne.find_events(raw_intensity_ac, stim_channel=['Markers a', - 'Markers b']) +all_events = mne.find_events(raw_intensity_ac, stim_channel=['Markers b']) all_event_dict = {'Event_1': 1, 'Event_2': 2, @@ -414,8 +412,8 @@ **topomap_args) for column, condition in enumerate(['Event 1', 'Event 2']): - axes[column].set_title('{}: {}'.format(chroma, condition)) -fig.tight_layout() + axes[column].set_title('Phase: {}'.format(condition)) +# fig.tight_layout() # ############################################################################### # # And we can plot the comparison at a single time point for two conditions. 
@@ -480,7 +478,7 @@ colorbar=True, **topomap_args) for column, condition in enumerate(['Event 1', 'Event 2', 'Difference']): - axes[column].set_title('{}'.format(condition)) + axes[column].set_title('Phase: {}'.format(condition)) fig.tight_layout() # ############################################################################# From 3468f2ba4c68d68c201029c9c908a3f152a25e95 Mon Sep 17 00:00:00 2001 From: Jonathan Kuziek Date: Mon, 22 Jun 2020 16:34:34 -0600 Subject: [PATCH 122/167] start of making the tutorial a bit more simple --- .../preprocessing/plot_80_boxy_processing.py | 376 +++++++----------- 1 file changed, 136 insertions(+), 240 deletions(-) diff --git a/tutorials/preprocessing/plot_80_boxy_processing.py b/tutorials/preprocessing/plot_80_boxy_processing.py index 4321fc6b128..e0f520470d5 100644 --- a/tutorials/preprocessing/plot_80_boxy_processing.py +++ b/tutorials/preprocessing/plot_80_boxy_processing.py @@ -6,7 +6,8 @@ This tutorial covers how to convert optical imaging data from raw measurements to relative oxyhaemoglobin (HbO) and deoxyhaemoglobin (HbR) concentration. -Phase data from the recording is also processed and plotted in several ways. +Phase data from the recording is also processed and plotted in several ways +in the latter half. .. 
contents:: Page contents :local: @@ -17,45 +18,26 @@ # sphinx_gallery_thumbnail_number = 1 import os -import numpy as np import matplotlib.pyplot as plt -from itertools import compress import re as re import mne -# get our data +# Get our data boxy_data_folder = mne.datasets.boxy_example.data_path() boxy_raw_dir = os.path.join(boxy_data_folder, 'Participant-1') -# load AC and Phase data +# Load AC and Phase data raw_intensity_ac = mne.io.read_raw_boxy(boxy_raw_dir, 'AC', verbose=True).load_data() -raw_intensity_ph = mne.io.read_raw_boxy(boxy_raw_dir, 'Ph', - verbose=True).load_data() - -# get channel indices for our two montages -mtg_a = [raw_intensity_ac.ch_names[i_index] for i_index, i_label - in enumerate(raw_intensity_ac.info['ch_names']) - if re.search(r'_D[1-8] ', i_label)] - -mtg_b = [raw_intensity_ac.ch_names[i_index] for i_index, i_label - in enumerate(raw_intensity_ac.info['ch_names']) - if re.search(r'_D(9|1[0-6]) ', i_label)] - -# plot the raw data for each data type -# AC +# Plot the raw data scalings = dict(fnirs_raw=2e2, fnirs_ph=4e3, fnirs_od=2, hbo=2e-3, hbr=2e-3) raw_intensity_ac.plot(n_channels=10, duration=20, scalings=scalings, show_scrollbars=True) -# Phase -raw_intensity_ph.plot(n_channels=10, duration=20, scalings=scalings, - show_scrollbars=True) - # ############################################################################### # # View location of sensors over brain surface # # ------------------------------------------- @@ -66,7 +48,6 @@ subjects_dir = os.path.dirname(mne.datasets.fetch_fsaverage()) -# plot both montages together fig = mne.viz.create_3d_figure(size=(800, 600), bgcolor='white') fig = mne.viz.plot_alignment(raw_intensity_ac.info, show_axes=True, @@ -85,7 +66,7 @@ # # ------------------------------------------------------------- # # # # First we remove channels that are too close together (short channels) to -# # detect a neural response (less than 3 cm distance between optodes). 
+# # detect a neural response (less than 1 cm distance between optodes). # # These short channels can be seen in the figure above. # # To achieve this we pick all the channels not considered to be short. @@ -94,7 +75,12 @@ dists = mne.preprocessing.nirs.source_detector_distances( raw_intensity_ac.info, picks=picks) -raw_intensity_ac.pick(picks[dists < 0.03]) +# Grab our marker channels so they don't get thrown out later +markers = [i_index for i_index, i_label + in enumerate(raw_intensity_ac.info['ch_names']) + if re.search(r'Markers ', i_label)] + +raw_intensity_ac.pick(picks[dists > 0.01].tolist() + markers) # ############################################################################### # # Converting from raw intensity to optical density @@ -109,40 +95,6 @@ raw_od.plot(n_channels=len(raw_od.ch_names), duration=500, show_scrollbars=False, scalings=scalings) -# ############################################################################### -# # Evaluating the quality of the data -# # ---------------------------------- -# # -# # At this stage we can quantify the quality of the coupling -# # between the scalp and the optodes using the scalp coupling index. This -# # method looks for the presence of a prominent synchronous signal in the -# # frequency range of cardiac signals across both photodetected signals. -# # -# # In this example the data is clean and the coupling is good for all -# # channels, so we will not mark any channels as bad based on the scalp -# # coupling index. - -sci = mne.preprocessing.nirs.scalp_coupling_index(raw_od) - -fig, ax = plt.subplots() -ax.hist(sci) -ax.set(xlabel='Scalp Coupling Index', ylabel='Count', xlim=[0, 1]) - -# ############################################################################### -# # In this example we will mark all channels with a SCI less than 0.5 as bad -# # (this dataset is quite clean, so no channels are marked as bad). 
- -raw_od.info['bads'] = list(compress(raw_od.ch_names, sci < 0.5)) - -# ############################################################################### -# # At this stage it is appropriate to inspect your data -# # (for instructions on how to use the interactive data visualisation tool -# # see :ref:`tut-visualize-raw`) -# # to ensure that channels with poor scalp coupling have been removed. -# # If your data contains lots of artifacts you may decide to apply -# # artifact reduction techniques as described in :ref:`ex-fnirs-artifacts`. - - # ############################################################################### # # Converting from optical density to haemoglobin # # ---------------------------------------------- @@ -171,7 +123,7 @@ fig.suptitle('Before filtering', weight='bold', size='x-large') fig.subplots_adjust(top=0.88) -raw_haemo = raw_haemo.filter(0.05, 0.7) +raw_haemo = raw_haemo.filter(0.02, 0.7) fig, axes = plt.subplots(nrows=1, ncols=2, figsize=(15, 6)) fig = raw_haemo.plot_psd(average=True, ax=axes) @@ -188,14 +140,13 @@ # # # # First we extract the events of interest and visualise them to # # ensure they are correct. - +# # # # Since our events and timings for this data set are the same -# # across montages, we are going to find events for each montage separately -# # and combine them later +# # across montages, we will just use the 'Markers b' channel to find events -# All events all_events = mne.find_events(raw_intensity_ac, stim_channel=['Markers b']) + all_event_dict = {'Event_1': 1, 'Event_2': 2, 'Block 1 End': 1000, @@ -213,9 +164,6 @@ # # baseline correction, and extract the epochs. We visualise the log of which # # epochs were dropped. -# # We will make epochs from the ac-derived heamo data and the phase data -# # separately. 
- reject_criteria = None tmin_ph, tmax_ph = -0.2, 2 tmin_ac, tmax_ac = -2, 10 @@ -228,18 +176,8 @@ verbose=True, event_repeated='drop') all_haemo_epochs.plot_drop_log() -all_phase_epochs = mne.Epochs(raw_intensity_ph, all_events, - event_id=all_event_dict, tmin=tmin_ph, - tmax=tmax_ph, reject=None, - reject_by_annotation=False, proj=False, - baseline=(-0.2, 0), preload=True, - detrend=None, verbose=True, - event_repeated='drop') -all_phase_epochs.plot_drop_log() - -# plot epochs +# Plot epochs fig = all_haemo_epochs.plot(scalings=scalings) -fig = all_phase_epochs.plot(scalings=scalings) # ############################################################################### # # View consistency of responses across trials @@ -247,7 +185,6 @@ # # # # Now we can view the haemodynamic response for our different events. -# Haemo plots vmin_ac = -60 vmax_ac = 60 @@ -263,16 +200,6 @@ hbr=[vmin_ac, vmax_ac])), title='Haemo Event 2') -# Phase -vmin_ph = -180 -vmax_ph = 180 - -all_phase_epochs['Event_1'].plot_image(combine='mean', vmin=vmin_ph, - vmax=vmax_ph, title='Phase Event 1') - -all_phase_epochs['Event_2'].plot_image(combine='mean', vmin=vmin_ph, - vmax=vmax_ph, title='Phase Event 2') - # ############################################################################### # # View consistency of responses across channels # # --------------------------------------------- @@ -281,12 +208,11 @@ # # pairs that we selected. All the channels in this data are located over the # # motor cortex, and all channels show a similar pattern in the data. 
-# Haemo evoked_event_1_ac = all_haemo_epochs['Event_1'].average() evoked_event_2_ac = all_haemo_epochs['Event_2'].average() fig, axes = plt.subplots(nrows=2, ncols=2, figsize=(15, 6)) -clim = dict(hbo=[-30, 30], hbr=[-30, 30]) +clim = dict(hbo=[-60, 60], hbr=[-60, 60]) evoked_event_1_ac.plot_image(axes=axes[:, 0], titles=dict(hbo='HBO_Event_1', hbr='HBR_Event_1'), @@ -295,78 +221,29 @@ titles=dict(hbo='HBO_Event_2', hbr='HBR_Event_2'), clim=clim) -# Phase -evoked_event_1_ph = all_phase_epochs['Event_1'].average() -evoked_event_2_ph = all_phase_epochs['Event_2'].average() - -fig, axes = plt.subplots(nrows=1, ncols=2, figsize=(15, 6)) -clim = dict(fnirs_ph=[-180, 180]) - -evoked_event_1_ph.plot_image(axes=axes[0], titles='Event_1', clim=clim) -evoked_event_2_ph.plot_image(axes=axes[1], titles='Event_2', clim=clim) - # ############################################################################### -# # Plot standard haemodynamic response image -# # ---------------------------------- -# # -# # Plot both the HbO and HbR on the same figure to illustrate the relation -# # between the two signals. +# # Compare Events 1 and 2 +# # --------------------------------------- -# # We can also plot a similar figure for phase data. 
+# Evoked Activity +evoked_diff_ac = mne.combine_evoked([evoked_event_1_ac, -evoked_event_2_ac], + weights='equal') -# Haemo fig, axes = plt.subplots(nrows=1, ncols=2, figsize=(15, 6)) -evoked_dict_ac = {'Event_1': evoked_event_1_ac, 'Event_2': evoked_event_2_ac} +evoked_dict_ac = {'Event_1': evoked_event_1_ac, 'Event_2': evoked_event_2_ac, + 'Difference': evoked_diff_ac} -color_dict = {'Event_1': 'r', 'Event_2': 'b'} +color_dict = {'Event_1': 'r', 'Event_2': 'b', 'Difference': 'g'} mne.viz.plot_compare_evokeds(evoked_dict_ac, combine="mean", ci=0.95, colors=color_dict, axes=axes.tolist()) -# Phase -evoked_dict_ph = {'Event_1': evoked_event_1_ph, 'Event_2': evoked_event_2_ph} - -color_dict = {'Event_1': 'r', 'Event_2': 'b'} - -mne.viz.plot_compare_evokeds(evoked_dict_ph, combine="mean", ci=0.95, - colors=color_dict, title='Phase') - -# ############################################################################### -# # View topographic representation of activity -# # ------------------------------------------- -# # -# # Next we view how the topographic activity changes throughout the -# # haemodynamic and phase response. - -# Haemo -times = np.arange(0.0, 10.0, 2.0) -topomap_args = dict(extrapolate='local') - -fig = evoked_event_1_ac.plot_joint(times=times, topomap_args=topomap_args) -fig = evoked_event_2_ac.plot_joint(times=times, topomap_args=topomap_args) - -# Phase -times = np.arange(0.0, 2.0, 0.5) -topomap_args = dict(extrapolate='local') - -fig = evoked_event_1_ph.plot_joint(times=times, topomap_args=topomap_args, - title='Event 1 Phase') -fig = evoked_event_2_ph.plot_joint(times=times, topomap_args=topomap_args, - title='Event 2 Phase') - -# ############################################################################### -# # Compare Events 1 and 2 -# # --------------------------------------- -# # -# # We generate topo maps for events 1 and 2 to view the location of activity. -# # First we visualise the HbO activity. 
- -# Haemo +# Topographies fig, axes = plt.subplots(nrows=2, ncols=3, figsize=(9, 5), gridspec_kw=dict(width_ratios=[1, 1, 0.1])) -topomap_args = dict(extrapolate='local', size=3, res=256, sensors='k.') +topomap_args = dict(extrapolate='local', size=1, res=256, sensors='k.') times = 1.0 all_haemo_epochs['Event_1'].average(picks='hbo').plot_topomap(times=times, @@ -394,71 +271,133 @@ axes[row, column].set_title('{}: {}'.format(chroma, condition)) fig.tight_layout() +# ############################################################################### +# # Extracting and Plotting Phase Data +# # ------------------------------------------------------------- +# # Now we can extract phase data from the boxy file and generate similar +# # plots as done above with the AC data. -# Phase -fig, axes = plt.subplots(nrows=1, ncols=3, figsize=(9, 5), - gridspec_kw=dict(width_ratios=[1, 1, 0.1])) +# Get our data +boxy_data_folder = mne.datasets.boxy_example.data_path() +boxy_raw_dir = os.path.join(boxy_data_folder, 'Participant-1') -topomap_args = dict(extrapolate='local', size=3, res=256, sensors='k.') -times = 1.0 +# Load Phase data +raw_intensity_ph = mne.io.read_raw_boxy(boxy_raw_dir, 'Ph', + verbose=True).load_data() +# Plot the raw data +scalings = dict(fnirs_raw=2e2, fnirs_ph=4e3, fnirs_od=2, + hbo=2e-3, hbr=2e-3) -all_phase_epochs['Event_1'].average().plot_topomap(times=times, axes=axes[0], - colorbar=False, - **topomap_args) +raw_intensity_ph.plot(n_channels=10, duration=20, scalings=scalings, + show_scrollbars=True) -all_phase_epochs['Event_2'].average().plot_topomap(times=times, axes=axes[1:], - colorbar=True, - **topomap_args) +# ############################################################################### +# # Selecting channels appropriate for detecting neural responses +# # ------------------------------------------------------------- +# # +# # First we remove channels that are too close together (short channels) to +# # detect a neural response (less than 1 cm 
distance between optodes). +# # These short channels can be seen in the figure above. +# # To achieve this we pick all the channels not considered to be short. -for column, condition in enumerate(['Event 1', 'Event 2']): - axes[column].set_title('Phase: {}'.format(condition)) -# fig.tight_layout() +picks = mne.pick_types(raw_intensity_ph.info, meg=False, fnirs=True, stim=True) + +dists = mne.preprocessing.nirs.source_detector_distances( + raw_intensity_ph.info, picks=picks) + +# Grab our marker channels so they don't get thrown out later +markers = [i_index for i_index, i_label + in enumerate(raw_intensity_ph.info['ch_names']) + if re.search(r'Markers ', i_label)] + +raw_intensity_ph.pick(picks[dists > 0.01].tolist() + markers) # ############################################################################### -# # And we can plot the comparison at a single time point for two conditions. +# # Extract epochs +# # -------------- -# Haemo -fig, axes = plt.subplots(nrows=2, ncols=4, figsize=(9, 5), - gridspec_kw=dict(width_ratios=[1, 1, 1, 0.1])) -vmin, vmax, ts = -0.192, 0.992, 0.1 -vmin = -5 -vmax = 5 +all_events = mne.find_events(raw_intensity_ph, stim_channel=['Markers b']) -evoked_diff_ac = mne.combine_evoked([evoked_event_1_ac, -evoked_event_2_ac], - weights='equal') -evoked_event_1_ac.plot_topomap(ch_type='hbo', times=ts, axes=axes[0, 0], - vmin=vmin, vmax=vmax, - colorbar=False, **topomap_args) +all_event_dict = {'Event_1': 1, + 'Event_2': 2, + 'Block 1 End': 1000, + 'Block 2 End': 2000} -evoked_event_2_ac.plot_topomap(ch_type='hbo', times=ts, axes=axes[0, 1], - vmin=vmin, vmax=vmax, - colorbar=False, **topomap_args) +fig = mne.viz.plot_events(all_events) +fig.subplots_adjust(right=0.7) # make room for the legend -evoked_diff_ac.plot_topomap(ch_type='hbo', times=ts, axes=axes[0, 2:], - vmin=vmin, vmax=vmax, - colorbar=True, **topomap_args) +raw_intensity_ph.plot(events=all_events, start=0, duration=10, color='gray', + event_color={1: 'r', 2: 'b', 1000: 'k', 2000: 
'k'}, + scalings=scalings) -evoked_event_1_ac.plot_topomap(ch_type='hbr', times=ts, axes=axes[1, 0], - vmin=vmin, vmax=vmax, - colorbar=False, **topomap_args) +# ############################################################################### +# # Next we define the range of our epochs, the rejection criteria, +# # baseline correction, and extract the epochs. We visualise the log of which +# # epochs were dropped. -evoked_event_2_ac.plot_topomap(ch_type='hbr', times=ts, axes=axes[1, 1], - vmin=vmin, vmax=vmax, - colorbar=False, **topomap_args) +reject_criteria = None +tmin_ph, tmax_ph = -0.2, 2 -evoked_diff_ac.plot_topomap(ch_type='hbr', times=ts, axes=axes[1, 2:], - vmin=vmin, vmax=vmax, - colorbar=True, **topomap_args) +all_phase_epochs = mne.Epochs(raw_intensity_ph, all_events, + event_id=all_event_dict, tmin=tmin_ph, + tmax=tmax_ph, reject=None, + reject_by_annotation=False, proj=False, + baseline=(-0.2, 0), preload=True, + detrend=None, verbose=True, + event_repeated='drop') +all_phase_epochs.plot_drop_log() -for column, condition in enumerate(['Event 1', 'Event 2', 'Difference']): - for row, chroma in enumerate(['HBO', 'HBR']): - axes[row, column].set_title('{}: {}'.format(chroma, condition)) -fig.tight_layout() +# Plot epochs +fig = all_phase_epochs.plot(scalings=scalings) + +# ############################################################################### +# # View consistency of responses across trials +# # ------------------------------------------- +vmin_ph = -180 +vmax_ph = 180 + +all_phase_epochs['Event_1'].plot_image(combine='mean', vmin=vmin_ph, + vmax=vmax_ph, title='Phase Event 1') + +all_phase_epochs['Event_2'].plot_image(combine='mean', vmin=vmin_ph, + vmax=vmax_ph, title='Phase Event 2') + +# ############################################################################### +# # View consistency of responses across channels +# # --------------------------------------------- + +evoked_event_1_ph = all_phase_epochs['Event_1'].average() 
+evoked_event_2_ph = all_phase_epochs['Event_2'].average() + +fig, axes = plt.subplots(nrows=1, ncols=2, figsize=(15, 6)) +clim = dict(fnirs_ph=[-180, 180]) + +evoked_event_1_ph.plot_image(axes=axes[0], titles='Event_1', clim=clim) +evoked_event_2_ph.plot_image(axes=axes[1], titles='Event_2', clim=clim) + +# ############################################################################### +# # Compare Events 1 and 2 +# # --------------------------------------- + +# Evoked Activity +evoked_diff_ph = mne.combine_evoked([evoked_event_1_ph, -evoked_event_2_ph], + weights='equal') + +evoked_dict_ph = {'Event_1': evoked_event_1_ph, 'Event_2': evoked_event_2_ph, + 'Difference': evoked_diff_ph} + +color_dict = {'Event_1': 'r', 'Event_2': 'b', 'Difference': 'g'} + +mne.viz.plot_compare_evokeds(evoked_dict_ph, combine="mean", ci=0.95, + colors=color_dict, title='Phase') + +# Topographies +topomap_args = dict(extrapolate='local', size=1, res=256, sensors='k.') +times = 1.0 -# Phase fig, axes = plt.subplots(nrows=1, ncols=4, figsize=(9, 5), gridspec_kw=dict(width_ratios=[1, 1, 1, 0.1])) vmin, vmax, ts = -0.192, 0.992, 0.1 @@ -471,52 +410,9 @@ evoked_event_2_ph.plot_topomap(times=ts, axes=axes[1], vmin=vmin, vmax=vmax, colorbar=False, **topomap_args) -evoked_diff_ph = mne.combine_evoked([evoked_event_1_ph, -evoked_event_2_ph], - weights='equal') - evoked_diff_ph.plot_topomap(times=ts, axes=axes[2:], vmin=vmin, vmax=vmax, colorbar=True, **topomap_args) for column, condition in enumerate(['Event 1', 'Event 2', 'Difference']): axes[column].set_title('Phase: {}'.format(condition)) fig.tight_layout() - -# ############################################################################# -# # Lastly, we can also look at the individual waveforms to see what is -# # driving the topographic plot above. 
- -# HBO -fig, axes = plt.subplots(nrows=1, ncols=1, figsize=(6, 4)) -mne.viz.plot_evoked_topo(evoked_event_1_ac.copy().pick('hbo'), - color='b', axes=axes, legend=False) -mne.viz.plot_evoked_topo(evoked_event_2_ac.copy().pick('hbo'), - color='r', axes=axes, legend=False) - -# Tidy the legend -leg_lines = [line for line in axes.lines if line.get_c() == 'b'][:1] -leg_lines.append([line for line in axes.lines if line.get_c() == 'r'][0]) -fig.legend(leg_lines, ['HBO Event 1', 'HBO Event 2'], loc='lower right') - - -# HBR -fig, axes = plt.subplots(nrows=1, ncols=1, figsize=(6, 4)) -mne.viz.plot_evoked_topo(evoked_event_1_ac.copy().pick('hbr'), - color='b', axes=axes, legend=False) -mne.viz.plot_evoked_topo(evoked_event_2_ac.copy().pick('hbr'), - color='r', axes=axes, legend=False) - -# Tidy the legend -leg_lines = [line for line in axes.lines if line.get_c() == 'b'][:1] -leg_lines.append([line for line in axes.lines if line.get_c() == 'r'][0]) -fig.legend(leg_lines, ['HBR Event 1', 'HBR Event 2'], loc='lower right') - - -# Phase -fig, axes = plt.subplots(nrows=1, ncols=1, figsize=(6, 4)) -mne.viz.plot_evoked_topo(evoked_event_1_ph, color='b', axes=axes, legend=False) -mne.viz.plot_evoked_topo(evoked_event_2_ph, color='r', axes=axes, legend=False) - -# Tidy the legend -leg_lines = [line for line in axes.lines if line.get_c() == 'b'][:1] -leg_lines.append([line for line in axes.lines if line.get_c() == 'r'][0]) -fig.legend(leg_lines, ['Phase Event 1', 'Phase Event 2'], loc='lower right') From 441697f0cb9e9cf6227a1afc6d60444d21ca5480 Mon Sep 17 00:00:00 2001 From: Jonathan Kuziek Date: Mon, 22 Jun 2020 16:50:40 -0600 Subject: [PATCH 123/167] removed some plots to keep total under 20, and keep CI test happy --- .../preprocessing/plot_80_boxy_processing.py | 54 ++----------------- 1 file changed, 4 insertions(+), 50 deletions(-) diff --git a/tutorials/preprocessing/plot_80_boxy_processing.py b/tutorials/preprocessing/plot_80_boxy_processing.py index e0f520470d5..4659d517b0e 
100644 --- a/tutorials/preprocessing/plot_80_boxy_processing.py +++ b/tutorials/preprocessing/plot_80_boxy_processing.py @@ -152,13 +152,6 @@ 'Block 1 End': 1000, 'Block 2 End': 2000} -fig = mne.viz.plot_events(all_events) -fig.subplots_adjust(right=0.7) # make room for the legend - -raw_intensity_ac.plot(events=all_events, start=0, duration=10, color='gray', - event_color={1: 'r', 2: 'b', 1000: 'k', 2000: 'k'}, - scalings=scalings) - # ############################################################################### # # Next we define the range of our epochs, the rejection criteria, # # baseline correction, and extract the epochs. We visualise the log of which @@ -174,7 +167,6 @@ reject_by_annotation=False, proj=True, baseline=(None, 0), preload=True, detrend=None, verbose=True, event_repeated='drop') -all_haemo_epochs.plot_drop_log() # Plot epochs fig = all_haemo_epochs.plot(scalings=scalings) @@ -200,32 +192,13 @@ hbr=[vmin_ac, vmax_ac])), title='Haemo Event 2') -# ############################################################################### -# # View consistency of responses across channels -# # --------------------------------------------- -# # -# # Similarly we can view how consistent the response is across the optode -# # pairs that we selected. All the channels in this data are located over the -# # motor cortex, and all channels show a similar pattern in the data. 
- -evoked_event_1_ac = all_haemo_epochs['Event_1'].average() -evoked_event_2_ac = all_haemo_epochs['Event_2'].average() - -fig, axes = plt.subplots(nrows=2, ncols=2, figsize=(15, 6)) -clim = dict(hbo=[-60, 60], hbr=[-60, 60]) - -evoked_event_1_ac.plot_image(axes=axes[:, 0], - titles=dict(hbo='HBO_Event_1', hbr='HBR_Event_1'), - clim=clim) -evoked_event_2_ac.plot_image(axes=axes[:, 1], - titles=dict(hbo='HBO_Event_2', hbr='HBR_Event_2'), - clim=clim) - # ############################################################################### # # Compare Events 1 and 2 # # --------------------------------------- # Evoked Activity +evoked_event_1_ac = all_haemo_epochs['Event_1'].average() +evoked_event_2_ac = all_haemo_epochs['Event_2'].average() evoked_diff_ac = mne.combine_evoked([evoked_event_1_ac, -evoked_event_2_ac], weights='equal') @@ -325,13 +298,6 @@ 'Block 1 End': 1000, 'Block 2 End': 2000} -fig = mne.viz.plot_events(all_events) -fig.subplots_adjust(right=0.7) # make room for the legend - -raw_intensity_ph.plot(events=all_events, start=0, duration=10, color='gray', - event_color={1: 'r', 2: 'b', 1000: 'k', 2000: 'k'}, - scalings=scalings) - # ############################################################################### # # Next we define the range of our epochs, the rejection criteria, # # baseline correction, and extract the epochs. 
We visualise the log of which @@ -347,7 +313,6 @@ baseline=(-0.2, 0), preload=True, detrend=None, verbose=True, event_repeated='drop') -all_phase_epochs.plot_drop_log() # Plot epochs fig = all_phase_epochs.plot(scalings=scalings) @@ -365,24 +330,13 @@ all_phase_epochs['Event_2'].plot_image(combine='mean', vmin=vmin_ph, vmax=vmax_ph, title='Phase Event 2') -# ############################################################################### -# # View consistency of responses across channels -# # --------------------------------------------- - -evoked_event_1_ph = all_phase_epochs['Event_1'].average() -evoked_event_2_ph = all_phase_epochs['Event_2'].average() - -fig, axes = plt.subplots(nrows=1, ncols=2, figsize=(15, 6)) -clim = dict(fnirs_ph=[-180, 180]) - -evoked_event_1_ph.plot_image(axes=axes[0], titles='Event_1', clim=clim) -evoked_event_2_ph.plot_image(axes=axes[1], titles='Event_2', clim=clim) - # ############################################################################### # # Compare Events 1 and 2 # # --------------------------------------- # Evoked Activity +evoked_event_1_ph = all_phase_epochs['Event_1'].average() +evoked_event_2_ph = all_phase_epochs['Event_2'].average() evoked_diff_ph = mne.combine_evoked([evoked_event_1_ph, -evoked_event_2_ph], weights='equal') From 2a36c418647d62d33b9fcfec967303d5997a88fe Mon Sep 17 00:00:00 2001 From: Jonathan Kuziek Date: Mon, 22 Jun 2020 16:58:00 -0600 Subject: [PATCH 124/167] changed filter settings so an error is not returned --- tutorials/preprocessing/plot_80_boxy_processing.py | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/tutorials/preprocessing/plot_80_boxy_processing.py b/tutorials/preprocessing/plot_80_boxy_processing.py index 4659d517b0e..ef8f26ad97a 100644 --- a/tutorials/preprocessing/plot_80_boxy_processing.py +++ b/tutorials/preprocessing/plot_80_boxy_processing.py @@ -123,7 +123,7 @@ fig.suptitle('Before filtering', weight='bold', size='x-large') fig.subplots_adjust(top=0.88) 
-raw_haemo = raw_haemo.filter(0.02, 0.7) +raw_haemo = raw_haemo.filter(0.05, 0.7) fig, axes = plt.subplots(nrows=1, ncols=2, figsize=(15, 6)) fig = raw_haemo.plot_psd(average=True, ax=axes) From 3f85a7f98da79250d6261c6861f46e676c81191c Mon Sep 17 00:00:00 2001 From: Jonathan Kuziek Date: Wed, 24 Jun 2020 11:34:21 -0600 Subject: [PATCH 125/167] quick change to see if comment characters change html test --- .../preprocessing/plot_80_boxy_processing.py | 176 +++++++++--------- 1 file changed, 88 insertions(+), 88 deletions(-) diff --git a/tutorials/preprocessing/plot_80_boxy_processing.py b/tutorials/preprocessing/plot_80_boxy_processing.py index ef8f26ad97a..4d62f446fd2 100644 --- a/tutorials/preprocessing/plot_80_boxy_processing.py +++ b/tutorials/preprocessing/plot_80_boxy_processing.py @@ -38,13 +38,13 @@ raw_intensity_ac.plot(n_channels=10, duration=20, scalings=scalings, show_scrollbars=True) -# ############################################################################### -# # View location of sensors over brain surface -# # ------------------------------------------- -# # -# # Here we validate that the location of sources-detector pairs and channels -# # are in the expected locations. Sources are bright red dots, detectors are -# # dark red dots, with source-detector pairs connected by white lines. +############################################################################### +# View location of sensors over brain surface +# ------------------------------------------- +# +# Here we validate that the location of sources-detector pairs and channels +# are in the expected locations. Sources are bright red dots, detectors are +# dark red dots, with source-detector pairs connected by white lines. 
subjects_dir = os.path.dirname(mne.datasets.fetch_fsaverage()) @@ -61,14 +61,14 @@ fig=fig) mne.viz.set_3d_view(figure=fig, azimuth=20, elevation=55, distance=0.6) -# ############################################################################### -# # Selecting channels appropriate for detecting neural responses -# # ------------------------------------------------------------- -# # -# # First we remove channels that are too close together (short channels) to -# # detect a neural response (less than 1 cm distance between optodes). -# # These short channels can be seen in the figure above. -# # To achieve this we pick all the channels not considered to be short. +############################################################################### +# Selecting channels appropriate for detecting neural responses +# ------------------------------------------------------------- +# +# First we remove channels that are too close together (short channels) to +# detect a neural response (less than 1 cm distance between optodes). +# These short channels can be seen in the figure above. +# To achieve this we pick all the channels not considered to be short. picks = mne.pick_types(raw_intensity_ac.info, meg=False, fnirs=True, stim=True) @@ -82,40 +82,40 @@ raw_intensity_ac.pick(picks[dists > 0.01].tolist() + markers) -# ############################################################################### -# # Converting from raw intensity to optical density -# # ------------------------------------------------ -# # -# # The raw intensity values are then converted to optical density. -# # We will only do this for either DC or AC data since they are measures of -# # light intensity. +############################################################################### +# Converting from raw intensity to optical density +# ------------------------------------------------ +# +# The raw intensity values are then converted to optical density. 
+# We will only do this for either DC or AC data since they are measures of +# light intensity. raw_od = mne.preprocessing.nirs.optical_density(raw_intensity_ac) raw_od.plot(n_channels=len(raw_od.ch_names), duration=500, show_scrollbars=False, scalings=scalings) -# ############################################################################### -# # Converting from optical density to haemoglobin -# # ---------------------------------------------- -# # -# # Next we convert the optical density data to haemoglobin concentration using -# # the modified Beer-Lambert law. +############################################################################### +# Converting from optical density to haemoglobin +# ---------------------------------------------- +# +# Next we convert the optical density data to haemoglobin concentration using +# the modified Beer-Lambert law. raw_haemo = mne.preprocessing.nirs.beer_lambert_law(raw_od) raw_haemo.plot(n_channels=len(raw_haemo.ch_names), duration=500, show_scrollbars=False, scalings=scalings) -# ############################################################################### -# # Removing heart rate from signal -# # ------------------------------- -# # -# # The haemodynamic response has frequency content predominantly below 0.5 Hz. -# # An increase in activity around 1 Hz can be seen in the data that is due to -# # the person's heart beat and is unwanted. So we use a low pass filter to -# # remove this. A high pass filter is also included to remove slow drifts -# # in the data. +############################################################################### +# Removing heart rate from signal +# ------------------------------- +# +# The haemodynamic response has frequency content predominantly below 0.5 Hz. +# An increase in activity around 1 Hz can be seen in the data that is due to +# the person's heart beat and is unwanted. So we use a low pass filter to +# remove this. 
A high pass filter is also included to remove slow drifts +# in the data. fig, axes = plt.subplots(nrows=1, ncols=2, figsize=(15, 6)) @@ -130,19 +130,19 @@ fig.suptitle('After filtering', weight='bold', size='x-large') fig.subplots_adjust(top=0.88) -# ############################################################################### -# # Extract epochs -# # -------------- -# # -# # Now that the signal has been converted to relative haemoglobin -# # concentration, and the unwanted heart rate component has been removed, -# # we can extract epochs related to each of the experimental conditions. -# # -# # First we extract the events of interest and visualise them to -# # ensure they are correct. -# # -# # Since our events and timings for this data set are the same -# # across montages, we will just use the 'Markers b' channel to find events +############################################################################### +# Extract epochs +# -------------- +# +# Now that the signal has been converted to relative haemoglobin +# concentration, and the unwanted heart rate component has been removed, +# we can extract epochs related to each of the experimental conditions. +# +# First we extract the events of interest and visualise them to +# ensure they are correct. +# +# Since our events and timings for this data set are the same +# across montages, we will just use the 'Markers b' channel to find events all_events = mne.find_events(raw_intensity_ac, stim_channel=['Markers b']) @@ -152,10 +152,10 @@ 'Block 1 End': 1000, 'Block 2 End': 2000} -# ############################################################################### -# # Next we define the range of our epochs, the rejection criteria, -# # baseline correction, and extract the epochs. We visualise the log of which -# # epochs were dropped. 
+############################################################################### +# Next we define the range of our epochs, the rejection criteria, +# baseline correction, and extract the epochs. We visualise the log of which +# epochs were dropped. reject_criteria = None tmin_ph, tmax_ph = -0.2, 2 @@ -171,11 +171,11 @@ # Plot epochs fig = all_haemo_epochs.plot(scalings=scalings) -# ############################################################################### -# # View consistency of responses across trials -# # ------------------------------------------- -# # -# # Now we can view the haemodynamic response for our different events. +############################################################################### +# View consistency of responses across trials +# ------------------------------------------- +# +# Now we can view the haemodynamic response for our different events. vmin_ac = -60 vmax_ac = 60 @@ -192,9 +192,9 @@ hbr=[vmin_ac, vmax_ac])), title='Haemo Event 2') -# ############################################################################### -# # Compare Events 1 and 2 -# # --------------------------------------- +############################################################################### +# Compare Events 1 and 2 +# --------------------------------------- # Evoked Activity evoked_event_1_ac = all_haemo_epochs['Event_1'].average() @@ -244,11 +244,11 @@ axes[row, column].set_title('{}: {}'.format(chroma, condition)) fig.tight_layout() -# ############################################################################### -# # Extracting and Plotting Phase Data -# # ------------------------------------------------------------- -# # Now we can extract phase data from the boxy file and generate similar -# # plots as done above with the AC data. 
+############################################################################### +# Extracting and Plotting Phase Data +# ------------------------------------------------------------- +# Now we can extract phase data from the boxy file and generate similar +# plots as done above with the AC data. # Get our data boxy_data_folder = mne.datasets.boxy_example.data_path() @@ -265,14 +265,14 @@ raw_intensity_ph.plot(n_channels=10, duration=20, scalings=scalings, show_scrollbars=True) -# ############################################################################### -# # Selecting channels appropriate for detecting neural responses -# # ------------------------------------------------------------- -# # -# # First we remove channels that are too close together (short channels) to -# # detect a neural response (less than 1 cm distance between optodes). -# # These short channels can be seen in the figure above. -# # To achieve this we pick all the channels not considered to be short. +############################################################################### +# Selecting channels appropriate for detecting neural responses +# ------------------------------------------------------------- +# +# First we remove channels that are too close together (short channels) to +# detect a neural response (less than 1 cm distance between optodes). +# These short channels can be seen in the figure above. +# To achieve this we pick all the channels not considered to be short. 
picks = mne.pick_types(raw_intensity_ph.info, meg=False, fnirs=True, stim=True) @@ -286,9 +286,9 @@ raw_intensity_ph.pick(picks[dists > 0.01].tolist() + markers) -# ############################################################################### -# # Extract epochs -# # -------------- +############################################################################### +# Extract epochs +# -------------- all_events = mne.find_events(raw_intensity_ph, stim_channel=['Markers b']) @@ -298,10 +298,10 @@ 'Block 1 End': 1000, 'Block 2 End': 2000} -# ############################################################################### -# # Next we define the range of our epochs, the rejection criteria, -# # baseline correction, and extract the epochs. We visualise the log of which -# # epochs were dropped. +############################################################################### +# Next we define the range of our epochs, the rejection criteria, +# baseline correction, and extract the epochs. We visualise the log of which +# epochs were dropped. 
reject_criteria = None tmin_ph, tmax_ph = -0.2, 2 @@ -317,9 +317,9 @@ # Plot epochs fig = all_phase_epochs.plot(scalings=scalings) -# ############################################################################### -# # View consistency of responses across trials -# # ------------------------------------------- +############################################################################### +# View consistency of responses across trials +# ------------------------------------------- vmin_ph = -180 vmax_ph = 180 @@ -330,9 +330,9 @@ all_phase_epochs['Event_2'].plot_image(combine='mean', vmin=vmin_ph, vmax=vmax_ph, title='Phase Event 2') -# ############################################################################### -# # Compare Events 1 and 2 -# # --------------------------------------- +############################################################################### +# Compare Events 1 and 2 +# --------------------------------------- # Evoked Activity evoked_event_1_ph = all_phase_epochs['Event_1'].average() From af5224f942c407a2e6c2682ec58286f863b309de Mon Sep 17 00:00:00 2001 From: Jonathan Kuziek Date: Thu, 25 Jun 2020 12:10:44 -0600 Subject: [PATCH 126/167] testing out a method of plotting hbo and hbr evoked --- mne/datasets/boxy_example/__init__.py | 2 +- .../preprocessing/plot_80_boxy_processing.py | 83 ++++++++++++++++++- 2 files changed, 81 insertions(+), 4 deletions(-) diff --git a/mne/datasets/boxy_example/__init__.py b/mne/datasets/boxy_example/__init__.py index a90c5723ce8..9e1776a5268 100644 --- a/mne/datasets/boxy_example/__init__.py +++ b/mne/datasets/boxy_example/__init__.py @@ -1,3 +1,3 @@ -"""fNIRS motor dataset.""" +"""boxy example dataset.""" from .boxy_example import data_path, has_boxy_example_data, get_version diff --git a/tutorials/preprocessing/plot_80_boxy_processing.py b/tutorials/preprocessing/plot_80_boxy_processing.py index 4d62f446fd2..52a7f5658c0 100644 --- a/tutorials/preprocessing/plot_80_boxy_processing.py +++ 
b/tutorials/preprocessing/plot_80_boxy_processing.py @@ -1,7 +1,7 @@ """ .. _tut-fnirs-processing: -Preprocessing optical imaging data from the Imagent hardware/boxy software +Preprocessing optical imaging data from the Imagent hardware/BOXY software ================================================================ This tutorial covers how to convert optical imaging data from raw measurements @@ -13,13 +13,14 @@ :local: :depth: 2 - Here we will work with the :ref:`fNIRS motor data `. + Here we will work with the :ref:`BOXY example data `. """ # sphinx_gallery_thumbnail_number = 1 import os import matplotlib.pyplot as plt import re as re +import numpy as np import mne @@ -199,6 +200,63 @@ # Evoked Activity evoked_event_1_ac = all_haemo_epochs['Event_1'].average() evoked_event_2_ac = all_haemo_epochs['Event_2'].average() + +fig, axes = plt.subplots(nrows=1, ncols=3, figsize=(15, 6)) + +axes[0].plot(evoked_event_1_ac.times, + np.sqrt((evoked_event_1_ac.copy().pick('hbo') + ._data ** 2).mean(axis=0))*1e6, 'r', + evoked_event_1_ac.times, + np.sqrt((evoked_event_1_ac.copy().pick('hbr') + ._data ** 2).mean(axis=0))*1e6, 'b', + evoked_event_1_ac.times, + ((np.sqrt((evoked_event_1_ac.copy().pick('hbo') + ._data ** 2).mean(axis=0))*1e6) - + (np.sqrt((evoked_event_1_ac.copy().pick('hbr') + ._data ** 2).mean(axis=0))*1e6)), 'g') +axes[0].set_ylim([-40, 100]) +axes[0].set_xlabel('Time (s)') +axes[0].set_ylabel('\u03BCM') +axes[0].set_title('Event 1') +axes[0].legend(['HBO', 'HBR', 'Diff']) + +axes[1].plot(evoked_event_2_ac.times, + np.sqrt((evoked_event_2_ac.copy().pick('hbo') + ._data ** 2).mean(axis=0))*1e6, 'r', + evoked_event_1_ac.times, + np.sqrt((evoked_event_2_ac.copy().pick('hbr') + ._data ** 2).mean(axis=0))*1e6, 'b', + evoked_event_1_ac.times, + ((np.sqrt((evoked_event_2_ac.copy().pick('hbo') + ._data ** 2).mean(axis=0))*1e6) - + (np.sqrt((evoked_event_2_ac.copy().pick('hbr') + ._data ** 2).mean(axis=0))*1e6)), 'g') +axes[1].set_ylim([-40, 100]) 
+axes[1].set_xlabel('Time (s)') +axes[1].set_ylabel('\u03BCM') +axes[1].set_title('Event 2') +axes[1].legend(['HBO', 'HBR', 'Diff']) + +axes[2].plot(evoked_event_1_ac.times, + ((np.sqrt((evoked_event_1_ac.copy().pick('hbo') + ._data ** 2).mean(axis=0))*1e6) - + (np.sqrt((evoked_event_1_ac.copy().pick('hbr') + ._data ** 2).mean(axis=0))*1e6)) - + ((np.sqrt((evoked_event_2_ac.copy().pick('hbo') + ._data ** 2).mean(axis=0))*1e6) - + (np.sqrt((evoked_event_2_ac.copy().pick('hbr') + ._data ** 2).mean(axis=0))*1e6)), 'k') +axes[2].set_ylim([-40, 100]) +axes[2].set_xlabel('Time (s)') +axes[2].set_ylabel('\u03BCM') +axes[2].set_title('Event 1 Diff - Event 2 Diff') + +### Other ways to plot HBO and HBR (to comapre with above) +### seems 'plot_compare_evoked' can't compare HBO and HBR because of the +### different channel names +### uncomment if you want to test + +### Method 1 (Original) evoked_diff_ac = mne.combine_evoked([evoked_event_1_ac, -evoked_event_2_ac], weights='equal') @@ -210,7 +268,26 @@ color_dict = {'Event_1': 'r', 'Event_2': 'b', 'Difference': 'g'} mne.viz.plot_compare_evokeds(evoked_dict_ac, combine="mean", ci=0.95, - colors=color_dict, axes=axes.tolist()) + colors=color_dict, axes=axes.tolist()) + +### Method 2 +fig, axes = plt.subplots(nrows=1, ncols=2, figsize=(15, 6)) + +mne.viz.plot_compare_evokeds( + {'Event_1_HBO': evoked_event_1_ac.copy().pick('hbo')}, combine=None, + ci=0.95, colors={'Event_1_HBO': 'r'}, axes=axes[0], ylim=ylim) + +mne.viz.plot_compare_evokeds( + {'Event_1_HBR': evoked_event_1_ac.copy().pick('hbr')}, combine=None, + ci=0.95, colors={'Event_1_HBR': 'b'}, axes=axes[0], ylim=ylim) + +mne.viz.plot_compare_evokeds( + {'Event_2_HBO': evoked_event_2_ac.copy().pick('hbo')}, combine=None, + ci=0.95, colors={'Event_2_HBO': 'r'}, axes=axes[1], ylim=ylim) + +mne.viz.plot_compare_evokeds( + {'Event_2_HBR': evoked_event_2_ac.copy().pick('hbr')}, combine=None, + ci=0.95, colors={'Event_2_HBR': 'b'}, axes=axes[1], ylim=ylim) # Topographies fig, 
axes = plt.subplots(nrows=2, ncols=3, figsize=(9, 5), From 2a239d6e47aa74b1611bf7a96159f673738aee7e Mon Sep 17 00:00:00 2001 From: Jonathan Kuziek Date: Thu, 25 Jun 2020 14:27:53 -0600 Subject: [PATCH 127/167] added more to difference evoked subplot, fixed a missing variable --- .../preprocessing/plot_80_boxy_processing.py | 18 +++++++++++++++--- 1 file changed, 15 insertions(+), 3 deletions(-) diff --git a/tutorials/preprocessing/plot_80_boxy_processing.py b/tutorials/preprocessing/plot_80_boxy_processing.py index 52a7f5658c0..ec90db59501 100644 --- a/tutorials/preprocessing/plot_80_boxy_processing.py +++ b/tutorials/preprocessing/plot_80_boxy_processing.py @@ -223,10 +223,10 @@ axes[1].plot(evoked_event_2_ac.times, np.sqrt((evoked_event_2_ac.copy().pick('hbo') ._data ** 2).mean(axis=0))*1e6, 'r', - evoked_event_1_ac.times, + evoked_event_2_ac.times, np.sqrt((evoked_event_2_ac.copy().pick('hbr') ._data ** 2).mean(axis=0))*1e6, 'b', - evoked_event_1_ac.times, + evoked_event_2_ac.times, ((np.sqrt((evoked_event_2_ac.copy().pick('hbo') ._data ** 2).mean(axis=0))*1e6) - (np.sqrt((evoked_event_2_ac.copy().pick('hbr') @@ -238,6 +238,16 @@ axes[1].legend(['HBO', 'HBR', 'Diff']) axes[2].plot(evoked_event_1_ac.times, + ((np.sqrt((evoked_event_1_ac.copy().pick('hbo') + ._data ** 2).mean(axis=0))*1e6) - + (np.sqrt((evoked_event_1_ac.copy().pick('hbr') + ._data ** 2).mean(axis=0))*1e6)), 'm', + evoked_event_1_ac.times, + ((np.sqrt((evoked_event_2_ac.copy().pick('hbo') + ._data ** 2).mean(axis=0))*1e6) - + (np.sqrt((evoked_event_2_ac.copy().pick('hbr') + ._data ** 2).mean(axis=0))*1e6)), 'c', + evoked_event_1_ac.times, ((np.sqrt((evoked_event_1_ac.copy().pick('hbo') ._data ** 2).mean(axis=0))*1e6) - (np.sqrt((evoked_event_1_ac.copy().pick('hbr') @@ -249,7 +259,8 @@ axes[2].set_ylim([-40, 100]) axes[2].set_xlabel('Time (s)') axes[2].set_ylabel('\u03BCM') -axes[2].set_title('Event 1 Diff - Event 2 Diff') +axes[2].set_title('HBO - HBR') +axes[2].legend(['Event 1', 'Event 2', 
'Diff']) ### Other ways to plot HBO and HBR (to comapre with above) ### seems 'plot_compare_evoked' can't compare HBO and HBR because of the @@ -272,6 +283,7 @@ ### Method 2 fig, axes = plt.subplots(nrows=1, ncols=2, figsize=(15, 6)) +ylim=dict(hbo=[0, 100], hbr=[0, 100]) mne.viz.plot_compare_evokeds( {'Event_1_HBO': evoked_event_1_ac.copy().pick('hbo')}, combine=None, From 61962a7bedb7235099ad026b322e98984bac5ad9 Mon Sep 17 00:00:00 2001 From: Jonathan Kuziek Date: Fri, 26 Jun 2020 13:04:34 -0600 Subject: [PATCH 128/167] removed code for older plots --- .../preprocessing/plot_80_boxy_processing.py | 39 ------------------- 1 file changed, 39 deletions(-) diff --git a/tutorials/preprocessing/plot_80_boxy_processing.py b/tutorials/preprocessing/plot_80_boxy_processing.py index ec90db59501..fb8a706a69d 100644 --- a/tutorials/preprocessing/plot_80_boxy_processing.py +++ b/tutorials/preprocessing/plot_80_boxy_processing.py @@ -262,45 +262,6 @@ axes[2].set_title('HBO - HBR') axes[2].legend(['Event 1', 'Event 2', 'Diff']) -### Other ways to plot HBO and HBR (to comapre with above) -### seems 'plot_compare_evoked' can't compare HBO and HBR because of the -### different channel names -### uncomment if you want to test - -### Method 1 (Original) -evoked_diff_ac = mne.combine_evoked([evoked_event_1_ac, -evoked_event_2_ac], - weights='equal') - -fig, axes = plt.subplots(nrows=1, ncols=2, figsize=(15, 6)) - -evoked_dict_ac = {'Event_1': evoked_event_1_ac, 'Event_2': evoked_event_2_ac, - 'Difference': evoked_diff_ac} - -color_dict = {'Event_1': 'r', 'Event_2': 'b', 'Difference': 'g'} - -mne.viz.plot_compare_evokeds(evoked_dict_ac, combine="mean", ci=0.95, - colors=color_dict, axes=axes.tolist()) - -### Method 2 -fig, axes = plt.subplots(nrows=1, ncols=2, figsize=(15, 6)) -ylim=dict(hbo=[0, 100], hbr=[0, 100]) - -mne.viz.plot_compare_evokeds( - {'Event_1_HBO': evoked_event_1_ac.copy().pick('hbo')}, combine=None, - ci=0.95, colors={'Event_1_HBO': 'r'}, axes=axes[0], ylim=ylim) - 
-mne.viz.plot_compare_evokeds( - {'Event_1_HBR': evoked_event_1_ac.copy().pick('hbr')}, combine=None, - ci=0.95, colors={'Event_1_HBR': 'b'}, axes=axes[0], ylim=ylim) - -mne.viz.plot_compare_evokeds( - {'Event_2_HBO': evoked_event_2_ac.copy().pick('hbo')}, combine=None, - ci=0.95, colors={'Event_2_HBO': 'r'}, axes=axes[1], ylim=ylim) - -mne.viz.plot_compare_evokeds( - {'Event_2_HBR': evoked_event_2_ac.copy().pick('hbr')}, combine=None, - ci=0.95, colors={'Event_2_HBR': 'b'}, axes=axes[1], ylim=ylim) - # Topographies fig, axes = plt.subplots(nrows=2, ncols=3, figsize=(9, 5), gridspec_kw=dict(width_ratios=[1, 1, 0.1])) From 55d359d724eb355a387ebe0f23ed2d43f2aaed3e Mon Sep 17 00:00:00 2001 From: Jonathan Kuziek Date: Sat, 27 Jun 2020 14:20:08 -0600 Subject: [PATCH 129/167] missing blank line at end of eog.py --- mne/preprocessing/eog.py | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/mne/preprocessing/eog.py b/mne/preprocessing/eog.py index 27d0ac6252c..9481eef862d 100644 --- a/mne/preprocessing/eog.py +++ b/mne/preprocessing/eog.py @@ -247,4 +247,4 @@ def create_eog_epochs(raw, ch_name=None, event_id=998, picks=None, tmin=-0.5, tmax=tmax, proj=False, reject=reject, flat=flat, picks=picks, baseline=baseline, preload=preload, reject_by_annotation=reject_by_annotation) - return eog_epochs \ No newline at end of file + return eog_epochs From 7384f8a6f08b118eb0e7698a77b599986356db9b Mon Sep 17 00:00:00 2001 From: Jonathan Kuziek Date: Thu, 2 Jul 2020 14:00:15 -0600 Subject: [PATCH 130/167] removed stray print statement --- mne/io/meas_info.py | 1 - 1 file changed, 1 deletion(-) diff --git a/mne/io/meas_info.py b/mne/io/meas_info.py index 1a5f033d755..40eb904b993 100644 --- a/mne/io/meas_info.py +++ b/mne/io/meas_info.py @@ -920,7 +920,6 @@ def read_info(fname, verbose=None): info : instance of Info Measurement information for the dataset. 
""" - print('###############################################################') f, tree, _ = fiff_open(fname) with f as fid: info = read_meas_info(fid, tree)[0] From 058df18e34f54c6ef5e05ffe89d0e4373485f047 Mon Sep 17 00:00:00 2001 From: Jonathan Kuziek Date: Fri, 3 Jul 2020 14:17:14 -0600 Subject: [PATCH 131/167] made sure files are sorted in the correct order --- mne/io/boxy/boxy.py | 2 ++ 1 file changed, 2 insertions(+) diff --git a/mne/io/boxy/boxy.py b/mne/io/boxy/boxy.py index 8510e2c25e8..7e775074008 100644 --- a/mne/io/boxy/boxy.py +++ b/mne/io/boxy/boxy.py @@ -68,6 +68,8 @@ def __init__(self, fname, datatype='AC', preload=False, verbose=None): for key in keys: if key == '*.[000-999]*': files[key] = [glob.glob('%s/*%s' % (fname, key))] + # make sure filenames are in order + files[key][0].sort() else: files[key] = glob.glob('%s/*%s' % (fname, key)) if len(files[key]) != 1: From 82a9f397c0c90b2ea5f32aad7ef5ea04997844fa Mon Sep 17 00:00:00 2001 From: Jonathan Kuziek Date: Tue, 7 Jul 2020 11:11:05 -0600 Subject: [PATCH 132/167] made changes for checklist --- mne/io/boxy/boxy.py | 83 ++++++++++--------- mne/io/meas_info.py | 1 + mne/io/pick.py | 1 + mne/viz/epochs.py | 1 + .../preprocessing/plot_80_boxy_processing.py | 4 +- 5 files changed, 47 insertions(+), 43 deletions(-) diff --git a/mne/io/boxy/boxy.py b/mne/io/boxy/boxy.py index 7e775074008..1b0c01fcd50 100644 --- a/mne/io/boxy/boxy.py +++ b/mne/io/boxy/boxy.py @@ -4,9 +4,10 @@ import glob as glob import re as re -import numpy as np import os +import numpy as np + from ..base import BaseRaw from ..meas_info import create_info from ...transforms import apply_trans, get_ras_to_neuromag_trans @@ -84,24 +85,24 @@ def __init__(self, fname, datatype='AC', preload=False, verbose=None): raise RuntimeError('Expect AC, DC, or Ph, got %s' % datatype) # Determine how many blocks we have per montage. 
- blk_names = [] - mtg_names = [] + blk_names = list() + mtg_names = list() mtgs = re.findall(r'\w\.\d+', str(files['*.[000-999]*'])) [mtg_names.append(i_mtg[0]) for i_mtg in mtgs if i_mtg[0] not in mtg_names] for i_mtg in mtg_names: - temp = [] + temp = list() [temp.append(ii_mtg[2:]) for ii_mtg in mtgs if ii_mtg[0] == i_mtg] blk_names.append(temp) # Read header file and grab some info. - detect_num = [] - source_num = [] - aux_num = [] - ccf_ha = [] - srate = [] - start_line = [] - end_line = [] + detect_num = list() + source_num = list() + aux_num = list() + ccf_ha = list() + srate = list() + start_line = list() + end_line = list() filetype = ['parsed' for i_file in files['*.[000-999]*']] for file_num, i_file in enumerate(files['*.[000-999]*'], 0): with open(i_file, 'r') as data: @@ -129,12 +130,12 @@ def __init__(self, fname, datatype='AC', preload=False, verbose=None): filetype[file_num] = 'non-parsed' # Extract source-detectors. - chan_num_1 = [] - chan_num_2 = [] - source_label = [] - detect_label = [] - chan_wavelength = [] - chan_modulation = [] + chan_num_1 = list() + chan_num_2 = list() + source_label = list() + detect_label = list() + chan_wavelength = list() + chan_modulation = list() # Load and read each line of the .mtg file. with open(files['mtg'], 'r') as data: @@ -160,9 +161,9 @@ def __init__(self, fname, datatype='AC', preload=False, verbose=None): # Channels are defined as the midpoint between source and detector # Load and read .elp file. - all_labels = [] - all_coords = [] - fiducial_coords = [] + all_labels = list() + all_coords = list() + fiducial_coords = list() get_label = 0 get_coords = 0 @@ -190,22 +191,22 @@ def __init__(self, fname, datatype='AC', preload=False, verbose=None): fiducial_coords[i_index]]) # Get coordinates from .elp file, for sources in .mtg file. 
- source_coords = [] + source_coords = list() for i_chan in source_label: if i_chan in all_labels: chan_index = all_labels.index(i_chan) source_coords.append(all_coords[chan_index]) # get coordinates from .elp file, for detectors in .mtg file. - detect_coords = [] + detect_coords = list() for i_chan in detect_label: if i_chan in all_labels: chan_index = all_labels.index(i_chan) detect_coords.append(all_coords[chan_index]) # Generate meaningful channel names for each montage. - unique_source_labels = [] - unique_detect_labels = [] + unique_source_labels = list() + unique_detect_labels = list() for mtg_num, i_mtg in enumerate(mtg_chan_num, 0): start = int(np.sum(mtg_chan_num[:mtg_num])) end = int(np.sum(mtg_chan_num[:mtg_num + 1])) @@ -225,15 +226,15 @@ def __init__(self, fname, datatype='AC', preload=False, verbose=None): # Data is organised by channels x timepoint, where the first # 'source_num' rows correspond to the first detector, the next # 'source_num' rows correspond to the second detector, and so on. - boxy_coords = [] - boxy_labels = [] - mrk_coords = [] - mrk_labels = [] - mtg_start = [] - mtg_end = [] - mtg_src_num = [] - mtg_det_num = [] - mtg_mdf = [] + boxy_coords = list() + boxy_labels = list() + mrk_coords = list() + mrk_labels = list() + mtg_start = list() + mtg_end = list() + mtg_src_num = list() + mtg_det_num = list() + mtg_mdf = list() blk_num = [len(blk) for blk in blk_names] for mtg_num, i_mtg in enumerate(mtg_chan_num, 0): start = int(np.sum(mtg_chan_num[:mtg_num])) @@ -410,12 +411,12 @@ def _read_segment_file(self, data, idx, fi, start, stop, cals, mult): print(event_fname) event_files[key] = [glob.glob('%s/*%s' % (event_fname, key))] event_files[key] = event_files[key][0] - event_data = [] + event_data = list() for file_num, i_file in enumerate(event_files[key]): event_data.append(spio.loadmat( event_files[key][file_num])['event']) - if event_data != []: + if event_data != list(): print('Event file found!') else: print('No event file found. 
Using digaux!') @@ -430,15 +431,15 @@ def _read_segment_file(self, data, idx, fi, start, stop, cals, mult): 'W', 'X', 'Y', 'Z'] # Load our optical data. - all_data = [] - all_markers = [] + all_data = list() + all_markers = list() for i_mtg, mtg_name in enumerate(montages): - all_blocks = [] - block_markers = [] + all_blocks = list() + block_markers = list() for i_blk, blk_name in enumerate(blocks[i_mtg]): file_num = i_blk + (i_mtg * len(blocks[i_mtg])) boxy_file = boxy_files[file_num] - boxy_data = [] + boxy_data = list() with open(boxy_file, 'r') as data_file: for line_num, i_line in enumerate(data_file, 1): if line_num == (start_line[i_blk] - 1): @@ -478,7 +479,7 @@ def _read_segment_file(self, data, idx, fi, start, stop, cals, mult): for key in keys: meta_data[key] = (boxy_array[:, np.where(col_names == key)[0][0]] if - key in col_names else []) + key in col_names else list()) # Make some empty variables to store our data. if filetype[file_num] == 'non-parsed': diff --git a/mne/io/meas_info.py b/mne/io/meas_info.py index 40eb904b993..e1526e3eaaf 100644 --- a/mne/io/meas_info.py +++ b/mne/io/meas_info.py @@ -5,6 +5,7 @@ # Stefan Appelhoff # # License: BSD (3-clause) + from collections import Counter import contextlib from copy import deepcopy diff --git a/mne/io/pick.py b/mne/io/pick.py index eb6518fae6d..2f44408d4b2 100644 --- a/mne/io/pick.py +++ b/mne/io/pick.py @@ -454,6 +454,7 @@ def pick_types(info, meg=None, eeg=False, stim=False, eog=False, ecg=False, myinclude = [info['ch_names'][k] for k in range(nchan) if pick[k]] myinclude += include + if len(myinclude) == 0: sel = np.array([], int) else: diff --git a/mne/viz/epochs.py b/mne/viz/epochs.py index a00881e570c..9cc518fde4c 100644 --- a/mne/viz/epochs.py +++ b/mne/viz/epochs.py @@ -1392,6 +1392,7 @@ def _plot_update_epochs_proj(params, bools=None): else: # this is faster than epochs.get_data()[start:end] when not preloaded data = np.concatenate(epochs[start:end].get_data(), axis=1) + if 
params['projector'] is not None: data = np.dot(params['projector'], data) types = params['types'] diff --git a/tutorials/preprocessing/plot_80_boxy_processing.py b/tutorials/preprocessing/plot_80_boxy_processing.py index 9ca98a773b1..dba97d6c701 100644 --- a/tutorials/preprocessing/plot_80_boxy_processing.py +++ b/tutorials/preprocessing/plot_80_boxy_processing.py @@ -209,7 +209,7 @@ color_dict = {'Event_1': 'r', 'Event_2': 'b', 'Difference': 'g'} -mne.viz.plot_compare_evokeds(evoked_dict_ac, combine="mean", ci=0.95, +mne.viz.plot_compare_evokeds(evoked_dict_ac, combine='mean', ci=0.95, colors=color_dict, axes=axes.tolist()) # Topographies @@ -345,7 +345,7 @@ color_dict = {'Event_1': 'r', 'Event_2': 'b', 'Difference': 'g'} -mne.viz.plot_compare_evokeds(evoked_dict_ph, combine="mean", ci=0.95, +mne.viz.plot_compare_evokeds(evoked_dict_ph, combine='mean', ci=0.95, colors=color_dict, title='Phase') # Topographies From 02e7f683a391dfb6eadea3cb60f0405d7453f3e1 Mon Sep 17 00:00:00 2001 From: Jonathan Kuziek Date: Tue, 7 Jul 2020 13:26:03 -0600 Subject: [PATCH 133/167] table of contents should now display properly --- .../preprocessing/plot_80_boxy_processing.py | 18 +++++++++--------- 1 file changed, 9 insertions(+), 9 deletions(-) diff --git a/tutorials/preprocessing/plot_80_boxy_processing.py b/tutorials/preprocessing/plot_80_boxy_processing.py index fb8a706a69d..e1fa1211b14 100644 --- a/tutorials/preprocessing/plot_80_boxy_processing.py +++ b/tutorials/preprocessing/plot_80_boxy_processing.py @@ -1,19 +1,19 @@ """ -.. _tut-fnirs-processing: +.. _tut-boxy-processing: Preprocessing optical imaging data from the Imagent hardware/BOXY software -================================================================ +========================================================================== This tutorial covers how to convert optical imaging data from raw measurements to relative oxyhaemoglobin (HbO) and deoxyhaemoglobin (HbR) concentration. 
Phase data from the recording is also processed and plotted in several ways in the latter half. - .. contents:: Page contents - :local: - :depth: 2 +.. contents:: Page contents + :local: + :depth: 2 - Here we will work with the :ref:`BOXY example data `. +Here we will work with the :ref:`BOXY example data `. """ # sphinx_gallery_thumbnail_number = 1 @@ -195,7 +195,7 @@ ############################################################################### # Compare Events 1 and 2 -# --------------------------------------- +# ---------------------- # Evoked Activity evoked_event_1_ac = all_haemo_epochs['Event_1'].average() @@ -296,7 +296,7 @@ ############################################################################### # Extracting and Plotting Phase Data -# ------------------------------------------------------------- +# ---------------------------------- # Now we can extract phase data from the boxy file and generate similar # plots as done above with the AC data. @@ -382,7 +382,7 @@ ############################################################################### # Compare Events 1 and 2 -# --------------------------------------- +# ---------------------- # Evoked Activity evoked_event_1_ph = all_phase_epochs['Event_1'].average() From aaa02a4d7d8d7b0f2e420ed935b47a24ea2d098e Mon Sep 17 00:00:00 2001 From: Jonathan Kuziek Date: Tue, 7 Jul 2020 15:24:09 -0600 Subject: [PATCH 134/167] added references, and description, of boxy example dataset --- .circleci/config.yml | 3 +++ doc/overview/datasets_index.rst | 18 ++++++++++++++++++ doc/python_reference.rst | 1 + 3 files changed, 22 insertions(+) diff --git a/.circleci/config.yml b/.circleci/config.yml index 460f672a30a..42f1238f4f4 100644 --- a/.circleci/config.yml +++ b/.circleci/config.yml @@ -202,6 +202,9 @@ jobs: if [[ $(cat $FNAME | grep -x ".*datasets.*fnirs_motor.*" | wc -l) -gt 0 ]]; then python -c "import mne; print(mne.datasets.fnirs_motor.data_path(update_path=True))"; fi; + if [[ $(cat $FNAME | grep -x 
".*datasets.*boxy_example.*" | wc -l) -gt 0 ]]; then + python -c "import mne; print(mne.datasets.boxy_example.data_path(update_path=True))"; + fi; if [[ $(cat $FNAME | grep -x ".*datasets.*opm.*" | wc -l) -gt 0 ]]; then python -c "import mne; print(mne.datasets.opm.data_path(update_path=True))"; fi; diff --git a/doc/overview/datasets_index.rst b/doc/overview/datasets_index.rst index 7f5cdfc8857..6bb4488534e 100644 --- a/doc/overview/datasets_index.rst +++ b/doc/overview/datasets_index.rst @@ -212,6 +212,24 @@ The tapping lasts 5 seconds, and there are 30 trials of each condition. .. topic:: Examples * :ref:`tut-fnirs-processing` + +.. _boxy-example-dataset: + +BOXY Example +============ +:func:`mne.datasets.boxy_example.data_path` + +This dataset was used for an optical imaging workshop. +Sources and detectors are placed over the occipital lobe. +This set contains data for two montages, each with two blocks. +Each montage and block contains two conditions: + +- 1 +- 2 + +.. topic:: Examples + + * :ref:`tut-boxy-processing` High frequency SEF ================== diff --git a/doc/python_reference.rst b/doc/python_reference.rst index 23a3731c945..0cc53bb6dbb 100644 --- a/doc/python_reference.rst +++ b/doc/python_reference.rst @@ -183,6 +183,7 @@ Datasets .. 
autosummary:: :toctree: generated/ + boxy_example.data_path brainstorm.bst_auditory.data_path brainstorm.bst_resting.data_path brainstorm.bst_raw.data_path From e5eed6d6ebd1010f8fa4ba0e3e12ce34ec85d746 Mon Sep 17 00:00:00 2001 From: Jonathan Kuziek Date: Wed, 8 Jul 2020 13:32:22 -0600 Subject: [PATCH 135/167] fixed style issue in plot_80 --- .../preprocessing/plot_80_boxy_processing.py | 32 +++++++++---------- 1 file changed, 16 insertions(+), 16 deletions(-) diff --git a/tutorials/preprocessing/plot_80_boxy_processing.py b/tutorials/preprocessing/plot_80_boxy_processing.py index 9c25411e708..9220957a129 100644 --- a/tutorials/preprocessing/plot_80_boxy_processing.py +++ b/tutorials/preprocessing/plot_80_boxy_processing.py @@ -205,15 +205,15 @@ axes[0].plot(evoked_event_1_ac.times, np.sqrt((evoked_event_1_ac.copy().pick('hbo') - ._data ** 2).mean(axis=0))*1e6, 'r', + ._data ** 2).mean(axis=0)) * 1e6, 'r', evoked_event_1_ac.times, np.sqrt((evoked_event_1_ac.copy().pick('hbr') - ._data ** 2).mean(axis=0))*1e6, 'b', + ._data ** 2).mean(axis=0)) * 1e6, 'b', evoked_event_1_ac.times, ((np.sqrt((evoked_event_1_ac.copy().pick('hbo') - ._data ** 2).mean(axis=0))*1e6) - + ._data ** 2).mean(axis=0)) * 1e6) - (np.sqrt((evoked_event_1_ac.copy().pick('hbr') - ._data ** 2).mean(axis=0))*1e6)), 'g') + ._data ** 2).mean(axis=0)) * 1e6)), 'g') axes[0].set_ylim([-40, 100]) axes[0].set_xlabel('Time (s)') axes[0].set_ylabel('\u03BCM') @@ -222,15 +222,15 @@ axes[1].plot(evoked_event_2_ac.times, np.sqrt((evoked_event_2_ac.copy().pick('hbo') - ._data ** 2).mean(axis=0))*1e6, 'r', + ._data ** 2).mean(axis=0)) * 1e6, 'r', evoked_event_2_ac.times, np.sqrt((evoked_event_2_ac.copy().pick('hbr') - ._data ** 2).mean(axis=0))*1e6, 'b', + ._data ** 2).mean(axis=0)) * 1e6, 'b', evoked_event_2_ac.times, ((np.sqrt((evoked_event_2_ac.copy().pick('hbo') - ._data ** 2).mean(axis=0))*1e6) - + ._data ** 2).mean(axis=0)) * 1e6) - (np.sqrt((evoked_event_2_ac.copy().pick('hbr') - ._data ** 
2).mean(axis=0))*1e6)), 'g') + ._data ** 2).mean(axis=0)) * 1e6)), 'g') axes[1].set_ylim([-40, 100]) axes[1].set_xlabel('Time (s)') axes[1].set_ylabel('\u03BCM') @@ -239,23 +239,23 @@ axes[2].plot(evoked_event_1_ac.times, ((np.sqrt((evoked_event_1_ac.copy().pick('hbo') - ._data ** 2).mean(axis=0))*1e6) - + ._data ** 2).mean(axis=0)) * 1e6) - (np.sqrt((evoked_event_1_ac.copy().pick('hbr') - ._data ** 2).mean(axis=0))*1e6)), 'm', + ._data ** 2).mean(axis=0)) * 1e6)), 'm', evoked_event_1_ac.times, ((np.sqrt((evoked_event_2_ac.copy().pick('hbo') - ._data ** 2).mean(axis=0))*1e6) - + ._data ** 2).mean(axis=0)) * 1e6) - (np.sqrt((evoked_event_2_ac.copy().pick('hbr') - ._data ** 2).mean(axis=0))*1e6)), 'c', + ._data ** 2).mean(axis=0)) * 1e6)), 'c', evoked_event_1_ac.times, ((np.sqrt((evoked_event_1_ac.copy().pick('hbo') - ._data ** 2).mean(axis=0))*1e6) - + ._data ** 2).mean(axis=0)) * 1e6) - (np.sqrt((evoked_event_1_ac.copy().pick('hbr') - ._data ** 2).mean(axis=0))*1e6)) - + ._data ** 2).mean(axis=0)) * 1e6)) - ((np.sqrt((evoked_event_2_ac.copy().pick('hbo') - ._data ** 2).mean(axis=0))*1e6) - + ._data ** 2).mean(axis=0)) * 1e6) - (np.sqrt((evoked_event_2_ac.copy().pick('hbr') - ._data ** 2).mean(axis=0))*1e6)), 'k') + ._data ** 2).mean(axis=0)) * 1e6)), 'k') axes[2].set_ylim([-40, 100]) axes[2].set_xlabel('Time (s)') axes[2].set_ylabel('\u03BCM') From 71b388e5a7ef1cd83381b28b2c8811ad180afc88 Mon Sep 17 00:00:00 2001 From: Jonathan Kuziek Date: Thu, 9 Jul 2020 15:55:57 -0600 Subject: [PATCH 136/167] addressed some checklist items --- mne/io/boxy/boxy.py | 27 +++++++++++++++++++-------- 1 file changed, 19 insertions(+), 8 deletions(-) diff --git a/mne/io/boxy/boxy.py b/mne/io/boxy/boxy.py index 1b0c01fcd50..f28b87903e0 100644 --- a/mne/io/boxy/boxy.py +++ b/mne/io/boxy/boxy.py @@ -276,8 +276,7 @@ def __init__(self, fname, datatype='AC', preload=False, verbose=None): boxy_coords = np.array(boxy_coords, float) all_coords = np.array(all_coords, float) - # Montage only 
wants channel coords, so need to grab those, - # convert to array, then make a dict with labels. + # Montage wants channel coords and labels as a dict. all_chan_dict = dict(zip(all_labels, all_coords)) my_dig_montage = make_dig_montage(ch_pos=all_chan_dict, @@ -286,7 +285,7 @@ def __init__(self, fname, datatype='AC', preload=False, verbose=None): lpa=fiducial_coords[1], rpa=fiducial_coords[2]) - # Create info structure. + # Determine channel types. if datatype == 'Ph': chan_type = 'fnirs_fd_phase' else: @@ -294,9 +293,11 @@ def __init__(self, fname, datatype='AC', preload=False, verbose=None): ch_types = ([chan_type if i_chan < np.sum(mtg_chan_num) else 'stim' for i_chan, _ in enumerate(boxy_labels)]) + + # Create info structure. info = create_info(boxy_labels, srate[0], ch_types=ch_types) - # Add dig to info. + # Add montage to info. info.set_montage(my_dig_montage) # Store channel, source, and detector locations. @@ -304,13 +305,12 @@ def __init__(self, fname, datatype='AC', preload=False, verbose=None): # The source location is stored in the second 3 entries of loc. # The detector location is stored in the third 3 entries of loc. # Also encode the light frequency in the structure. - - # These are all in actual 3d individual coordinates, - # so let's transform them to the Neuromag head coordinate frame. native_head_t = get_ras_to_neuromag_trans(fiducial_coords[0], fiducial_coords[1], fiducial_coords[2]) + # These are all in actual 3d individual coordinates, + # so let's transform them to the Neuromag head coordinate frame. for i_chan in range(len(boxy_labels)): if i_chan < np.sum(mtg_chan_num): temp_ch_src_det = apply_trans( @@ -433,24 +433,34 @@ def _read_segment_file(self, data, idx, fi, start, stop, cals, mult): # Load our optical data. all_data = list() all_markers = list() + + # Loop through montages. for i_mtg, mtg_name in enumerate(montages): all_blocks = list() block_markers = list() + + # Loop through blocks. 
for i_blk, blk_name in enumerate(blocks[i_mtg]): file_num = i_blk + (i_mtg * len(blocks[i_mtg])) boxy_file = boxy_files[file_num] boxy_data = list() + + # Loop through our data. with open(boxy_file, 'r') as data_file: for line_num, i_line in enumerate(data_file, 1): if line_num == (start_line[i_blk] - 1): + # Grab column names. col_names = np.asarray( re.findall(r'\w+\-\w+|\w+\-\d+|\w+', i_line.rsplit(' ')[0])) if (line_num > start_line[file_num] and line_num <= end_line[file_num]): + + # Grab actual data. boxy_data.append(i_line.rsplit(' ')) + # Get number of sources. sources = np.arange(1, source_num[file_num] + 1, 1) # Grab the individual data points for each column. @@ -462,6 +472,7 @@ def _read_segment_file(self, data, idx, fi, start, stop, cals, mult): boxy_length = len(col_names) boxy_array = np.full((len(boxy_data), boxy_length), np.nan) for ii, i_data in enumerate(boxy_data): + # Need to make sure our rows are the same length. # This is done by padding the shorter ones. padding = boxy_length - len(i_data) @@ -570,7 +581,6 @@ def _read_segment_file(self, data, idx, fi, start, stop, cals, mult): print('Removing phase mean') # Subtract mean to better detect outliers using SD. - mrph = np.mean(data_, axis=1) for i_chan in range(np.size(data_, axis=0)): data_[i_chan, :] = (data_[i_chan, :] - @@ -621,6 +631,7 @@ def _read_segment_file(self, data, idx, fi, start, stop, cals, mult): temp_markers[event_info[0] - 1] = event_info[1] block_markers.append(temp_markers) except Exception: + # Add our markers to the data array based on filetype. 
if type(meta_data['digaux']) is not list: if filetype[file_num] == 'non-parsed': From 1c7b8feb04a58e2bea27cde9ba840b480e483a1f Mon Sep 17 00:00:00 2001 From: Jonathan Kuziek Date: Fri, 17 Jul 2020 10:12:05 -0600 Subject: [PATCH 137/167] updated info about boxy dataset --- doc/overview/datasets_index.rst | 11 +++++++---- 1 file changed, 7 insertions(+), 4 deletions(-) diff --git a/doc/overview/datasets_index.rst b/doc/overview/datasets_index.rst index 6bb4488534e..4a986db4da3 100644 --- a/doc/overview/datasets_index.rst +++ b/doc/overview/datasets_index.rst @@ -219,13 +219,16 @@ BOXY Example ============ :func:`mne.datasets.boxy_example.data_path` -This dataset was used for an optical imaging workshop. +This dataset is of a single participant. +Recorded at the University of Illinois at Urbana-Champaign. Sources and detectors are placed over the occipital lobe. +The participant was shown a checkerboard pattern, alternating at 1Hz. +This reversal starts half-way through the recording. This set contains data for two montages, each with two blocks. -Each montage and block contains two conditions: +Each montage and block contains two marker types: -- 1 -- 2 +- 1 = checkerboard reversal +- 2 = same as 1 but for the first few trials (to keep separate if needed) .. 
topic:: Examples From e788598758f28a0cde2bc492e5ac45b12f3a6d94 Mon Sep 17 00:00:00 2001 From: kuziekj Date: Wed, 29 Jul 2020 10:34:33 -0600 Subject: [PATCH 138/167] should now only read in boxy data file (#26) * should now only read in boxy data file * added tutorial and test for loading boxy data, remove plot_80 tutorial and reference for now * now will only load a single boxy file Co-authored-by: Kyle Mathewson --- doc/overview/datasets_index.rst | 4 - mne/io/boxy/boxy.py | 680 ++++-------------- mne/io/boxy/tests/test_boxy.py | 253 +------ tutorials/io/plot_40_reading_boxy_data.py | 30 + .../preprocessing/plot_80_boxy_processing.py | 422 ----------- 5 files changed, 203 insertions(+), 1186 deletions(-) create mode 100644 tutorials/io/plot_40_reading_boxy_data.py delete mode 100644 tutorials/preprocessing/plot_80_boxy_processing.py diff --git a/doc/overview/datasets_index.rst b/doc/overview/datasets_index.rst index 4a986db4da3..9167a352f42 100644 --- a/doc/overview/datasets_index.rst +++ b/doc/overview/datasets_index.rst @@ -230,10 +230,6 @@ Each montage and block contains two marker types: - 1 = checkerboard reversal - 2 = same as 1 but for the first few trials (to keep separate if needed) -.. topic:: Examples - - * :ref:`tut-boxy-processing` - High frequency SEF ================== :func:`mne.datasets.hf_sef.data_path()` diff --git a/mne/io/boxy/boxy.py b/mne/io/boxy/boxy.py index f28b87903e0..719cb5106ac 100644 --- a/mne/io/boxy/boxy.py +++ b/mne/io/boxy/boxy.py @@ -4,15 +4,12 @@ import glob as glob import re as re -import os import numpy as np from ..base import BaseRaw from ..meas_info import create_info -from ...transforms import apply_trans, get_ras_to_neuromag_trans from ...utils import logger, verbose, fill_doc -from ...channels.montage import make_dig_montage @fill_doc @@ -64,226 +61,52 @@ def __init__(self, fname, datatype='AC', preload=False, verbose=None): # Check if required files exist and store names for later use. 
files = dict() - keys = ('mtg', 'elp', '*.[000-999]*') + key = '*.txt' print(fname) - for key in keys: - if key == '*.[000-999]*': - files[key] = [glob.glob('%s/*%s' % (fname, key))] - # make sure filenames are in order - files[key][0].sort() - else: - files[key] = glob.glob('%s/*%s' % (fname, key)) - if len(files[key]) != 1: - raise RuntimeError('Expect one %s file, got %d' % - (key, len(files[key]),)) - files[key] = files[key][0] + files[key] = [glob.glob('%s/*%s' % (fname, key))] + + # Make sure filenames are in order. + files[key][0].sort() + if len(files[key]) != 1: + raise RuntimeError('Expect one %s file, got %d' % + (key, len(files[key]),)) + files[key] = files[key][0] # Determine which data type to return. - if datatype in ['AC', 'DC', 'Ph']: - data_types = [datatype] - else: + if datatype not in ['AC', 'DC', 'Ph']: raise RuntimeError('Expect AC, DC, or Ph, got %s' % datatype) - # Determine how many blocks we have per montage. - blk_names = list() - mtg_names = list() - mtgs = re.findall(r'\w\.\d+', str(files['*.[000-999]*'])) - [mtg_names.append(i_mtg[0]) for i_mtg in mtgs - if i_mtg[0] not in mtg_names] - for i_mtg in mtg_names: - temp = list() - [temp.append(ii_mtg[2:]) for ii_mtg in mtgs if ii_mtg[0] == i_mtg] - blk_names.append(temp) - # Read header file and grab some info. - detect_num = list() - source_num = list() - aux_num = list() - ccf_ha = list() - srate = list() - start_line = list() - end_line = list() - filetype = ['parsed' for i_file in files['*.[000-999]*']] - for file_num, i_file in enumerate(files['*.[000-999]*'], 0): - with open(i_file, 'r') as data: - for line_num, i_line in enumerate(data, 1): - if '#DATA ENDS' in i_line: - # Data ends just before this. 
- end_line.append(line_num - 1) - break - if 'Detector Channels' in i_line: - detect_num.append(int(i_line.rsplit(' ')[0])) - elif 'External MUX Channels' in i_line: - source_num.append(int(i_line.rsplit(' ')[0])) - elif 'Auxiliary Channels' in i_line: - aux_num.append(int(i_line.rsplit(' ')[0])) - elif 'Waveform (CCF) Frequency (Hz)' in i_line: - ccf_ha.append(float(i_line.rsplit(' ')[0])) - elif 'Update Rate (Hz)' in i_line: - srate.append(float(i_line.rsplit(' ')[0])) - elif 'Updata Rate (Hz)' in i_line: - srate.append(float(i_line.rsplit(' ')[0])) - elif '#DATA BEGINS' in i_line: - # Data should start a couple lines later. - start_line.append(line_num + 2) - elif 'exmux' in i_line: - filetype[file_num] = 'non-parsed' - - # Extract source-detectors. - chan_num_1 = list() - chan_num_2 = list() - source_label = list() - detect_label = list() - chan_wavelength = list() - chan_modulation = list() - - # Load and read each line of the .mtg file. - with open(files['mtg'], 'r') as data: + filetype = 'parsed' + with open(files[key][0], 'r') as data: for line_num, i_line in enumerate(data, 1): - if line_num == 2: - mtg_chan_num = [int(num) for num in i_line.split()] - elif line_num > 2: - (chan1, chan2, source, detector, - wavelength, modulation) = i_line.split() - chan_num_1.append(chan1) - chan_num_2.append(chan2) - source_label.append(source) - detect_label.append(detector) - chan_wavelength.append(wavelength) - chan_modulation.append(modulation) - - # Read information about probe/montage/optodes. - # A word on terminology used here: - # Sources produce light - # Detectors measure light - # Sources and detectors are both called optodes - # Each source - detector pair produces a channel - # Channels are defined as the midpoint between source and detector - - # Load and read .elp file. 
- all_labels = list() - all_coords = list() - fiducial_coords = list() - get_label = 0 - get_coords = 0 - - with open(files['elp'], 'r') as data: - for i_line in data: - # First let's get our fiducial coordinates. - if '%F' in i_line: - fiducial_coords.append(i_line.split()[1:]) - # Check where sensor info starts. - if '//Sensor name' in i_line: - get_label = 1 - elif get_label == 1: - # Grab the part after '%N' for the label. - label = i_line.split()[1] - all_labels.append(label) - get_label = 0 - get_coords = 1 - elif get_coords == 1: - X, Y, Z = i_line.split() - all_coords.append([float(X), float(Y), float(Z)]) - get_coords = 0 - for i_index in range(3): - fiducial_coords[i_index] = np.asarray([float(x) - for x in - fiducial_coords[i_index]]) - - # Get coordinates from .elp file, for sources in .mtg file. - source_coords = list() - for i_chan in source_label: - if i_chan in all_labels: - chan_index = all_labels.index(i_chan) - source_coords.append(all_coords[chan_index]) - - # get coordinates from .elp file, for detectors in .mtg file. - detect_coords = list() - for i_chan in detect_label: - if i_chan in all_labels: - chan_index = all_labels.index(i_chan) - detect_coords.append(all_coords[chan_index]) - - # Generate meaningful channel names for each montage. - unique_source_labels = list() - unique_detect_labels = list() - for mtg_num, i_mtg in enumerate(mtg_chan_num, 0): - start = int(np.sum(mtg_chan_num[:mtg_num])) - end = int(np.sum(mtg_chan_num[:mtg_num + 1])) - [unique_source_labels.append(label) - for label in source_label[start:end] - if label not in unique_source_labels] - [unique_detect_labels.append(label) - for label in detect_label[start:end] - if label not in unique_detect_labels] - - # Swap order to have lower wavelength first. 
- for i_chan in range(0, len(chan_wavelength), 2): - chan_wavelength[i_chan], chan_wavelength[i_chan + 1] = ( - chan_wavelength[i_chan + 1], chan_wavelength[i_chan]) + if '#DATA ENDS' in i_line: + # Data ends just before this. + end_line = line_num - 1 + break + if 'Detector Channels' in i_line: + detect_num = int(i_line.rsplit(' ')[0]) + elif 'External MUX Channels' in i_line: + source_num = int(i_line.rsplit(' ')[0]) + elif 'Update Rate (Hz)' in i_line: + srate = float(i_line.rsplit(' ')[0]) + elif 'Updata Rate (Hz)' in i_line: + srate = float(i_line.rsplit(' ')[0]) + elif '#DATA BEGINS' in i_line: + # Data should start a couple lines later. + start_line = line_num + 2 + elif 'exmux' in i_line: + filetype = 'non-parsed' # Label each channel in our data. # Data is organised by channels x timepoint, where the first # 'source_num' rows correspond to the first detector, the next # 'source_num' rows correspond to the second detector, and so on. - boxy_coords = list() boxy_labels = list() - mrk_coords = list() - mrk_labels = list() - mtg_start = list() - mtg_end = list() - mtg_src_num = list() - mtg_det_num = list() - mtg_mdf = list() - blk_num = [len(blk) for blk in blk_names] - for mtg_num, i_mtg in enumerate(mtg_chan_num, 0): - start = int(np.sum(mtg_chan_num[:mtg_num])) - end = int(np.sum(mtg_chan_num[:mtg_num + 1])) - # Organise some data for each montage. - start_blk = int(np.sum(blk_num[:mtg_num])) - # Get stop and stop lines for each montage. - mtg_start.append(start_line[start_blk]) - mtg_end.append(end_line[start_blk]) - # Get source and detector numbers for each montage. - mtg_src_num.append(source_num[start_blk]) - mtg_det_num.append(detect_num[start_blk]) - # Get modulation frequency for each channel and montage. - # Assuming modulation freq in MHz. 
- mtg_mdf.append([int(chan_mdf) * 1e6 for chan_mdf - in chan_modulation[start:end]]) - for i_type in data_types: - for i_coord in range(start, end): - boxy_coords.append( - np.mean(np.vstack((source_coords[i_coord], - detect_coords[i_coord])), - axis=0).tolist() + source_coords[i_coord] + - detect_coords[i_coord] + [chan_wavelength[i_coord]] + - [0] + [0]) - boxy_labels.append('S' + str(unique_source_labels.index( - source_label[i_coord]) + 1) + '_D' + - str(unique_detect_labels.index(detect_label[i_coord]) + - 1) + ' ' + chan_wavelength[i_coord]) - - # Add extra column for triggers. - mrk_labels.append('Markers' + ' ' + mtg_names[mtg_num]) - mrk_coords.append(np.zeros((12,))) - - # Add triggers to the end of our data. - boxy_labels.extend(mrk_labels) - boxy_coords.extend(mrk_coords) - - # Convert to floats. - boxy_coords = np.array(boxy_coords, float) - all_coords = np.array(all_coords, float) - - # Montage wants channel coords and labels as a dict. - all_chan_dict = dict(zip(all_labels, all_coords)) - - my_dig_montage = make_dig_montage(ch_pos=all_chan_dict, - coord_frame='unknown', - nasion=fiducial_coords[0], - lpa=fiducial_coords[1], - rpa=fiducial_coords[2]) + for det_num in range(detect_num): + for src_num in range(source_num): + boxy_labels.append('S' + str(src_num + 1) + + '_D' + str(det_num + 1)) # Determine channel types. if datatype == 'Ph': @@ -291,84 +114,33 @@ def __init__(self, fname, datatype='AC', preload=False, verbose=None): else: chan_type = 'fnirs_cw_amplitude' - ch_types = ([chan_type if i_chan < np.sum(mtg_chan_num) else 'stim' - for i_chan, _ in enumerate(boxy_labels)]) + ch_types = ([chan_type for i_chan in boxy_labels]) # Create info structure. - info = create_info(boxy_labels, srate[0], ch_types=ch_types) - - # Add montage to info. - info.set_montage(my_dig_montage) - - # Store channel, source, and detector locations. - # The channel location is stored in the first 3 entries of loc. 
- # The source location is stored in the second 3 entries of loc. - # The detector location is stored in the third 3 entries of loc. - # Also encode the light frequency in the structure. - native_head_t = get_ras_to_neuromag_trans(fiducial_coords[0], - fiducial_coords[1], - fiducial_coords[2]) - - # These are all in actual 3d individual coordinates, - # so let's transform them to the Neuromag head coordinate frame. - for i_chan in range(len(boxy_labels)): - if i_chan < np.sum(mtg_chan_num): - temp_ch_src_det = apply_trans( - native_head_t, - boxy_coords[i_chan][:9].reshape(3, 3)).ravel() - else: - # Don't want to transform markers. - temp_ch_src_det = np.zeros(9,) - # Add wavelength and placeholders. - temp_other = np.asarray(boxy_coords[i_chan][9:], dtype=np.float64) - info['chs'][i_chan]['loc'] = np.concatenate((temp_ch_src_det, - temp_other), axis=0) + info = create_info(boxy_labels, srate, ch_types=ch_types) raw_extras = {'source_num': source_num, 'detect_num': detect_num, 'start_line': start_line, 'end_line': end_line, 'filetype': filetype, - 'files': files, - 'montages': mtg_names, - 'blocks': blk_names, - 'data_types': data_types, - 'mtg_mdf': mtg_mdf, + 'files': files[key][0], + 'datatype': datatype, } - # Check data start lines. - if len(set(start_line)) == 1: - print('Start lines the same!') - else: - print('Start lines different!') - - # Check data end lines. - if len(set(end_line)) == 1: - print('End lines the same!') - else: - print('End lines different!') - # Make sure data lengths are the same. 
- data_length = ([end_line[i_line] - start_line[i_line] for i_line, - line_num in enumerate(start_line)]) - - if len(set(data_length)) == 1: - print('Data sizes are the same!') - else: - print('Data sizes are different!') - - print('Start Line: ', start_line[0]) - print('End Line: ', end_line[0]) - print('Original Difference: ', end_line[0] - start_line[0]) - first_samps = start_line[0] + print('Start Line: ', start_line) + print('End Line: ', end_line) + print('Original Difference: ', end_line - start_line) + first_samps = start_line print('New first_samps: ', first_samps) - diff = end_line[0] - (start_line[0]) + diff = end_line - (start_line) # Number if rows in data file depends on data file type. - if filetype[0] == 'non-parsed': - last_samps = ((diff * len(blk_names[0])) // (source_num[0])) - elif filetype[0] == 'parsed': - last_samps = diff * len(blk_names[0]) + if filetype == 'non-parsed': + last_samps = diff // (source_num) + elif filetype == 'parsed': + last_samps = diff # First sample is technically sample 0, not the start line in the file. first_samps = 0 @@ -388,42 +160,13 @@ def _read_segment_file(self, data, idx, fi, start, stop, cals, mult): Regardless of type, output has (n_montages x n_sources x n_detectors + n_marker_channels) rows, and (n_timepoints x n_blocks) columns. """ - import scipy.io as spio - source_num = self._raw_extras[fi]['source_num'] detect_num = self._raw_extras[fi]['detect_num'] start_line = self._raw_extras[fi]['start_line'] end_line = self._raw_extras[fi]['end_line'] filetype = self._raw_extras[fi]['filetype'] - data_types = self._raw_extras[fi]['data_types'] - montages = self._raw_extras[fi]['montages'] - blocks = self._raw_extras[fi]['blocks'] - mtg_mdf = self._raw_extras[fi]['mtg_mdf'] - boxy_files = self._raw_extras[fi]['files']['*.[000-999]*'] - event_fname = os.path.join(self._filenames[fi], 'evt') - - # Check if event files are available. 
- # Mostly for older boxy files since we'll be using the digaux channel - # for markers in further recordings. - try: - event_files = dict() - key = ('*.[000-999]*') - print(event_fname) - event_files[key] = [glob.glob('%s/*%s' % (event_fname, key))] - event_files[key] = event_files[key][0] - event_data = list() - - for file_num, i_file in enumerate(event_files[key]): - event_data.append(spio.loadmat( - event_files[key][file_num])['event']) - if event_data != list(): - print('Event file found!') - else: - print('No event file found. Using digaux!') - - except Exception: - print('No event file found. Using digaux!') - pass + datatype = self._raw_extras[fi]['datatype'] + boxy_files = self._raw_extras[fi]['files'] # Possible detector names. detectors = ['A', 'B', 'C', 'D', 'E', 'F', 'G', 'H', 'I', 'J', 'K', @@ -431,242 +174,97 @@ def _read_segment_file(self, data, idx, fi, start, stop, cals, mult): 'W', 'X', 'Y', 'Z'] # Load our optical data. - all_data = list() - all_markers = list() - - # Loop through montages. - for i_mtg, mtg_name in enumerate(montages): - all_blocks = list() - block_markers = list() - - # Loop through blocks. - for i_blk, blk_name in enumerate(blocks[i_mtg]): - file_num = i_blk + (i_mtg * len(blocks[i_mtg])) - boxy_file = boxy_files[file_num] - boxy_data = list() - - # Loop through our data. - with open(boxy_file, 'r') as data_file: - for line_num, i_line in enumerate(data_file, 1): - if line_num == (start_line[i_blk] - 1): - - # Grab column names. - col_names = np.asarray( - re.findall(r'\w+\-\w+|\w+\-\d+|\w+', - i_line.rsplit(' ')[0])) - if (line_num > start_line[file_num] and - line_num <= end_line[file_num]): - - # Grab actual data. - boxy_data.append(i_line.rsplit(' ')) - - # Get number of sources. - sources = np.arange(1, source_num[file_num] + 1, 1) - - # Grab the individual data points for each column. 
- boxy_data = [re.findall(r'[-+]?\d*\.?\d+', i_row[0]) - for i_row in boxy_data] - - # Make variable to store our data as an array - # rather than list of strings. - boxy_length = len(col_names) - boxy_array = np.full((len(boxy_data), boxy_length), np.nan) - for ii, i_data in enumerate(boxy_data): - - # Need to make sure our rows are the same length. - # This is done by padding the shorter ones. - padding = boxy_length - len(i_data) - boxy_array[ii] = np.pad(np.asarray(i_data, dtype=float), - (0, padding), mode='empty') - - # Grab data from the other columns that aren't AC, DC, or Ph. - meta_data = dict() - keys = ['time', 'record', 'group', 'exmux', 'step', 'mark', - 'flag', 'aux1', 'digaux'] - for i_detect in detectors[0:detect_num[file_num]]: - keys.append('bias-' + i_detect) - - # Data that isn't in our boxy file will be an empty list. - for key in keys: - meta_data[key] = (boxy_array[:, - np.where(col_names == key)[0][0]] if - key in col_names else list()) - - # Make some empty variables to store our data. - if filetype[file_num] == 'non-parsed': - data_ = np.zeros(((((detect_num[file_num] * - source_num[file_num]) * - len(data_types))), - int(len(boxy_data) / - source_num[file_num]))) - elif filetype[file_num] == 'parsed': - data_ = np.zeros(((((detect_num[file_num] * - source_num[file_num]) * - len(data_types))), - int(len(boxy_data)))) - - # Loop through data types. - for i_data in data_types: - - # Loop through detectors. - for i_detect in detectors[0:detect_num[file_num]]: - - # Loop through sources. - for i_source in sources: - - # Determine where to store our data. - index_loc = (detectors.index(i_detect) * - source_num[file_num] + - (i_source - 1) + - (data_types.index(i_data) * - (source_num[file_num] * - detect_num[file_num]))) - - # Need to treat our filetypes differently. - if filetype[file_num] == 'non-parsed': - - # Non-parsed saves timepoints in groups and - # this should account for that. 
- time_points = np.arange( - i_source - 1, - int(meta_data['record'][-1]) * - source_num[file_num], - source_num[file_num]) - - # Determine which channel to - # look for in boxy_array. - channel = np.where(col_names == i_detect + - '-' + i_data)[0][0] - - # Save our data based on data type. - data_[index_loc, :] = boxy_array[time_points, - channel] - - elif filetype[file_num] == 'parsed': - - # Which channel to look for in boxy_array. - channel = np.where(col_names == i_detect + - '-' + i_data + - str(i_source))[0][0] - - # Save our data based on data type. - data_[index_loc, :] = boxy_array[:, channel] - - # Phase unwrapping. - if i_data == 'Ph': - print('Fixing phase wrap') - # Accounts for sharp, sudden changes in phase - # such as crossing over from 0/360 degrees. - # Estimate mean phase of first 50 points. - # If a point differs more than 90 degrees from the - # mean, add or subtract 360 degrees from that point. - for i_chan in range(np.size(data_, axis=0)): - if np.mean(data_[i_chan, :50]) < 180: - wrapped_points = data_[i_chan, :] > 270 - data_[i_chan, wrapped_points] -= 360 - else: - wrapped_points = data_[i_chan, :] < 90 - data_[i_chan, wrapped_points] += 360 - - print('Detrending phase data') - # Remove trends and drifts that occur over time. - y = np.linspace(0, np.size(data_, axis=1) - 1, - np.size(data_, axis=1)) - x = np.transpose(y) - for i_chan in range(np.size(data_, axis=0)): - poly_coeffs = np.polyfit(x, data_[i_chan, :], 3) - tmp_ph = (data_[i_chan, :] - - np.polyval(poly_coeffs, x)) - data_[i_chan, :] = tmp_ph - - print('Removing phase mean') - # Subtract mean to better detect outliers using SD. - mrph = np.mean(data_, axis=1) - for i_chan in range(np.size(data_, axis=0)): - data_[i_chan, :] = (data_[i_chan, :] - - mrph[i_chan]) - - print('Removing phase outliers') - # Remove data points that are larger than three SDs. - ph_out_thr = 3 - - # Set ddof to 1 to mimic matlab. 
- sdph = np.std(data_, 1, ddof=1) - n_ph_out = np.zeros(np.size(data_, axis=0), - dtype=np.int8) - - for i_chan in range(np.size(data_, axis=0)): - outliers = np.where(np.abs(data_[i_chan, :]) > - (ph_out_thr * sdph[i_chan])) - outliers = outliers[0] - if len(outliers) > 0: - if outliers[0] == 0: - outliers = outliers[1:] - if len(outliers) > 0: - if (outliers[-1] == np.size(data_, - axis=1) - 1): - outliers = outliers[:-1] - n_ph_out[i_chan] = int(len(outliers)) - for i_pt in range(n_ph_out[i_chan]): - j_pt = outliers[i_pt] - data_[i_chan, j_pt] = ( - (data_[i_chan, j_pt - 1] + - data_[i_chan, j_pt + 1]) / 2) - - # Convert phase to pico seconds. - for i_chan in range(np.size(data_, axis=0)): - data_[i_chan, :] = ((1e12 * data_[i_chan, :]) / - (360 * mtg_mdf[i_mtg][i_chan])) - - # Swap channels to match new wavelength order. - for i_chan in range(0, len(data_), 2): - data_[[i_chan, i_chan + 1]] = data_[[i_chan + 1, i_chan]] - - # If there was an event file, place those events in our data. - # If no, use digaux for our events. - try: - temp_markers = np.zeros((len(data_[0, :]),)) - for event_num, event_info in enumerate( - event_data[file_num]): - temp_markers[event_info[0] - 1] = event_info[1] - block_markers.append(temp_markers) - except Exception: - - # Add our markers to the data array based on filetype. - if type(meta_data['digaux']) is not list: - if filetype[file_num] == 'non-parsed': - block_markers.append( - meta_data['digaux'] - [np.arange(0, len(meta_data['digaux']), - source_num[file_num])]) - elif filetype[file_num] == 'parsed': - block_markers.append(meta_data['digaux']) - else: - block_markers.append(np.zeros((len(data_[0, :]),))) - - # Check our markers to see if anything is actually in there. 
- if (all(i_mrk == 0 for i_mrk in block_markers[i_blk]) or - all(i_mrk == 255 for i_mrk in block_markers[i_blk])): - print('No markers for montage ' + mtg_name + - ' and block ' + blk_name) - else: - print('Found markers for montage ' + mtg_name + - ' and block ' + blk_name + '!') - - # Change marker for last timepoint to indicate end of block - # We'll be using digaux to send markers, a serial port, - # so we can send values between 1-255. - # We'll multiply our block start/end markers by 1000 to ensure - # we aren't within the 1-255 range. - block_markers[i_blk][-1] = int(blk_name) * 1000 - - all_blocks.append(data_) - - all_data.extend(np.hstack(all_blocks)) - all_markers.append(np.hstack(block_markers)) - - # Add markers to our data. - all_data.extend(all_markers) + boxy_data = list() + + # Loop through our data. + with open(boxy_files, 'r') as data_file: + for line_num, i_line in enumerate(data_file, 1): + if line_num == (start_line - 1): + + # Grab column names. + col_names = np.asarray(re.findall(r'\w+\-\w+|\w+\-\d+|\w+', + i_line.rsplit(' ')[0])) + if (line_num > start_line and line_num <= end_line): + + # Grab actual data. + boxy_data.append(i_line.rsplit(' ')) + + # Get number of sources. + sources = np.arange(1, source_num + 1, 1) + + # Grab the individual data points for each column. + boxy_data = [re.findall(r'[-+]?\d*\.?\d+', i_row[0]) + for i_row in boxy_data] + + # Make variable to store our data as an array + # rather than list of strings. + boxy_length = len(col_names) + boxy_array = np.full((len(boxy_data), boxy_length), np.nan) + for ii, i_data in enumerate(boxy_data): + + # Need to make sure our rows are the same length. + # This is done by padding the shorter ones. + padding = boxy_length - len(i_data) + boxy_array[ii] = np.pad(np.asarray(i_data, dtype=float), + (0, padding), mode='empty') + + # Grab data from the other columns that aren't AC, DC, or Ph. 
+ meta_data = dict() + keys = ['time', 'record', 'group', 'exmux', 'step', 'mark', 'flag', + 'aux1', 'digaux'] + for i_detect in detectors[0:detect_num]: + keys.append('bias-' + i_detect) + + # Data that isn't in our boxy file will be an empty list. + for key in keys: + meta_data[key] = (boxy_array[:, np.where(col_names == key)[0][0]] + if key in col_names else list()) + + # Make some empty variables to store our data. + if filetype == 'non-parsed': + all_data = np.zeros(((detect_num * source_num), + int(len(boxy_data) / source_num))) + elif filetype == 'parsed': + all_data = np.zeros(((detect_num * source_num), + int(len(boxy_data)))) + + # Loop through detectors. + for i_detect in detectors[0:detect_num]: + + # Loop through sources. + for i_source in sources: + + # Determine where to store our data. + index_loc = (detectors.index(i_detect) * source_num + + (i_source - 1)) + + # Need to treat our filetypes differently. + if filetype == 'non-parsed': + + # Non-parsed saves timepoints in groups and + # this should account for that. + time_points = np.arange(i_source - 1, + int(meta_data['record'][-1]) * + source_num, source_num) + + # Determine which channel to + # look for in boxy_array. + channel = np.where(col_names == i_detect + '-' + + datatype)[0][0] + + # Save our data based on data type. + all_data[index_loc, :] = boxy_array[time_points, channel] + + elif filetype == 'parsed': + + # Which channel to look for in boxy_array. + channel = np.where(col_names == i_detect + '-' + datatype + + str(i_source))[0][0] + + # Save our data based on data type. + all_data[index_loc, :] = boxy_array[:, channel] + + # Change data to array. 
all_data = np.asarray(all_data) print('Blank Data shape: ', data.shape) diff --git a/mne/io/boxy/tests/test_boxy.py b/mne/io/boxy/tests/test_boxy.py index ed4a75014fb..47e273904aa 100644 --- a/mne/io/boxy/tests/test_boxy.py +++ b/mne/io/boxy/tests/test_boxy.py @@ -1,226 +1,41 @@ -# -*- coding: utf-8 -*- -# Authors: Robert Luke -# Eric Larson -# simplified BSD-3 license +# Authors: Kyle Mathewson, Jonathan Kuziek +# +# License: BSD (3-clause) -import os.path as op -import shutil +import os -import pytest -from numpy.testing import assert_allclose, assert_array_equal +import numpy as np +import scipy.io as spio +import mne from mne.datasets.testing import data_path, requires_testing_data -from mne.io import read_raw_nirx -from mne.io.tests.test_raw import _test_raw_reader -from mne.transforms import apply_trans, _get_trans -from mne.utils import run_tests_if_main -from mne.preprocessing.nirs import source_detector_distances,\ - short_channels - -fname_nirx_15_0 = op.join(data_path(download=False), - 'NIRx', 'nirx_15_0_recording') -fname_nirx_15_2 = op.join(data_path(download=False), - 'NIRx', 'nirx_15_2_recording') -fname_nirx_15_2_short = op.join(data_path(download=False), - 'NIRx', 'nirx_15_2_recording_w_short') @requires_testing_data -def test_nirx_15_2_short(): - """Test reading NIRX files.""" - raw = read_raw_nirx(fname_nirx_15_2_short, preload=True) - - # Test data import - assert raw._data.shape == (26, 145) - assert raw.info['sfreq'] == 12.5 - - # Test channel naming - assert raw.info['ch_names'][:4] == ["S1_D1 760", "S1_D1 850", - "S1_D9 760", "S1_D9 850"] - assert raw.info['ch_names'][24:26] == ["S5_D13 760", "S5_D13 850"] - - # Test frequency encoding - assert raw.info['chs'][0]['loc'][9] == 760 - assert raw.info['chs'][1]['loc'][9] == 850 - - # Test info import - assert raw.info['subject_info'] == dict(sex=1, first_name="MNE", - middle_name="Test", - last_name="Recording") - - # Test distance between optodes matches values from - # nirsite 
https://github.com/mne-tools/mne-testing-data/pull/51 - # step 4 figure 2 - allowed_distance_error = 0.0002 - distances = source_detector_distances(raw.info) - assert_allclose(distances[::2], [ - 0.0304, 0.0078, 0.0310, 0.0086, 0.0416, - 0.0072, 0.0389, 0.0075, 0.0558, 0.0562, - 0.0561, 0.0565, 0.0077], atol=allowed_distance_error) - - # Test which channels are short - # These are the ones marked as red at - # https://github.com/mne-tools/mne-testing-data/pull/51 step 4 figure 2 - is_short = short_channels(raw.info) - assert_array_equal(is_short[:9:2], [False, True, False, True, False]) - is_short = short_channels(raw.info, threshold=0.003) - assert_array_equal(is_short[:3:2], [False, False]) - is_short = short_channels(raw.info, threshold=50) - assert_array_equal(is_short[:3:2], [True, True]) - - # Test trigger events - assert_array_equal(raw.annotations.description, ['3.0', '2.0', '1.0']) - - # Test location of detectors - # The locations of detectors can be seen in the first - # figure on this page... - # https://github.com/mne-tools/mne-testing-data/pull/51 - # And have been manually copied below - # These values were reported in mm, but according to this page... 
- # https://mne.tools/stable/auto_tutorials/intro/plot_40_sensor_locations.html - # 3d locations should be specified in meters, so that's what's tested below - # Detector locations are stored in the third three loc values - allowed_dist_error = 0.0002 - locs = [ch['loc'][6:9] for ch in raw.info['chs']] - head_mri_t, _ = _get_trans('fsaverage', 'head', 'mri') - mni_locs = apply_trans(head_mri_t, locs) - - assert raw.info['ch_names'][0][3:5] == 'D1' - assert_allclose( - mni_locs[0], [-0.0841, -0.0464, -0.0129], atol=allowed_dist_error) - - assert raw.info['ch_names'][4][3:5] == 'D3' - assert_allclose( - mni_locs[4], [0.0846, -0.0142, -0.0156], atol=allowed_dist_error) - - assert raw.info['ch_names'][8][3:5] == 'D2' - assert_allclose( - mni_locs[8], [0.0207, -0.1062, 0.0484], atol=allowed_dist_error) - - assert raw.info['ch_names'][12][3:5] == 'D4' - assert_allclose( - mni_locs[12], [-0.0196, 0.0821, 0.0275], atol=allowed_dist_error) - - assert raw.info['ch_names'][16][3:5] == 'D5' - assert_allclose( - mni_locs[16], [-0.0360, 0.0276, 0.0778], atol=allowed_dist_error) - - assert raw.info['ch_names'][19][3:5] == 'D6' - assert_allclose( - mni_locs[19], [0.0352, 0.0283, 0.0780], atol=allowed_dist_error) - - assert raw.info['ch_names'][21][3:5] == 'D7' - assert_allclose( - mni_locs[21], [0.0388, -0.0477, 0.0932], atol=allowed_dist_error) - - -@requires_testing_data -def test_encoding(tmpdir): - """Test NIRx encoding.""" - fname = str(tmpdir.join('latin')) - shutil.copytree(fname_nirx_15_2, fname) - hdr_fname = op.join(fname, 'NIRS-2019-10-02_003.hdr') - hdr = list() - with open(hdr_fname, 'rb') as fid: - hdr.extend(line for line in fid) - hdr[2] = b'Date="jeu. 13 f\xe9vr. 
2020"\r\n' - with open(hdr_fname, 'wb') as fid: - for line in hdr: - fid.write(line) - # smoke test - read_raw_nirx(fname) - - -@requires_testing_data -def test_nirx_15_2(): - """Test reading NIRX files.""" - raw = read_raw_nirx(fname_nirx_15_2, preload=True) - - # Test data import - assert raw._data.shape == (64, 67) - assert raw.info['sfreq'] == 3.90625 - - # Test channel naming - assert raw.info['ch_names'][:4] == ["S1_D1 760", "S1_D1 850", - "S1_D10 760", "S1_D10 850"] - - # Test info import - assert raw.info['subject_info'] == dict(sex=1, first_name="TestRecording") - - # Test trigger events - assert_array_equal(raw.annotations.description, ['4.0', '6.0', '2.0']) - - # Test location of detectors - allowed_dist_error = 0.0002 - locs = [ch['loc'][6:9] for ch in raw.info['chs']] - head_mri_t, _ = _get_trans('fsaverage', 'head', 'mri') - mni_locs = apply_trans(head_mri_t, locs) - - assert raw.info['ch_names'][0][3:5] == 'D1' - assert_allclose( - mni_locs[0], [-0.0292, 0.0852, -0.0142], atol=allowed_dist_error) - - assert raw.info['ch_names'][15][3:5] == 'D4' - assert_allclose( - mni_locs[15], [-0.0739, -0.0756, -0.0075], atol=allowed_dist_error) - - -@requires_testing_data -def test_nirx_15_0(): - """Test reading NIRX files.""" - raw = read_raw_nirx(fname_nirx_15_0, preload=True) - - # Test data import - assert raw._data.shape == (20, 92) - assert raw.info['sfreq'] == 6.25 - - # Test channel naming - assert raw.info['ch_names'][:12] == ["S1_D1 760", "S1_D1 850", - "S2_D2 760", "S2_D2 850", - "S3_D3 760", "S3_D3 850", - "S4_D4 760", "S4_D4 850", - "S5_D5 760", "S5_D5 850", - "S6_D6 760", "S6_D6 850"] - - # Test info import - assert raw.info['subject_info'] == {'first_name': 'NIRX', - 'last_name': 'Test', 'sex': '0'} - - # Test trigger events - assert_array_equal(raw.annotations.description, ['1.0', '2.0', '2.0']) - - # Test location of detectors - allowed_dist_error = 0.0002 - locs = [ch['loc'][6:9] for ch in raw.info['chs']] - head_mri_t, _ = 
_get_trans('fsaverage', 'head', 'mri') - mni_locs = apply_trans(head_mri_t, locs) - - assert raw.info['ch_names'][0][3:5] == 'D1' - assert_allclose( - mni_locs[0], [0.0287, -0.1143, -0.0332], atol=allowed_dist_error) - - assert raw.info['ch_names'][15][3:5] == 'D8' - assert_allclose( - mni_locs[15], [-0.0693, -0.0480, 0.0657], atol=allowed_dist_error) - - # Test distance between optodes matches values from - allowed_distance_error = 0.0002 - distances = source_detector_distances(raw.info) - assert_allclose(distances[::2], [ - 0.0301, 0.0315, 0.0343, 0.0368, 0.0408, - 0.0399, 0.0393, 0.0367, 0.0336, 0.0447], atol=allowed_distance_error) - - -@requires_testing_data -@pytest.mark.parametrize('fname, boundary_decimal', ( - [fname_nirx_15_2_short, 1], - [fname_nirx_15_2, 0], - [fname_nirx_15_0, 0] -)) -def test_nirx_standard(fname, boundary_decimal): - """Test standard operations.""" - _test_raw_reader(read_raw_nirx, fname=fname, - boundary_decimal=boundary_decimal) # low fs - - -run_tests_if_main() +def test_boxy_load(): + """Test reading BOXY files.""" + # Determine to which decimal place we will compare. + thresh = 1e-10 + + # Load AC, DC, and Phase data. + boxy_raw_dir = os.path.join(data_path(download=False), + 'BOXY', 'boxy_short_recording') + + mne_dc = mne.io.read_raw_boxy(boxy_raw_dir, 'DC', verbose=True).load_data() + mne_ac = mne.io.read_raw_boxy(boxy_raw_dir, 'AC', verbose=True).load_data() + mne_ph = mne.io.read_raw_boxy(boxy_raw_dir, 'Ph', verbose=True).load_data() + + # Load p_pod data. + p_pod_dir = os.path.join(data_path(download=False), + 'BOXY', 'boxy_short_recording', + 'boxy_p_pod_files', '1anc071a_001.mat') + ppod_data = spio.loadmat(p_pod_dir) + + ppod_ac = np.transpose(ppod_data['ac']) + ppod_dc = np.transpose(ppod_data['dc']) + ppod_ph = np.transpose(ppod_data['ph']) + + # Compare MNE loaded data to p_pod loaded data. 
+ assert (abs(ppod_ac - mne_ac._data) <= thresh).all() + assert (abs(ppod_dc - mne_dc._data) <= thresh).all() + assert (abs(ppod_ph - mne_ph._data) <= thresh).all() diff --git a/tutorials/io/plot_40_reading_boxy_data.py b/tutorials/io/plot_40_reading_boxy_data.py new file mode 100644 index 00000000000..1120bbc87c0 --- /dev/null +++ b/tutorials/io/plot_40_reading_boxy_data.py @@ -0,0 +1,30 @@ +# -*- coding: utf-8 -*- +r""" +.. _tut-importing-boxy-data: + +========================================================= +Importing data from BOXY software and ISS Imagent devices +========================================================= + +MNE includes various functions and utilities for reading optical imaging +data and optode locations. + +.. contents:: Page contents + :local: + :depth: 2 + + +.. _import-boxy: + +BOXY (directory) +================================ + +BOXY recordings can be read in using :func:`mne.io.read_raw_boxy`. +The BOXY software and Imagent devices store data in a single .txt file +containing DC, AC, and Phase information for each source and detector +combination. Recording settings, such as the number of sources/detectors, and +the sampling rate of the recording, are also saved at the beginning of this +file. MNE will extract the raw DC, AC, and Phase data, along with the recording +settings. + +""" # noqa:E501 diff --git a/tutorials/preprocessing/plot_80_boxy_processing.py b/tutorials/preprocessing/plot_80_boxy_processing.py deleted file mode 100644 index 9220957a129..00000000000 --- a/tutorials/preprocessing/plot_80_boxy_processing.py +++ /dev/null @@ -1,422 +0,0 @@ -""" -.. _tut-boxy-processing: - -Preprocessing optical imaging data from the Imagent hardware/BOXY software -========================================================================== - -This tutorial covers how to convert optical imaging data from raw measurements -to relative oxyhaemoglobin (HbO) and deoxyhaemoglobin (HbR) concentration. 
-Phase data from the recording is also processed and plotted in several ways -in the latter half. - -.. contents:: Page contents - :local: - :depth: 2 - -Here we will work with the :ref:`BOXY example data `. -""" -# sphinx_gallery_thumbnail_number = 1 - -import os -import matplotlib.pyplot as plt -import re as re -import numpy as np - -import mne - -# Get our data -boxy_data_folder = mne.datasets.boxy_example.data_path() -boxy_raw_dir = os.path.join(boxy_data_folder, 'Participant-1') - -# Load AC and Phase data -raw_intensity_ac = mne.io.read_raw_boxy(boxy_raw_dir, 'AC', - verbose=True).load_data() - -# Plot the raw data -scalings = dict(fnirs_cw_amplitude=2e2, fnirs_fd_phase=4e3, fnirs_od=2, - hbo=2e-3, hbr=2e-3) - -raw_intensity_ac.plot(n_channels=10, duration=20, scalings=scalings, - show_scrollbars=True) - -############################################################################### -# View location of sensors over brain surface -# ------------------------------------------- -# -# Here we validate that the location of sources-detector pairs and channels -# are in the expected locations. Sources are bright red dots, detectors are -# dark red dots, with source-detector pairs connected by white lines. 
- -subjects_dir = os.path.dirname(mne.datasets.fetch_fsaverage()) - -fig = mne.viz.create_3d_figure(size=(800, 600), bgcolor='white') -fig = mne.viz.plot_alignment(raw_intensity_ac.info, - show_axes=True, - subject='fsaverage', - trans='fsaverage', - surfaces=['head-dense', 'brain'], - fnirs=['sources', 'detectors', 'pairs'], - mri_fiducials=True, - dig=True, - subjects_dir=subjects_dir, - fig=fig) -mne.viz.set_3d_view(figure=fig, azimuth=20, elevation=55, distance=0.6) - -############################################################################### -# Selecting channels appropriate for detecting neural responses -# ------------------------------------------------------------- -# -# First we remove channels that are too close together (short channels) to -# detect a neural response (less than 1 cm distance between optodes). -# These short channels can be seen in the figure above. -# To achieve this we pick all the channels not considered to be short. - -picks = mne.pick_types(raw_intensity_ac.info, meg=False, fnirs=True, stim=True) - -dists = mne.preprocessing.nirs.source_detector_distances( - raw_intensity_ac.info, picks=picks) - -# Grab our marker channels so they don't get thrown out later -markers = [i_index for i_index, i_label - in enumerate(raw_intensity_ac.info['ch_names']) - if re.search(r'Markers ', i_label)] - -raw_intensity_ac.pick(picks[dists > 0.01].tolist() + markers) - -############################################################################### -# Converting from raw intensity to optical density -# ------------------------------------------------ -# -# The raw intensity values are then converted to optical density. -# We will only do this for either DC or AC data since they are measures of -# light intensity. 
- -raw_od = mne.preprocessing.nirs.optical_density(raw_intensity_ac) - -raw_od.plot(n_channels=len(raw_od.ch_names), - duration=500, show_scrollbars=False, scalings=scalings) - -############################################################################### -# Converting from optical density to haemoglobin -# ---------------------------------------------- -# -# Next we convert the optical density data to haemoglobin concentration using -# the modified Beer-Lambert law. - -raw_haemo = mne.preprocessing.nirs.beer_lambert_law(raw_od) - -raw_haemo.plot(n_channels=len(raw_haemo.ch_names), duration=500, - show_scrollbars=False, scalings=scalings) - -############################################################################### -# Removing heart rate from signal -# ------------------------------- -# -# The haemodynamic response has frequency content predominantly below 0.5 Hz. -# An increase in activity around 1 Hz can be seen in the data that is due to -# the person's heart beat and is unwanted. So we use a low pass filter to -# remove this. A high pass filter is also included to remove slow drifts -# in the data. - -fig, axes = plt.subplots(nrows=1, ncols=2, figsize=(15, 6)) - -fig = raw_haemo.plot_psd(average=True, ax=axes) -fig.suptitle('Before filtering', weight='bold', size='x-large') -fig.subplots_adjust(top=0.88) - -raw_haemo = raw_haemo.filter(0.05, 0.7) - -fig, axes = plt.subplots(nrows=1, ncols=2, figsize=(15, 6)) -fig = raw_haemo.plot_psd(average=True, ax=axes) -fig.suptitle('After filtering', weight='bold', size='x-large') -fig.subplots_adjust(top=0.88) - -############################################################################### -# Extract epochs -# -------------- -# -# Now that the signal has been converted to relative haemoglobin -# concentration, and the unwanted heart rate component has been removed, -# we can extract epochs related to each of the experimental conditions. 
-# -# First we extract the events of interest and visualise them to -# ensure they are correct. -# -# Since our events and timings for this data set are the same -# across montages, we will just use the 'Markers b' channel to find events - -all_events = mne.find_events(raw_intensity_ac, stim_channel=['Markers b']) - - -all_event_dict = {'Event_1': 1, - 'Event_2': 2, - 'Block 1 End': 1000, - 'Block 2 End': 2000} - -############################################################################### -# Next we define the range of our epochs, the rejection criteria, -# baseline correction, and extract the epochs. We visualise the log of which -# epochs were dropped. - -reject_criteria = None -tmin_ph, tmax_ph = -0.2, 2 -tmin_ac, tmax_ac = -2, 10 - -all_haemo_epochs = mne.Epochs(raw_haemo, all_events, - event_id=all_event_dict, tmin=tmin_ac, - tmax=tmax_ac, reject=reject_criteria, - reject_by_annotation=False, proj=True, - baseline=(None, 0), preload=True, detrend=None, - verbose=True, event_repeated='drop') - -# Plot epochs -fig = all_haemo_epochs.plot(scalings=scalings) - -############################################################################### -# View consistency of responses across trials -# ------------------------------------------- -# -# Now we can view the haemodynamic response for our different events. 
- -vmin_ac = -60 -vmax_ac = 60 - -all_haemo_epochs['Event_1'].plot_image(combine='mean', vmin=vmin_ac, - vmax=vmax_ac, ts_args=dict( - ylim=dict(hbo=[vmin_ac, vmax_ac], - hbr=[vmin_ac, vmax_ac])), - title='Haemo Event 1') - -all_haemo_epochs['Event_2'].plot_image(combine='mean', vmin=vmin_ac, - vmax=vmax_ac, ts_args=dict( - ylim=dict(hbo=[vmin_ac, vmax_ac], - hbr=[vmin_ac, vmax_ac])), - title='Haemo Event 2') - -############################################################################### -# Compare Events 1 and 2 -# ---------------------- - -# Evoked Activity -evoked_event_1_ac = all_haemo_epochs['Event_1'].average() -evoked_event_2_ac = all_haemo_epochs['Event_2'].average() - -fig, axes = plt.subplots(nrows=1, ncols=3, figsize=(15, 6)) - -axes[0].plot(evoked_event_1_ac.times, - np.sqrt((evoked_event_1_ac.copy().pick('hbo') - ._data ** 2).mean(axis=0)) * 1e6, 'r', - evoked_event_1_ac.times, - np.sqrt((evoked_event_1_ac.copy().pick('hbr') - ._data ** 2).mean(axis=0)) * 1e6, 'b', - evoked_event_1_ac.times, - ((np.sqrt((evoked_event_1_ac.copy().pick('hbo') - ._data ** 2).mean(axis=0)) * 1e6) - - (np.sqrt((evoked_event_1_ac.copy().pick('hbr') - ._data ** 2).mean(axis=0)) * 1e6)), 'g') -axes[0].set_ylim([-40, 100]) -axes[0].set_xlabel('Time (s)') -axes[0].set_ylabel('\u03BCM') -axes[0].set_title('Event 1') -axes[0].legend(['HBO', 'HBR', 'Diff']) - -axes[1].plot(evoked_event_2_ac.times, - np.sqrt((evoked_event_2_ac.copy().pick('hbo') - ._data ** 2).mean(axis=0)) * 1e6, 'r', - evoked_event_2_ac.times, - np.sqrt((evoked_event_2_ac.copy().pick('hbr') - ._data ** 2).mean(axis=0)) * 1e6, 'b', - evoked_event_2_ac.times, - ((np.sqrt((evoked_event_2_ac.copy().pick('hbo') - ._data ** 2).mean(axis=0)) * 1e6) - - (np.sqrt((evoked_event_2_ac.copy().pick('hbr') - ._data ** 2).mean(axis=0)) * 1e6)), 'g') -axes[1].set_ylim([-40, 100]) -axes[1].set_xlabel('Time (s)') -axes[1].set_ylabel('\u03BCM') -axes[1].set_title('Event 2') -axes[1].legend(['HBO', 'HBR', 'Diff']) - 
-axes[2].plot(evoked_event_1_ac.times, - ((np.sqrt((evoked_event_1_ac.copy().pick('hbo') - ._data ** 2).mean(axis=0)) * 1e6) - - (np.sqrt((evoked_event_1_ac.copy().pick('hbr') - ._data ** 2).mean(axis=0)) * 1e6)), 'm', - evoked_event_1_ac.times, - ((np.sqrt((evoked_event_2_ac.copy().pick('hbo') - ._data ** 2).mean(axis=0)) * 1e6) - - (np.sqrt((evoked_event_2_ac.copy().pick('hbr') - ._data ** 2).mean(axis=0)) * 1e6)), 'c', - evoked_event_1_ac.times, - ((np.sqrt((evoked_event_1_ac.copy().pick('hbo') - ._data ** 2).mean(axis=0)) * 1e6) - - (np.sqrt((evoked_event_1_ac.copy().pick('hbr') - ._data ** 2).mean(axis=0)) * 1e6)) - - ((np.sqrt((evoked_event_2_ac.copy().pick('hbo') - ._data ** 2).mean(axis=0)) * 1e6) - - (np.sqrt((evoked_event_2_ac.copy().pick('hbr') - ._data ** 2).mean(axis=0)) * 1e6)), 'k') -axes[2].set_ylim([-40, 100]) -axes[2].set_xlabel('Time (s)') -axes[2].set_ylabel('\u03BCM') -axes[2].set_title('HBO - HBR') -axes[2].legend(['Event 1', 'Event 2', 'Diff']) - -# Topographies -fig, axes = plt.subplots(nrows=2, ncols=3, figsize=(9, 5), - gridspec_kw=dict(width_ratios=[1, 1, 0.1])) - -topomap_args = dict(extrapolate='local', size=1, res=256, sensors='k.') -times = 1.0 - -all_haemo_epochs['Event_1'].average(picks='hbo').plot_topomap(times=times, - axes=axes[0, 0], - colorbar=False, - **topomap_args) - -all_haemo_epochs['Event_2'].average(picks='hbo').plot_topomap(times=times, - axes=axes[0, 1:], - colorbar=True, - **topomap_args) - -all_haemo_epochs['Event_1'].average(picks='hbr').plot_topomap(times=times, - axes=axes[1, 0], - colorbar=False, - **topomap_args) - -all_haemo_epochs['Event_2'].average(picks='hbr').plot_topomap(times=times, - axes=axes[1, 1:], - colorbar=True, - **topomap_args) - -for column, condition in enumerate(['Event 1', 'Event 2']): - for row, chroma in enumerate(['HBO', 'HBR']): - axes[row, column].set_title('{}: {}'.format(chroma, condition)) -fig.tight_layout() - 
-############################################################################### -# Extracting and Plotting Phase Data -# ---------------------------------- -# Now we can extract phase data from the boxy file and generate similar -# plots as done above with the AC data. - -# Get our data -boxy_data_folder = mne.datasets.boxy_example.data_path() -boxy_raw_dir = os.path.join(boxy_data_folder, 'Participant-1') - -# Load Phase data -raw_intensity_ph = mne.io.read_raw_boxy(boxy_raw_dir, 'Ph', - verbose=True).load_data() - -# Plot the raw data -scalings = dict(fnirs_raw=2e2, fnirs_ph=4e3, fnirs_od=2, - hbo=2e-3, hbr=2e-3) - -raw_intensity_ph.plot(n_channels=10, duration=20, scalings=scalings, - show_scrollbars=True) - -############################################################################### -# Selecting channels appropriate for detecting neural responses -# ------------------------------------------------------------- -# -# First we remove channels that are too close together (short channels) to -# detect a neural response (less than 1 cm distance between optodes). -# These short channels can be seen in the figure above. -# To achieve this we pick all the channels not considered to be short. 
- -picks = mne.pick_types(raw_intensity_ph.info, meg=False, fnirs=True, stim=True) - -dists = mne.preprocessing.nirs.source_detector_distances( - raw_intensity_ph.info, picks=picks) - -# Grab our marker channels so they don't get thrown out later -markers = [i_index for i_index, i_label - in enumerate(raw_intensity_ph.info['ch_names']) - if re.search(r'Markers ', i_label)] - -raw_intensity_ph.pick(picks[dists > 0.01].tolist() + markers) - -############################################################################### -# Extract epochs -# -------------- - -all_events = mne.find_events(raw_intensity_ph, stim_channel=['Markers b']) - - -all_event_dict = {'Event_1': 1, - 'Event_2': 2, - 'Block 1 End': 1000, - 'Block 2 End': 2000} - -############################################################################### -# Next we define the range of our epochs, the rejection criteria, -# baseline correction, and extract the epochs. We visualise the log of which -# epochs were dropped. - -reject_criteria = None -tmin_ph, tmax_ph = -0.2, 2 - -all_phase_epochs = mne.Epochs(raw_intensity_ph, all_events, - event_id=all_event_dict, tmin=tmin_ph, - tmax=tmax_ph, reject=None, - reject_by_annotation=False, proj=False, - baseline=(-0.2, 0), preload=True, - detrend=None, verbose=True, - event_repeated='drop') - -# Plot epochs -fig = all_phase_epochs.plot(scalings=scalings) - -############################################################################### -# View consistency of responses across trials -# ------------------------------------------- - -vmin_ph = -180 -vmax_ph = 180 - -all_phase_epochs['Event_1'].plot_image(combine='mean', vmin=vmin_ph, - vmax=vmax_ph, title='Phase Event 1') - -all_phase_epochs['Event_2'].plot_image(combine='mean', vmin=vmin_ph, - vmax=vmax_ph, title='Phase Event 2') - -############################################################################### -# Compare Events 1 and 2 -# ---------------------- - -# Evoked Activity -evoked_event_1_ph = 
all_phase_epochs['Event_1'].average() -evoked_event_2_ph = all_phase_epochs['Event_2'].average() -evoked_diff_ph = mne.combine_evoked([evoked_event_1_ph, -evoked_event_2_ph], - weights='equal') - -evoked_dict_ph = {'Event_1': evoked_event_1_ph, 'Event_2': evoked_event_2_ph, - 'Difference': evoked_diff_ph} - -color_dict = {'Event_1': 'r', 'Event_2': 'b', 'Difference': 'g'} - -mne.viz.plot_compare_evokeds(evoked_dict_ph, combine='mean', ci=0.95, - colors=color_dict, title='Phase') - -# Topographies -topomap_args = dict(extrapolate='local', size=1, res=256, sensors='k.') -times = 1.0 - -fig, axes = plt.subplots(nrows=1, ncols=4, figsize=(9, 5), - gridspec_kw=dict(width_ratios=[1, 1, 1, 0.1])) -vmin, vmax, ts = -0.192, 0.992, 0.1 -vmin = -180 -vmax = 180 - -evoked_event_1_ph.plot_topomap(times=ts, axes=axes[0], vmin=vmin, vmax=vmax, - colorbar=False, **topomap_args) - -evoked_event_2_ph.plot_topomap(times=ts, axes=axes[1], vmin=vmin, vmax=vmax, - colorbar=False, **topomap_args) - -evoked_diff_ph.plot_topomap(times=ts, axes=axes[2:], vmin=vmin, vmax=vmax, - colorbar=True, **topomap_args) - -for column, condition in enumerate(['Event 1', 'Event 2', 'Difference']): - axes[column].set_title('Phase: {}'.format(condition)) -fig.tight_layout() From 0132e09578f35a71cf42ef0f0544011bdf385329 Mon Sep 17 00:00:00 2001 From: kuziekj Date: Thu, 6 Aug 2020 11:10:56 -0600 Subject: [PATCH 139/167] Events from raw data file are added as annotations (#27) * events from raw data file are added as annotations * ignores conssecutive trigger repeats in digaux, added tests for filetypes and digaux * fixed some typos * fixed more spelling and style errors --- mne/io/boxy/boxy.py | 60 +++++++++++-- mne/io/boxy/tests/test_boxy.py | 156 +++++++++++++++++++++++++++++++++ 2 files changed, 211 insertions(+), 5 deletions(-) diff --git a/mne/io/boxy/boxy.py b/mne/io/boxy/boxy.py index 719cb5106ac..8ec48076394 100644 --- a/mne/io/boxy/boxy.py +++ b/mne/io/boxy/boxy.py @@ -10,6 +10,7 @@ from ..base 
import BaseRaw from ..meas_info import create_info from ...utils import logger, verbose, fill_doc +from ...annotations import Annotations @fill_doc @@ -78,6 +79,11 @@ def __init__(self, fname, datatype='AC', preload=False, verbose=None): # Read header file and grab some info. filetype = 'parsed' + start_line = 0 + end_line = 0 + mrk_col = 0 + mrk_data = list() + col_names = list() with open(files[key][0], 'r') as data: for line_num, i_line in enumerate(data, 1): if '#DATA ENDS' in i_line: @@ -95,8 +101,32 @@ def __init__(self, fname, datatype='AC', preload=False, verbose=None): elif '#DATA BEGINS' in i_line: # Data should start a couple lines later. start_line = line_num + 2 - elif 'exmux' in i_line: - filetype = 'non-parsed' + if start_line > 0 & end_line == 0: + if line_num == start_line - 1: + # Grab names for each column of data. + col_names = np.asarray(re.findall( + r'\w+\-\w+|\w+\-\d+|\w+', i_line.rsplit(' ')[0])) + if 'exmux' in col_names: + # Change filetype based on data organisation. + filetype = 'non-parsed' + if 'digaux' in col_names: + mrk_col = np.where(col_names == 'digaux')[0][0] + # Need to treat parsed and non-parsed files differently. + elif (mrk_col > 0 and line_num > start_line and + filetype == 'non-parsed'): + # Non-parsed files have different lines lengths. + crnt_line = i_line.rsplit(' ')[0] + temp_data = re.findall(r'[-+]?\d*\.?\d+', crnt_line) + if len(temp_data) == len(col_names): + mrk_data.append(float( + re.findall(r'[-+]?\d*\.?\d+', crnt_line) + [mrk_col])) + elif (mrk_col > 0 and line_num > start_line + and filetype == 'parsed'): + # Parsed files have the same line lengths for data. + crnt_line = i_line.rsplit(' ')[0] + mrk_data.append(float( + re.findall(r'[-+]?\d*\.?\d+', crnt_line)[mrk_col])) # Label each channel in our data. 
# Data is organised by channels x timepoint, where the first @@ -126,6 +156,7 @@ def __init__(self, fname, datatype='AC', preload=False, verbose=None): 'filetype': filetype, 'files': files[key][0], 'datatype': datatype, + 'srate': srate, } # Make sure data lengths are the same. @@ -153,6 +184,28 @@ def __init__(self, fname, datatype='AC', preload=False, verbose=None): last_samps=[last_samps - 1], raw_extras=[raw_extras], verbose=verbose) + # Now let's grab our markers, if they are present. + if len(mrk_data) != 0: + mrk_data = np.asarray(mrk_data) + # We only want the first instance of each trigger. + prev_mrk = 0 + mrk_idx = list() + duration = list() + tmp_dur = 0 + for i_num, i_mrk in enumerate(mrk_data): + if i_mrk != 0 and i_mrk != prev_mrk: + mrk_idx.append(i_num) + if i_mrk != 0 and i_mrk == prev_mrk: + tmp_dur += 1 + if i_mrk == 0 and i_mrk != prev_mrk: + duration.append((tmp_dur + 1) * (1.0 / srate)) + tmp_dur = 0 + prev_mrk = i_mrk + onset = [i_mrk * (1.0 / srate) for i_mrk in mrk_idx] + description = [float(i_mrk)for i_mrk in mrk_data[mrk_idx]] + annot = Annotations(onset, duration, description) + self.set_annotations(annot) + def _read_segment_file(self, data, idx, fi, start, stop, cals, mult): """Read a segment of data from a file. @@ -264,9 +317,6 @@ def _read_segment_file(self, data, idx, fi, start, stop, cals, mult): # Save our data based on data type. all_data[index_loc, :] = boxy_array[:, channel] - # Change data to array. 
- all_data = np.asarray(all_data) - print('Blank Data shape: ', data.shape) print('Input Data shape: ', all_data.shape) diff --git a/mne/io/boxy/tests/test_boxy.py b/mne/io/boxy/tests/test_boxy.py index 47e273904aa..6d43b2f5423 100644 --- a/mne/io/boxy/tests/test_boxy.py +++ b/mne/io/boxy/tests/test_boxy.py @@ -5,6 +5,7 @@ import os import numpy as np +from numpy.testing import assert_allclose, assert_array_equal import scipy.io as spio import mne @@ -39,3 +40,158 @@ def test_boxy_load(): assert (abs(ppod_ac - mne_ac._data) <= thresh).all() assert (abs(ppod_dc - mne_dc._data) <= thresh).all() assert (abs(ppod_ph - mne_ph._data) <= thresh).all() + + +@requires_testing_data +def test_boxy_filetypes(): + """Test reading parsed and unparsed BOXY data files.""" + # BOXY data files can be saved in two types (parsed and unparsed) which + # mostly determines how the data is organised. + # For parsed files, each row is a single timepoint and all + # source/detector combinations are represented as columns. + # For unparsed files, each row is a source and each group of n rows + # represents a timepoint. For example, if there are ten sources in the raw + # data then the first ten rows represent the ten sources at timepoint 1 + # while the next set of ten rows are the ten sources at timepoint 2. + # Detectors are represented as columns. + + # Since p_pod is designed to only load unparsed files, we will first + # compare MNE and p_pod loaded data from an unparsed data file. If those + # files are comparable, then we will compare the MNE loaded data between + # parsed and unparsed files. + + # Determine to which decimal place we will compare. + thresh = 1e-10 + + # Load AC, DC, and Phase data. 
+ boxy_raw_dir = os.path.join(data_path(download=False), + 'BOXY', 'boxy_digaux_recording', 'unparsed') + + unp_dc = mne.io.read_raw_boxy(boxy_raw_dir, 'DC', verbose=True).load_data() + unp_ac = mne.io.read_raw_boxy(boxy_raw_dir, 'AC', verbose=True).load_data() + unp_ph = mne.io.read_raw_boxy(boxy_raw_dir, 'Ph', verbose=True).load_data() + + # Load p_pod data. + p_pod_dir = os.path.join(data_path(download=False), + 'BOXY', 'boxy_digaux_recording', 'p_pod', + 'p_pod_digaux_unparsed.mat') + ppod_data = spio.loadmat(p_pod_dir) + + ppod_ac = np.transpose(ppod_data['ac']) + ppod_dc = np.transpose(ppod_data['dc']) + ppod_ph = np.transpose(ppod_data['ph']) + + # Compare MNE loaded data to p_pod loaded data. + assert (abs(ppod_ac - unp_ac._data) <= thresh).all() + assert (abs(ppod_dc - unp_dc._data) <= thresh).all() + assert (abs(ppod_ph - unp_ph._data) <= thresh).all() + + # Now let's load our parsed data. + boxy_raw_dir = os.path.join(data_path(download=False), + 'BOXY', 'boxy_digaux_recording', 'parsed') + + par_dc = mne.io.read_raw_boxy(boxy_raw_dir, 'DC', verbose=True).load_data() + par_ac = mne.io.read_raw_boxy(boxy_raw_dir, 'AC', verbose=True).load_data() + par_ph = mne.io.read_raw_boxy(boxy_raw_dir, 'Ph', verbose=True).load_data() + + # Compare parsed and unparsed data. + assert (abs(unp_dc._data - par_dc._data) == 0).all() + assert (abs(unp_ac._data - par_ac._data) == 0).all() + assert (abs(unp_ph._data - par_ph._data) == 0).all() + + +@requires_testing_data +def test_boxy_digaux(): + """Test reading BOXY files and generating annotations from digaux.""" + # We'll test both parsed and unparsed boxy data files. + # Set our comparison threshold and sampling rate. + thresh = 1e-6 + srate = 79.4722 + + # Load AC, DC, and Phase data from a parsed file first. + boxy_raw_dir = os.path.join(data_path(download=False), + 'BOXY', 'boxy_digaux_recording', 'parsed') + + # The type of data shouldn't matter, but we'll test all three. 
+ par_dc = mne.io.read_raw_boxy(boxy_raw_dir, 'DC', verbose=True).load_data() + par_ac = mne.io.read_raw_boxy(boxy_raw_dir, 'AC', verbose=True).load_data() + par_ph = mne.io.read_raw_boxy(boxy_raw_dir, 'Ph', verbose=True).load_data() + + # Check that our event order matches what we expect. + event_list = ['1.0', '2.0', '3.0', '4.0', '5.0'] + assert_array_equal(par_dc.annotations.description, event_list) + assert_array_equal(par_ac.annotations.description, event_list) + assert_array_equal(par_ph.annotations.description, event_list) + + # Check that our event timings are what we expect. + event_onset = [i_time * (1.0 / srate) for i_time in + [105, 185, 265, 344, 424]] + assert_allclose(par_dc.annotations.onset, event_onset, atol=thresh) + assert_allclose(par_ac.annotations.onset, event_onset, atol=thresh) + assert_allclose(par_ph.annotations.onset, event_onset, atol=thresh) + + # Now we'll load data from an unparsed file. + boxy_raw_dir = os.path.join(data_path(download=False), + 'BOXY', 'boxy_digaux_recording', 'unparsed') + + # The type of data shouldn't matter, but we'll test all three. + unp_dc = mne.io.read_raw_boxy(boxy_raw_dir, 'DC', verbose=True).load_data() + unp_ac = mne.io.read_raw_boxy(boxy_raw_dir, 'AC', verbose=True).load_data() + unp_ph = mne.io.read_raw_boxy(boxy_raw_dir, 'Ph', verbose=True).load_data() + + # Check that our event order matches what we expect. + event_list = ['1.0', '2.0', '3.0', '4.0', '5.0'] + assert_array_equal(unp_dc.annotations.description, event_list) + assert_array_equal(unp_ac.annotations.description, event_list) + assert_array_equal(unp_ph.annotations.description, event_list) + + # Check that our event timings are what we expect. 
+ event_onset = [i_time * (1.0 / srate) for i_time in + [105, 185, 265, 344, 424]] + assert_allclose(unp_dc.annotations.onset, event_onset, atol=thresh) + assert_allclose(unp_ac.annotations.onset, event_onset, atol=thresh) + assert_allclose(unp_ph.annotations.onset, event_onset, atol=thresh) + + # Now let's compare parsed and unparsed events to p_pod loaded digaux. + # Load our p_pod data. + p_pod_dir = os.path.join(data_path(download=False), + 'BOXY', 'boxy_digaux_recording', + 'p_pod', 'p_pod_digaux_unparsed.mat') + + ppod_data = spio.loadmat(p_pod_dir) + ppod_digaux = np.transpose(ppod_data['digaux'])[0] + + # Now let's get our triggers from the p_pod digaux. + # We only want the first instance of each trigger. + prev_mrk = 0 + mrk_idx = list() + duration = list() + tmp_dur = 0 + for i_num, i_mrk in enumerate(ppod_digaux): + if i_mrk != 0 and i_mrk != prev_mrk: + mrk_idx.append(i_num) + if i_mrk != 0 and i_mrk == prev_mrk: + tmp_dur += 1 + if i_mrk == 0 and i_mrk != prev_mrk: + duration.append((tmp_dur + 1) * (1.0 / srate)) + tmp_dur = 0 + prev_mrk = i_mrk + onset = np.asarray([i_mrk * (1.0 / srate) for i_mrk in mrk_idx]) + description = np.asarray([str(float(i_mrk))for i_mrk in + ppod_digaux[mrk_idx]]) + + # Check that our event orders match. + assert_array_equal(par_dc.annotations.description, description) + assert_array_equal(par_ac.annotations.description, description) + assert_array_equal(par_ph.annotations.description, description) + assert_array_equal(unp_dc.annotations.description, description) + assert_array_equal(unp_ac.annotations.description, description) + assert_array_equal(unp_ph.annotations.description, description) + + # Check that our event timings match. 
+ assert_allclose(par_dc.annotations.onset, onset, atol=thresh) + assert_allclose(par_ac.annotations.onset, onset, atol=thresh) + assert_allclose(par_ph.annotations.onset, onset, atol=thresh) + assert_allclose(unp_dc.annotations.onset, onset, atol=thresh) + assert_allclose(unp_ac.annotations.onset, onset, atol=thresh) + assert_allclose(unp_ph.annotations.onset, onset, atol=thresh) From 6672d7b756c69f1d3793cc6fe5a386cdc5f34522 Mon Sep 17 00:00:00 2001 From: kuziekj Date: Wed, 12 Aug 2020 10:07:07 -0600 Subject: [PATCH 140/167] changed filenames to match testing data set (#29) --- mne/io/boxy/boxy.py | 26 +++-------- mne/io/boxy/tests/test_boxy.py | 82 +++++++++++++++++++--------------- 2 files changed, 51 insertions(+), 57 deletions(-) diff --git a/mne/io/boxy/boxy.py b/mne/io/boxy/boxy.py index 8ec48076394..901ef01982d 100644 --- a/mne/io/boxy/boxy.py +++ b/mne/io/boxy/boxy.py @@ -2,7 +2,6 @@ # # License: BSD (3-clause) -import glob as glob import re as re import numpy as np @@ -20,7 +19,7 @@ def read_raw_boxy(fname, datatype='AC', preload=False, verbose=None): Parameters ---------- fname : str - Path to the BOXY data folder. + Path to the BOXY data file. datatype : str Type of data to return (AC, DC, or Ph). %(preload)s @@ -45,7 +44,7 @@ class RawBOXY(BaseRaw): Parameters ---------- fname : str - Path to the BOXY data folder. + Path to the BOXY data file. datatype : str Type of data to return (AC, DC, or Ph). %(preload)s @@ -60,19 +59,6 @@ class RawBOXY(BaseRaw): def __init__(self, fname, datatype='AC', preload=False, verbose=None): logger.info('Loading %s' % fname) - # Check if required files exist and store names for later use. - files = dict() - key = '*.txt' - print(fname) - files[key] = [glob.glob('%s/*%s' % (fname, key))] - - # Make sure filenames are in order. - files[key][0].sort() - if len(files[key]) != 1: - raise RuntimeError('Expect one %s file, got %d' % - (key, len(files[key]),)) - files[key] = files[key][0] - # Determine which data type to return. 
if datatype not in ['AC', 'DC', 'Ph']: raise RuntimeError('Expect AC, DC, or Ph, got %s' % datatype) @@ -84,7 +70,7 @@ def __init__(self, fname, datatype='AC', preload=False, verbose=None): mrk_col = 0 mrk_data = list() col_names = list() - with open(files[key][0], 'r') as data: + with open(fname, 'r') as data: for line_num, i_line in enumerate(data, 1): if '#DATA ENDS' in i_line: # Data ends just before this. @@ -154,7 +140,7 @@ def __init__(self, fname, datatype='AC', preload=False, verbose=None): 'start_line': start_line, 'end_line': end_line, 'filetype': filetype, - 'files': files[key][0], + 'file': fname, 'datatype': datatype, 'srate': srate, } @@ -219,7 +205,7 @@ def _read_segment_file(self, data, idx, fi, start, stop, cals, mult): end_line = self._raw_extras[fi]['end_line'] filetype = self._raw_extras[fi]['filetype'] datatype = self._raw_extras[fi]['datatype'] - boxy_files = self._raw_extras[fi]['files'] + boxy_file = self._raw_extras[fi]['file'] # Possible detector names. detectors = ['A', 'B', 'C', 'D', 'E', 'F', 'G', 'H', 'I', 'J', 'K', @@ -230,7 +216,7 @@ def _read_segment_file(self, data, idx, fi, start, stop, cals, mult): boxy_data = list() # Loop through our data. - with open(boxy_files, 'r') as data_file: + with open(boxy_file, 'r') as data_file: for line_num, i_line in enumerate(data_file, 1): if line_num == (start_line - 1): diff --git a/mne/io/boxy/tests/test_boxy.py b/mne/io/boxy/tests/test_boxy.py index 6d43b2f5423..a7d9203fb4c 100644 --- a/mne/io/boxy/tests/test_boxy.py +++ b/mne/io/boxy/tests/test_boxy.py @@ -19,18 +19,20 @@ def test_boxy_load(): thresh = 1e-10 # Load AC, DC, and Phase data. 
- boxy_raw_dir = os.path.join(data_path(download=False), - 'BOXY', 'boxy_short_recording') + boxy_file = os.path.join(data_path(download=False), + 'BOXY', 'boxy_0_40_recording', + 'boxy_0_40_notriggers_unparsed.txt') - mne_dc = mne.io.read_raw_boxy(boxy_raw_dir, 'DC', verbose=True).load_data() - mne_ac = mne.io.read_raw_boxy(boxy_raw_dir, 'AC', verbose=True).load_data() - mne_ph = mne.io.read_raw_boxy(boxy_raw_dir, 'Ph', verbose=True).load_data() + mne_dc = mne.io.read_raw_boxy(boxy_file, 'DC', verbose=True).load_data() + mne_ac = mne.io.read_raw_boxy(boxy_file, 'AC', verbose=True).load_data() + mne_ph = mne.io.read_raw_boxy(boxy_file, 'Ph', verbose=True).load_data() # Load p_pod data. - p_pod_dir = os.path.join(data_path(download=False), - 'BOXY', 'boxy_short_recording', - 'boxy_p_pod_files', '1anc071a_001.mat') - ppod_data = spio.loadmat(p_pod_dir) + p_pod_file = os.path.join(data_path(download=False), + 'BOXY', 'boxy_0_40_recording', + 'p_pod_10_6_3_loaded_data', + 'p_pod_10_6_3_notriggers_unparsed.mat') + ppod_data = spio.loadmat(p_pod_file) ppod_ac = np.transpose(ppod_data['ac']) ppod_dc = np.transpose(ppod_data['dc']) @@ -64,18 +66,20 @@ def test_boxy_filetypes(): thresh = 1e-10 # Load AC, DC, and Phase data. - boxy_raw_dir = os.path.join(data_path(download=False), - 'BOXY', 'boxy_digaux_recording', 'unparsed') + boxy_file = os.path.join(data_path(download=False), + 'BOXY', 'boxy_0_84_digaux_recording', + 'boxy_0_84_triggers_unparsed.txt') - unp_dc = mne.io.read_raw_boxy(boxy_raw_dir, 'DC', verbose=True).load_data() - unp_ac = mne.io.read_raw_boxy(boxy_raw_dir, 'AC', verbose=True).load_data() - unp_ph = mne.io.read_raw_boxy(boxy_raw_dir, 'Ph', verbose=True).load_data() + unp_dc = mne.io.read_raw_boxy(boxy_file, 'DC', verbose=True).load_data() + unp_ac = mne.io.read_raw_boxy(boxy_file, 'AC', verbose=True).load_data() + unp_ph = mne.io.read_raw_boxy(boxy_file, 'Ph', verbose=True).load_data() # Load p_pod data. 
- p_pod_dir = os.path.join(data_path(download=False), - 'BOXY', 'boxy_digaux_recording', 'p_pod', - 'p_pod_digaux_unparsed.mat') - ppod_data = spio.loadmat(p_pod_dir) + p_pod_file = os.path.join(data_path(download=False), + 'BOXY', 'boxy_0_84_digaux_recording', + 'p_pod_10_6_3_loaded_data', + 'p_pod_10_6_3_triggers_unparsed.mat') + ppod_data = spio.loadmat(p_pod_file) ppod_ac = np.transpose(ppod_data['ac']) ppod_dc = np.transpose(ppod_data['dc']) @@ -87,12 +91,13 @@ def test_boxy_filetypes(): assert (abs(ppod_ph - unp_ph._data) <= thresh).all() # Now let's load our parsed data. - boxy_raw_dir = os.path.join(data_path(download=False), - 'BOXY', 'boxy_digaux_recording', 'parsed') + boxy_file = os.path.join(data_path(download=False), + 'BOXY', 'boxy_0_84_digaux_recording', + 'boxy_0_84_triggers_unparsed.txt') - par_dc = mne.io.read_raw_boxy(boxy_raw_dir, 'DC', verbose=True).load_data() - par_ac = mne.io.read_raw_boxy(boxy_raw_dir, 'AC', verbose=True).load_data() - par_ph = mne.io.read_raw_boxy(boxy_raw_dir, 'Ph', verbose=True).load_data() + par_dc = mne.io.read_raw_boxy(boxy_file, 'DC', verbose=True).load_data() + par_ac = mne.io.read_raw_boxy(boxy_file, 'AC', verbose=True).load_data() + par_ph = mne.io.read_raw_boxy(boxy_file, 'Ph', verbose=True).load_data() # Compare parsed and unparsed data. assert (abs(unp_dc._data - par_dc._data) == 0).all() @@ -109,13 +114,14 @@ def test_boxy_digaux(): srate = 79.4722 # Load AC, DC, and Phase data from a parsed file first. - boxy_raw_dir = os.path.join(data_path(download=False), - 'BOXY', 'boxy_digaux_recording', 'parsed') + boxy_file = os.path.join(data_path(download=False), + 'BOXY', 'boxy_0_84_digaux_recording', + 'boxy_0_84_triggers_parsed.txt') # The type of data shouldn't matter, but we'll test all three. 
- par_dc = mne.io.read_raw_boxy(boxy_raw_dir, 'DC', verbose=True).load_data() - par_ac = mne.io.read_raw_boxy(boxy_raw_dir, 'AC', verbose=True).load_data() - par_ph = mne.io.read_raw_boxy(boxy_raw_dir, 'Ph', verbose=True).load_data() + par_dc = mne.io.read_raw_boxy(boxy_file, 'DC', verbose=True).load_data() + par_ac = mne.io.read_raw_boxy(boxy_file, 'AC', verbose=True).load_data() + par_ph = mne.io.read_raw_boxy(boxy_file, 'Ph', verbose=True).load_data() # Check that our event order matches what we expect. event_list = ['1.0', '2.0', '3.0', '4.0', '5.0'] @@ -131,13 +137,14 @@ def test_boxy_digaux(): assert_allclose(par_ph.annotations.onset, event_onset, atol=thresh) # Now we'll load data from an unparsed file. - boxy_raw_dir = os.path.join(data_path(download=False), - 'BOXY', 'boxy_digaux_recording', 'unparsed') + boxy_file = os.path.join(data_path(download=False), + 'BOXY', 'boxy_0_84_digaux_recording', + 'boxy_0_84_triggers_unparsed.txt') # The type of data shouldn't matter, but we'll test all three. - unp_dc = mne.io.read_raw_boxy(boxy_raw_dir, 'DC', verbose=True).load_data() - unp_ac = mne.io.read_raw_boxy(boxy_raw_dir, 'AC', verbose=True).load_data() - unp_ph = mne.io.read_raw_boxy(boxy_raw_dir, 'Ph', verbose=True).load_data() + unp_dc = mne.io.read_raw_boxy(boxy_file, 'DC', verbose=True).load_data() + unp_ac = mne.io.read_raw_boxy(boxy_file, 'AC', verbose=True).load_data() + unp_ph = mne.io.read_raw_boxy(boxy_file, 'Ph', verbose=True).load_data() # Check that our event order matches what we expect. event_list = ['1.0', '2.0', '3.0', '4.0', '5.0'] @@ -154,11 +161,12 @@ def test_boxy_digaux(): # Now let's compare parsed and unparsed events to p_pod loaded digaux. # Load our p_pod data. 
- p_pod_dir = os.path.join(data_path(download=False), - 'BOXY', 'boxy_digaux_recording', - 'p_pod', 'p_pod_digaux_unparsed.mat') + p_pod_file = os.path.join(data_path(download=False), + 'BOXY', 'boxy_0_84_digaux_recording', + 'p_pod_10_6_3_loaded_data', + 'p_pod_10_6_3_triggers_unparsed.mat') - ppod_data = spio.loadmat(p_pod_dir) + ppod_data = spio.loadmat(p_pod_file) ppod_digaux = np.transpose(ppod_data['digaux'])[0] # Now let's get our triggers from the p_pod digaux. From 10631706d565e7a5ea7f139f0c560e99d6bf8339 Mon Sep 17 00:00:00 2001 From: Jonathan Kuziek Date: Sat, 15 Aug 2020 13:17:07 -0600 Subject: [PATCH 141/167] changes to make ci tests happy --- doc/_includes/data_formats.rst | 2 ++ doc/changes/latest.inc | 4 +++ doc/changes/names.inc | 2 ++ doc/conf.py | 2 +- mne/datasets/utils.py | 4 +-- mne/io/boxy/__init__.py | 2 +- mne/io/boxy/boxy.py | 2 +- mne/io/boxy/tests/test_boxy.py | 2 +- tutorials/io/plot_30_reading_fnirs_data.py | 15 +++++++++++ tutorials/io/plot_40_reading_boxy_data.py | 30 ---------------------- 10 files changed, 29 insertions(+), 36 deletions(-) delete mode 100644 tutorials/io/plot_40_reading_boxy_data.py diff --git a/doc/_includes/data_formats.rst b/doc/_includes/data_formats.rst index ede81a317c8..54e4e63f050 100644 --- a/doc/_includes/data_formats.rst +++ b/doc/_includes/data_formats.rst @@ -69,6 +69,8 @@ EEG :ref:`General data format ` .gdf :func:`mn EEG :ref:`Nicolet ` .data :func:`mne.io.read_raw_nicolet` NIRS :ref:`NIRx ` directory :func:`mne.io.read_raw_nirx` + +NIRS :ref:`BOXY ` directory :func:`mne.io.read_raw_boxy` ============ ============================================ ========= =================================== More details are provided in the tutorials in the :ref:`tut-data-formats` diff --git a/doc/changes/latest.inc b/doc/changes/latest.inc index 78efdfce3e4..3c69294ecb3 100644 --- a/doc/changes/latest.inc +++ b/doc/changes/latest.inc @@ -12,6 +12,10 @@ Current (0.21.dev0) ------------------- +- Add reader for BOXY 
data in :func:`mne.io.read_raw_boxy` by `Kyle Mathewson`_ and `Jonathan Kuziek`_ + + + Changelog ~~~~~~~~~ diff --git a/doc/changes/names.inc b/doc/changes/names.inc index 85d6d8afa75..86c3f0d908c 100644 --- a/doc/changes/names.inc +++ b/doc/changes/names.inc @@ -313,3 +313,5 @@ .. _Johann Benerradi: https://github.com/HanBnrd .. _Rahul Nadkarni: https://github.com/rahuln + +.. _Jonathan Kuziek: https://github.com/kuziekj diff --git a/doc/conf.py b/doc/conf.py index a4f4d826207..da0b995c5a3 100644 --- a/doc/conf.py +++ b/doc/conf.py @@ -655,7 +655,7 @@ def reset_warnings(gallery_conf, fname): 'n_dipoles_fwd', 'n_picks_ref', # Undocumented (on purpose) 'RawKIT', 'RawEximia', 'RawEGI', 'RawEEGLAB', 'RawEDF', 'RawCTF', 'RawBTi', - 'RawBrainVision', 'RawCurry', 'RawNIRX', 'RawGDF', 'RawSNIRF', + 'RawBrainVision', 'RawCurry', 'RawNIRX', 'RawGDF', 'RawSNIRF', 'RawBOXY', # sklearn subclasses 'mapping', 'to', 'any', # unlinkable diff --git a/mne/datasets/utils.py b/mne/datasets/utils.py index ff7b13f232f..8c581f4b59f 100644 --- a/mne/datasets/utils.py +++ b/mne/datasets/utils.py @@ -241,7 +241,7 @@ def _data_path(path=None, force_update=False, update_path=True, download=True, path = _get_path(path, key, name) # To update the testing or misc dataset, push commits, then make a new # release on GitHub. Then update the "releases" variable: - releases = dict(testing='0.100', misc='0.6') + releases = dict(testing='0.101', misc='0.6') # And also update the "md5_hashes['testing']" variable below. 
# To update any other dataset, update the data archive itself (upload @@ -330,7 +330,7 @@ def _data_path(path=None, force_update=False, update_path=True, download=True, sample='12b75d1cb7df9dfb4ad73ed82f61094f', somato='ea825966c0a1e9b2f84e3826c5500161', spm='9f43f67150e3b694b523a21eb929ea75', - testing='deb175669c3489c1fef582f72d3d6017', + testing='de46d819dd21a32b6bfec8c3b57b2fb2', multimodal='26ec847ae9ab80f58f204d09e2c08367', fnirs_motor='c4935d19ddab35422a69f3326a01fef8', boxy_example='d567e80b8063e90096861297638e2eef', diff --git a/mne/io/boxy/__init__.py b/mne/io/boxy/__init__.py index c06d590829e..5da9a5b6a37 100644 --- a/mne/io/boxy/__init__.py +++ b/mne/io/boxy/__init__.py @@ -1,6 +1,6 @@ """fNIRS module for conversion to FIF.""" -# Author: Robert Luke +# Authors: Kyle Mathewson, Jonathan Kuziek # # License: BSD (3-clause) diff --git a/mne/io/boxy/boxy.py b/mne/io/boxy/boxy.py index 901ef01982d..485cd82889f 100644 --- a/mne/io/boxy/boxy.py +++ b/mne/io/boxy/boxy.py @@ -1,4 +1,4 @@ -# Authors: Kyle Mathewson, Jonathan Kuziek +# Authors: Kyle Mathewson, Jonathan Kuziek # # License: BSD (3-clause) diff --git a/mne/io/boxy/tests/test_boxy.py b/mne/io/boxy/tests/test_boxy.py index a7d9203fb4c..decea697815 100644 --- a/mne/io/boxy/tests/test_boxy.py +++ b/mne/io/boxy/tests/test_boxy.py @@ -1,4 +1,4 @@ -# Authors: Kyle Mathewson, Jonathan Kuziek +# Authors: Kyle Mathewson, Jonathan Kuziek # # License: BSD (3-clause) diff --git a/tutorials/io/plot_30_reading_fnirs_data.py b/tutorials/io/plot_30_reading_fnirs_data.py index c80d7ba34dc..0b6d603adfd 100644 --- a/tutorials/io/plot_30_reading_fnirs_data.py +++ b/tutorials/io/plot_30_reading_fnirs_data.py @@ -37,6 +37,21 @@ stored in the .snirf format. +.. _import-boxy: + +BOXY (.txt) +================================ + +BOXY recordings can be read in using :func:`mne.io.read_raw_boxy`. 
+The BOXY software and Imagent devices store data in a single .txt file +containing DC, AC, and Phase information for each source and detector +combination. These raw data files can be saved as parsed or unparsed .txt +files, which affects how the data in the file is organised. +MNE will read either file type and extract the raw DC, AC, and Phase data. +If triggers are sent using the 'digaux' port of the recording hardware, MNE +will also read the 'digaux' data and create annotations for any triggers. + + Storing of optode locations =========================== diff --git a/tutorials/io/plot_40_reading_boxy_data.py b/tutorials/io/plot_40_reading_boxy_data.py deleted file mode 100644 index 1120bbc87c0..00000000000 --- a/tutorials/io/plot_40_reading_boxy_data.py +++ /dev/null @@ -1,30 +0,0 @@ -# -*- coding: utf-8 -*- -r""" -.. _tut-importing-boxy-data: - -========================================================= -Importing data from BOXY software and ISS Imagent devices -========================================================= - -MNE includes various functions and utilities for reading optical imaging -data and optode locations. - -.. contents:: Page contents - :local: - :depth: 2 - - -.. _import-boxy: - -BOXY (directory) -================================ - -BOXY recordings can be read in using :func:`mne.io.read_raw_boxy`. -The BOXY software and Imagent devices store data in a single .txt file -containing DC, AC, and Phase information for each source and detector -combination. Recording settings, such as the number of sources/detectors, and -the sampling rate of the recording, are also saved at the beginning of this -file. MNE will extract the raw DC, AC, and Phase data, along with the recording -settings. 
- -""" # noqa:E501 From 22b3271b374da47df9274b60a8cd471b93467563 Mon Sep 17 00:00:00 2001 From: Jonathan Kuziek Date: Fri, 21 Aug 2020 14:36:42 -0600 Subject: [PATCH 142/167] removed print statements and references to old dataset --- .circleci/config.yml | 3 --- doc/overview/datasets_index.rst | 17 ------------- doc/python_reference.rst | 1 - mne/datasets/__init__.py | 1 - mne/datasets/boxy_example/__init__.py | 3 --- mne/datasets/boxy_example/boxy_example.py | 31 ----------------------- mne/datasets/utils.py | 8 +----- mne/io/boxy/boxy.py | 13 +--------- mne/utils/config.py | 1 - 9 files changed, 2 insertions(+), 76 deletions(-) delete mode 100644 mne/datasets/boxy_example/__init__.py delete mode 100644 mne/datasets/boxy_example/boxy_example.py diff --git a/.circleci/config.yml b/.circleci/config.yml index f26bf88b4ae..e0ac41b41df 100644 --- a/.circleci/config.yml +++ b/.circleci/config.yml @@ -199,9 +199,6 @@ jobs: if [[ $(cat $FNAME | grep -x ".*datasets.*fnirs_motor.*" | wc -l) -gt 0 ]]; then python -c "import mne; print(mne.datasets.fnirs_motor.data_path(update_path=True))"; fi; - if [[ $(cat $FNAME | grep -x ".*datasets.*boxy_example.*" | wc -l) -gt 0 ]]; then - python -c "import mne; print(mne.datasets.boxy_example.data_path(update_path=True))"; - fi; if [[ $(cat $FNAME | grep -x ".*datasets.*opm.*" | wc -l) -gt 0 ]]; then python -c "import mne; print(mne.datasets.opm.data_path(update_path=True))"; fi; diff --git a/doc/overview/datasets_index.rst b/doc/overview/datasets_index.rst index 9167a352f42..7f5cdfc8857 100644 --- a/doc/overview/datasets_index.rst +++ b/doc/overview/datasets_index.rst @@ -212,23 +212,6 @@ The tapping lasts 5 seconds, and there are 30 trials of each condition. .. topic:: Examples * :ref:`tut-fnirs-processing` - -.. _boxy-example-dataset: - -BOXY Example -============ -:func:`mne.datasets.boxy_example.data_path` - -This dataset is of a single participant. -Recorded at the University of Illinois at Urbana-Champaign. 
-Sources and detectors are placed over the occipital lobe. -The participant was shown a checkerboard pattern, alternating at 1Hz. -This reversal starts half-way through the recording. -This set contains data for two montages, each with two blocks. -Each montage and block contains two marker types: - -- 1 = checkerboard reversal -- 2 = same as 1 but for the first few trials (to keep separate if needed) High frequency SEF ================== diff --git a/doc/python_reference.rst b/doc/python_reference.rst index 743a7469687..e1453c2caea 100644 --- a/doc/python_reference.rst +++ b/doc/python_reference.rst @@ -184,7 +184,6 @@ Datasets .. autosummary:: :toctree: generated/ - boxy_example.data_path brainstorm.bst_auditory.data_path brainstorm.bst_resting.data_path brainstorm.bst_raw.data_path diff --git a/mne/datasets/__init__.py b/mne/datasets/__init__.py index a4d6cb429d8..98ac5679ab3 100644 --- a/mne/datasets/__init__.py +++ b/mne/datasets/__init__.py @@ -15,7 +15,6 @@ from . import somato from . import multimodal from . import fnirs_motor -from . import boxy_example from . import opm from . import spm_face from . import testing diff --git a/mne/datasets/boxy_example/__init__.py b/mne/datasets/boxy_example/__init__.py deleted file mode 100644 index 9e1776a5268..00000000000 --- a/mne/datasets/boxy_example/__init__.py +++ /dev/null @@ -1,3 +0,0 @@ -"""boxy example dataset.""" - -from .boxy_example import data_path, has_boxy_example_data, get_version diff --git a/mne/datasets/boxy_example/boxy_example.py b/mne/datasets/boxy_example/boxy_example.py deleted file mode 100644 index eb73ce6a8cb..00000000000 --- a/mne/datasets/boxy_example/boxy_example.py +++ /dev/null @@ -1,31 +0,0 @@ -# Authors: Eric Larson -# License: BSD Style. 
- -from functools import partial - -from ...utils import verbose -from ..utils import (has_dataset, _data_path, _data_path_doc, - _get_version, _version_doc) - - -has_boxy_example_data = partial(has_dataset, name='boxy_example') - - -@verbose -def data_path(path=None, force_update=False, update_path=True, download=True, - verbose=None): # noqa: D103 - return _data_path(path=path, force_update=force_update, - update_path=update_path, name='boxy_example', - download=download) - - -data_path.__doc__ = _data_path_doc.format(name='boxy_example', - conf='MNE_DATASETS_BOXY_EXAMPLE_PATH' - ) - - -def get_version(): # noqa: D103 - return _get_version('boxy_example') - - -get_version.__doc__ = _version_doc.format(name='boxy_example') diff --git a/mne/datasets/utils.py b/mne/datasets/utils.py index 8c581f4b59f..f9aa77b91cc 100644 --- a/mne/datasets/utils.py +++ b/mne/datasets/utils.py @@ -227,7 +227,6 @@ def _data_path(path=None, force_update=False, update_path=True, download=True, 'testing': 'MNE_DATASETS_TESTING_PATH', 'multimodal': 'MNE_DATASETS_MULTIMODAL_PATH', 'fnirs_motor': 'MNE_DATASETS_FNIRS_MOTOR_PATH', - 'boxy_example': 'MNE_DATASETS_BOXY_EXAMPLE_PATH', 'opm': 'MNE_DATASETS_OPM_PATH', 'visual_92_categories': 'MNE_DATASETS_VISUAL_92_CATEGORIES_PATH', 'kiloword': 'MNE_DATASETS_KILOWORD_PATH', @@ -266,7 +265,6 @@ def _data_path(path=None, force_update=False, update_path=True, download=True, 'tar.gz/%s' % releases['testing'], multimodal='https://ndownloader.figshare.com/files/5999598', fnirs_motor='https://osf.io/dj3eh/download?version=1', - boxy_example='https://osf.io/hksme/download?version=6', opm='https://osf.io/p6ae7/download?version=2', visual_92_categories=[ 'https://osf.io/8ejrs/download?version=1', @@ -286,7 +284,6 @@ def _data_path(path=None, force_update=False, update_path=True, download=True, mtrf='mTRF_1.5.zip', multimodal='MNE-multimodal-data.tar.gz', fnirs_motor='MNE-fNIRS-motor-data.tgz', - boxy_example='MNE-BOXY-example-data.tgz', 
opm='MNE-OPM-data.tar.gz', sample='MNE-sample-data-processed.tar.gz', somato='MNE-somato-data.tar.gz', @@ -333,7 +330,6 @@ def _data_path(path=None, force_update=False, update_path=True, download=True, testing='de46d819dd21a32b6bfec8c3b57b2fb2', multimodal='26ec847ae9ab80f58f204d09e2c08367', fnirs_motor='c4935d19ddab35422a69f3326a01fef8', - boxy_example='d567e80b8063e90096861297638e2eef', opm='370ad1dcfd5c47e029e692c85358a374', visual_92_categories=['74f50bbeb65740903eadc229c9fa759f', '203410a98afc9df9ae8ba9f933370e20'], @@ -552,7 +548,6 @@ def has_dataset(name): 'spm': 'MNE-spm-face', 'multimodal': 'MNE-multimodal-data', 'fnirs_motor': 'MNE-fNIRS-motor-data', - 'boxy_example': 'MNE-BOXY-example-data', 'opm': 'MNE-OPM-data', 'testing': 'MNE-testing-data', 'visual_92_categories': 'MNE-visual_92_categories-data', @@ -581,7 +576,7 @@ def _download_all_example_data(verbose=True): from . import (sample, testing, misc, spm_face, somato, brainstorm, eegbci, multimodal, opm, hf_sef, mtrf, fieldtrip_cmc, kiloword, phantom_4dbti, sleep_physionet, limo, - fnirs_motor, refmeg_noise, boxy_example) + fnirs_motor, refmeg_noise) sample_path = sample.data_path() testing.data_path() @@ -591,7 +586,6 @@ def _download_all_example_data(verbose=True): hf_sef.data_path() multimodal.data_path() fnirs_motor.data_path() - boxy_example.data_path() opm.data_path() mtrf.data_path() fieldtrip_cmc.data_path() diff --git a/mne/io/boxy/boxy.py b/mne/io/boxy/boxy.py index 485cd82889f..02989cf5710 100644 --- a/mne/io/boxy/boxy.py +++ b/mne/io/boxy/boxy.py @@ -145,12 +145,7 @@ def __init__(self, fname, datatype='AC', preload=False, verbose=None): 'srate': srate, } - # Make sure data lengths are the same. - print('Start Line: ', start_line) - print('End Line: ', end_line) - print('Original Difference: ', end_line - start_line) - first_samps = start_line - print('New first_samps: ', first_samps) + # Determine how long our data is. 
diff = end_line - (start_line) # Number if rows in data file depends on data file type. @@ -162,9 +157,6 @@ def __init__(self, fname, datatype='AC', preload=False, verbose=None): # First sample is technically sample 0, not the start line in the file. first_samps = 0 - print('New last_samps: ', last_samps) - print('New Difference: ', last_samps - first_samps) - super(RawBOXY, self).__init__( info, preload, filenames=[fname], first_samps=[first_samps], last_samps=[last_samps - 1], @@ -303,9 +295,6 @@ def _read_segment_file(self, data, idx, fi, start, stop, cals, mult): # Save our data based on data type. all_data[index_loc, :] = boxy_array[:, channel] - print('Blank Data shape: ', data.shape) - print('Input Data shape: ', all_data.shape) - # Place our data into the data object in place. data[:] = all_data diff --git a/mne/utils/config.py b/mne/utils/config.py index f6c502c6635..83c95f1101a 100644 --- a/mne/utils/config.py +++ b/mne/utils/config.py @@ -96,7 +96,6 @@ def set_memmap_min_size(memmap_min_size): 'MNE_DATASETS_SOMATO_PATH', 'MNE_DATASETS_MULTIMODAL_PATH', 'MNE_DATASETS_FNIRS_MOTOR_PATH', - 'MNE_DATASETS_BOXY_EXAMPLE_PATH', 'MNE_DATASETS_OPM_PATH', 'MNE_DATASETS_SPM_FACE_DATASETS_TESTS', 'MNE_DATASETS_SPM_FACE_PATH', From 34c5f7ce52ef0df6893cbc6d75c01d3053fb6fb5 Mon Sep 17 00:00:00 2001 From: Jonathan Kuziek Date: Fri, 21 Aug 2020 14:44:56 -0600 Subject: [PATCH 143/167] removed extra blank space --- mne/datasets/utils.py | 1 - 1 file changed, 1 deletion(-) diff --git a/mne/datasets/utils.py b/mne/datasets/utils.py index 74239cc0da9..03d232f4a52 100644 --- a/mne/datasets/utils.py +++ b/mne/datasets/utils.py @@ -577,7 +577,6 @@ def _download_all_example_data(verbose=True): eegbci, multimodal, opm, hf_sef, mtrf, fieldtrip_cmc, kiloword, phantom_4dbti, sleep_physionet, limo, fnirs_motor, refmeg_noise) - sample_path = sample.data_path() testing.data_path() misc.data_path() From fcaa8ffffa1c3e7d6314a34173483fd9e062fbd7 Mon Sep 17 00:00:00 2001 From: kuziekj Date: 
Mon, 31 Aug 2020 15:43:18 -0600 Subject: [PATCH 144/167] Addressing reviewer comments (#30) * addressing comments * addressed more comments. Reader will return all data types --- doc/changes/latest.inc | 6 +- mne/channels/channels.py | 3 +- mne/channels/layout.py | 3 +- mne/cov.py | 9 +- mne/defaults.py | 11 +- mne/io/boxy/boxy.py | 115 ++++++++++--------- mne/io/boxy/tests/test_boxy.py | 127 ++++++++++++++++++--- mne/io/meas_info.py | 2 - mne/io/pick.py | 29 ++--- mne/viz/raw.py | 3 +- mne/viz/topo.py | 3 +- mne/viz/topomap.py | 3 +- mne/viz/utils.py | 5 +- tutorials/io/plot_30_reading_fnirs_data.py | 9 +- 14 files changed, 197 insertions(+), 131 deletions(-) diff --git a/doc/changes/latest.inc b/doc/changes/latest.inc index 522abd2c9a6..a36205e15e9 100644 --- a/doc/changes/latest.inc +++ b/doc/changes/latest.inc @@ -12,10 +12,6 @@ Current (0.21.dev0) ------------------- -- Add reader for BOXY data in :func:`mne.io.read_raw_boxy` by `Kyle Mathewson`_ and `Jonathan Kuziek`_ - - - Changelog ~~~~~~~~~ @@ -171,6 +167,8 @@ Changelog - Add ``reject_by_annotation=True`` to :func:`mne.make_fixed_length_epochs` and :meth:`mne.preprocessing.ICA.plot_properties` to reject bad data segments based on annotation by `Yu-Han Luo`_ +- Add reader for optical imaging data recorded using ISS Imgagent I/II hardware and BOXY recording software in :func:`mne.io.read_raw_boxy` by `Kyle Mathewson`_ and `Jonathan Kuziek`_ + Bug ~~~ diff --git a/mne/channels/channels.py b/mne/channels/channels.py index 61239508070..6f310f4abc6 100644 --- a/mne/channels/channels.py +++ b/mne/channels/channels.py @@ -78,8 +78,7 @@ def _get_ch_type(inst, ch_type, allow_ref_meg=False): """ if ch_type is None: allowed_types = ['mag', 'grad', 'planar1', 'planar2', 'eeg', 'csd', - 'fnirs_cw_amplitude', 'fnirs_fd_phase', - 'fnirs_od', 'hbo', 'hbr', + 'fnirs_cw_amplitude', 'fnirs_od', 'hbo', 'hbr', 'ecog', 'seeg'] allowed_types += ['ref_meg'] if allow_ref_meg else [] for type_ in allowed_types: diff --git 
a/mne/channels/layout.py b/mne/channels/layout.py index 464a55885e3..c8790324e73 100644 --- a/mne/channels/layout.py +++ b/mne/channels/layout.py @@ -915,8 +915,7 @@ def _merge_ch_data(data, ch_type, names, method='rms'): if ch_type == 'grad': data = _merge_grad_data(data, method) else: - assert ch_type in ('hbo', 'hbr', 'fnirs_cw_amplitude', - 'fnirs_fd_phase', 'fnirs_od') + assert ch_type in ('hbo', 'hbr', 'fnirs_cw_amplitude', 'fnirs_od') data, names = _merge_nirs_data(data, names) return data, names diff --git a/mne/cov.py b/mne/cov.py index 9e22c3d30a7..cde84a271c8 100644 --- a/mne/cov.py +++ b/mne/cov.py @@ -1180,9 +1180,8 @@ class _RegCovariance(BaseEstimator): """Aux class.""" def __init__(self, info, grad=0.1, mag=0.1, eeg=0.1, seeg=0.1, ecog=0.1, - hbo=0.1, hbr=0.1, fnirs_cw_amplitude=0.1, fnirs_fd_phase=0.1, - fnirs_od=0.1, csd=0.1, - store_precision=False, assume_centered=False): + hbo=0.1, hbr=0.1, fnirs_cw_amplitude=0.1, fnirs_od=0.1, + csd=0.1, store_precision=False, assume_centered=False): self.info = info # For sklearn compat, these cannot (easily?) be combined into # a single dictionary @@ -1195,7 +1194,6 @@ def __init__(self, info, grad=0.1, mag=0.1, eeg=0.1, seeg=0.1, ecog=0.1, self.hbr = hbr self.fnirs_cw_amplitude = fnirs_cw_amplitude self.fnirs_od = fnirs_od - self.fnirs_fd_phase = fnirs_fd_phase self.csd = csd self.store_precision = store_precision self.assume_centered = assume_centered @@ -1517,8 +1515,6 @@ def regularize(cov, info, mag=0.1, grad=0.1, eeg=0.1, exclude='bads', Regularization factor for HBR signals. fnirs_cw_amplitude : float (default 0.1) Regularization factor for fNIRS raw signals. - fnirs_fd_phase : float (default 0.1) - Regularization factor for fNIRS FD phase. fnirs_od : float (default 0.1) Regularization factor for fNIRS optical density signals. 
csd : float (default 0.1) @@ -1550,7 +1546,6 @@ def regularize(cov, info, mag=0.1, grad=0.1, eeg=0.1, exclude='bads', scalings = _handle_default('scalings_cov_rank', scalings) regs = dict(eeg=eeg, seeg=seeg, ecog=ecog, hbo=hbo, hbr=hbr, fnirs_cw_amplitude=fnirs_cw_amplitude, - fnirs_fd_phase=fnirs_fd_phase, fnirs_od=fnirs_od, csd=csd) if exclude is None: diff --git a/mne/defaults.py b/mne/defaults.py index ec578f16e54..41d8c38e783 100644 --- a/mne/defaults.py +++ b/mne/defaults.py @@ -11,25 +11,23 @@ ref_meg='steelblue', misc='k', stim='k', resp='k', chpi='k', exci='k', ias='k', syst='k', seeg='saddlebrown', dipole='k', gof='k', bio='k', ecog='k', hbo='#AA3377', hbr='b', - fnirs_cw_amplitude='k', fnirs_fd_phase='k', fnirs_od='k', - csd='k'), + fnirs_cw_amplitude='k', fnirs_od='k', csd='k'), units=dict(mag='fT', grad='fT/cm', eeg='µV', eog='µV', ecg='µV', emg='µV', misc='AU', seeg='mV', dipole='nAm', gof='GOF', bio='µV', ecog='µV', hbo='µM', hbr='µM', ref_meg='fT', - fnirs_cw_amplitude='V', fnirs_fd_phase=u'\N{DEGREE SIGN}', - fnirs_od='V', csd='mV/m²'), + fnirs_cw_amplitude='V', fnirs_od='V', csd='mV/m²'), # scalings for the units scalings=dict(mag=1e15, grad=1e13, eeg=1e6, eog=1e6, emg=1e6, ecg=1e6, misc=1.0, seeg=1e3, dipole=1e9, gof=1.0, bio=1e6, ecog=1e6, hbo=1e6, hbr=1e6, ref_meg=1e15, fnirs_cw_amplitude=1.0, - fnirs_fd_phase=1.0, fnirs_od=1.0, csd=1e3), + fnirs_od=1.0, csd=1e3), # rough guess for a good plot scalings_plot_raw=dict(mag=1e-12, grad=4e-11, eeg=20e-6, eog=150e-6, ecg=5e-4, emg=1e-3, ref_meg=1e-12, misc='auto', stim=1, resp=1, chpi=1e-4, exci=1, ias=1, syst=1, seeg=1e-4, bio=1e-6, ecog=1e-4, hbo=10e-6, hbr=10e-6, whitened=10., fnirs_cw_amplitude=2e-2, - fnirs_fd_phase=5e3, fnirs_od=2e-2, csd=200e-4), + fnirs_od=2e-2, csd=200e-4), scalings_cov_rank=dict(mag=1e12, grad=1e11, eeg=1e5, # ~100x scalings seeg=1e1, ecog=1e4, hbo=1e4, hbr=1e4), ylim=dict(mag=(-600., 600.), grad=(-200., 200.), eeg=(-200., 200.), @@ -41,7 +39,6 @@ dipole='Dipole', 
ecog='ECoG', hbo='Oxyhemoglobin', ref_meg='Reference Magnetometers', fnirs_cw_amplitude='fNIRS (CW amplitude)', - fnirs_fd_phase='fNIRS (FD Phase)', fnirs_od='fNIRS (OD)', hbr='Deoxyhemoglobin', gof='Goodness of fit', csd='Current source density'), mask_params=dict(marker='o', diff --git a/mne/io/boxy/boxy.py b/mne/io/boxy/boxy.py index 02989cf5710..9bfc36c8f14 100644 --- a/mne/io/boxy/boxy.py +++ b/mne/io/boxy/boxy.py @@ -13,15 +13,16 @@ @fill_doc -def read_raw_boxy(fname, datatype='AC', preload=False, verbose=None): - """Reader for a BOXY optical imaging recording. +def read_raw_boxy(fname, preload=False, verbose=None): + """Reader for an optical imaging recording. + + This function has been tested using the ISS Imagent I and II systems + and versions 0.40/0.84 of the BOXY recording software. Parameters ---------- fname : str Path to the BOXY data file. - datatype : str - Type of data to return (AC, DC, or Ph). %(preload)s %(verbose)s @@ -34,7 +35,7 @@ def read_raw_boxy(fname, datatype='AC', preload=False, verbose=None): -------- mne.io.Raw : Documentation of attribute and methods. """ - return RawBOXY(fname, datatype, preload, verbose) + return RawBOXY(fname, preload, verbose) @fill_doc @@ -45,8 +46,6 @@ class RawBOXY(BaseRaw): ---------- fname : str Path to the BOXY data file. - datatype : str - Type of data to return (AC, DC, or Ph). %(preload)s %(verbose)s @@ -56,13 +55,9 @@ class RawBOXY(BaseRaw): """ @verbose - def __init__(self, fname, datatype='AC', preload=False, verbose=None): + def __init__(self, fname, preload=False, verbose=None): logger.info('Loading %s' % fname) - # Determine which data type to return. - if datatype not in ['AC', 'DC', 'Ph']: - raise RuntimeError('Expect AC, DC, or Ph, got %s' % datatype) - # Read header file and grab some info. 
filetype = 'parsed' start_line = 0 @@ -72,6 +67,13 @@ def __init__(self, fname, datatype='AC', preload=False, verbose=None): col_names = list() with open(fname, 'r') as data: for line_num, i_line in enumerate(data, 1): + if 'BOXY.EXE:' in i_line: + boxy_ver = re.findall(r'\d*\.\d+', + i_line.rsplit(' ')[-1])[0] + # Check that the BOXY version is supported + if boxy_ver not in ['0.40', '0.84']: + raise RuntimeError('MNE has not been tested with BOXY ' + 'version (%s)' % boxy_ver) if '#DATA ENDS' in i_line: # Data ends just before this. end_line = line_num - 1 @@ -83,6 +85,10 @@ def __init__(self, fname, datatype='AC', preload=False, verbose=None): elif 'Update Rate (Hz)' in i_line: srate = float(i_line.rsplit(' ')[0]) elif 'Updata Rate (Hz)' in i_line: + # Version 0.40 of the BOXY recording software + # (and possibly other versions lower than 0.84) contains a + # typo in the raw data file where 'Update Rate' is spelled + # "Updata Rate. This will account for this typo. srate = float(i_line.rsplit(' ')[0]) elif '#DATA BEGINS' in i_line: # Data should start a couple lines later. @@ -114,26 +120,19 @@ def __init__(self, fname, datatype='AC', preload=False, verbose=None): mrk_data.append(float( re.findall(r'[-+]?\d*\.?\d+', crnt_line)[mrk_col])) - # Label each channel in our data. + # Label each channel in our data, for each data type (DC, AC, Ph). # Data is organised by channels x timepoint, where the first # 'source_num' rows correspond to the first detector, the next # 'source_num' rows correspond to the second detector, and so on. boxy_labels = list() for det_num in range(detect_num): for src_num in range(source_num): - boxy_labels.append('S' + str(src_num + 1) + - '_D' + str(det_num + 1)) - - # Determine channel types. 
- if datatype == 'Ph': - chan_type = 'fnirs_fd_phase' - else: - chan_type = 'fnirs_cw_amplitude' - - ch_types = ([chan_type for i_chan in boxy_labels]) + for i_type in ['DC', 'AC', 'Ph']: + boxy_labels.append('S' + str(src_num + 1) + + '_D' + str(det_num + 1) + ' ' + i_type) # Create info structure. - info = create_info(boxy_labels, srate, ch_types=ch_types) + info = create_info(boxy_labels, srate) raw_extras = {'source_num': source_num, 'detect_num': detect_num, @@ -141,16 +140,15 @@ def __init__(self, fname, datatype='AC', preload=False, verbose=None): 'end_line': end_line, 'filetype': filetype, 'file': fname, - 'datatype': datatype, 'srate': srate, } # Determine how long our data is. diff = end_line - (start_line) - # Number if rows in data file depends on data file type. + # Number of rows in data file depends on data file type. if filetype == 'non-parsed': - last_samps = diff // (source_num) + last_samps = (diff // (source_num)) elif filetype == 'parsed': last_samps = diff @@ -196,7 +194,6 @@ def _read_segment_file(self, data, idx, fi, start, stop, cals, mult): start_line = self._raw_extras[fi]['start_line'] end_line = self._raw_extras[fi]['end_line'] filetype = self._raw_extras[fi]['filetype'] - datatype = self._raw_extras[fi]['datatype'] boxy_file = self._raw_extras[fi]['file'] # Possible detector names. @@ -235,9 +232,14 @@ def _read_segment_file(self, data, idx, fi, start, stop, cals, mult): # Need to make sure our rows are the same length. # This is done by padding the shorter ones. - padding = boxy_length - len(i_data) - boxy_array[ii] = np.pad(np.asarray(i_data, dtype=float), - (0, padding), mode='empty') + line_diff = boxy_length - len(i_data) + if line_diff == 0: + full_line = i_data + boxy_array[ii] = np.asarray(i_data, dtype=float) + else: + pad = full_line[-line_diff:] + i_data.extend(pad) + boxy_array[ii] = np.asarray(i_data, dtype=float) # Grab data from the other columns that aren't AC, DC, or Ph. 
meta_data = dict() @@ -253,10 +255,10 @@ def _read_segment_file(self, data, idx, fi, start, stop, cals, mult): # Make some empty variables to store our data. if filetype == 'non-parsed': - all_data = np.zeros(((detect_num * source_num), + all_data = np.zeros(((detect_num * source_num * 3), int(len(boxy_data) / source_num))) elif filetype == 'parsed': - all_data = np.zeros(((detect_num * source_num), + all_data = np.zeros(((detect_num * source_num * 3), int(len(boxy_data)))) # Loop through detectors. @@ -265,35 +267,38 @@ def _read_segment_file(self, data, idx, fi, start, stop, cals, mult): # Loop through sources. for i_source in sources: - # Determine where to store our data. - index_loc = (detectors.index(i_detect) * source_num + - (i_source - 1)) + for i_num, i_type in enumerate(['DC', 'AC', 'Ph']): + + # Determine where to store our data. + index_loc = (detectors.index(i_detect) * source_num * 3 + + ((i_source - 1) * 3) + i_num) - # Need to treat our filetypes differently. - if filetype == 'non-parsed': + # Need to treat our filetypes differently. + if filetype == 'non-parsed': - # Non-parsed saves timepoints in groups and - # this should account for that. - time_points = np.arange(i_source - 1, - int(meta_data['record'][-1]) * - source_num, source_num) + # Non-parsed saves timepoints in groups and + # this should account for that. + time_points = np.arange(i_source - 1, + int(meta_data['record'][-1]) * + source_num, source_num) - # Determine which channel to - # look for in boxy_array. - channel = np.where(col_names == i_detect + '-' + - datatype)[0][0] + # Determine which channel to + # look for in boxy_array. + channel = np.where(col_names == i_detect + '-' + + i_type)[0][0] - # Save our data based on data type. - all_data[index_loc, :] = boxy_array[time_points, channel] + # Save our data based on data type. 
+ all_data[index_loc, :] = boxy_array[time_points, + channel] - elif filetype == 'parsed': + elif filetype == 'parsed': - # Which channel to look for in boxy_array. - channel = np.where(col_names == i_detect + '-' + datatype + - str(i_source))[0][0] + # Which channel to look for in boxy_array. + channel = np.where(col_names == i_detect + '-' + + i_type + str(i_source))[0][0] - # Save our data based on data type. - all_data[index_loc, :] = boxy_array[:, channel] + # Save our data based on data type. + all_data[index_loc, :] = boxy_array[:, channel] # Place our data into the data object in place. data[:] = all_data diff --git a/mne/io/boxy/tests/test_boxy.py b/mne/io/boxy/tests/test_boxy.py index decea697815..93ce7a7f682 100644 --- a/mne/io/boxy/tests/test_boxy.py +++ b/mne/io/boxy/tests/test_boxy.py @@ -23,9 +23,57 @@ def test_boxy_load(): 'BOXY', 'boxy_0_40_recording', 'boxy_0_40_notriggers_unparsed.txt') - mne_dc = mne.io.read_raw_boxy(boxy_file, 'DC', verbose=True).load_data() - mne_ac = mne.io.read_raw_boxy(boxy_file, 'AC', verbose=True).load_data() - mne_ph = mne.io.read_raw_boxy(boxy_file, 'Ph', verbose=True).load_data() + boxy_data = mne.io.read_raw_boxy(boxy_file, verbose=True).load_data() + + # Test sampling rate. + assert boxy_data.info['sfreq'] == 62.5 + + # Grab our different data types. + chans_dc = np.arange(0, 80) * 3 + 0 + chans_ac = np.arange(0, 80) * 3 + 1 + chans_ph = np.arange(0, 80) * 3 + 2 + + mne_dc = boxy_data.copy().pick(chans_dc) + mne_ac = boxy_data.copy().pick(chans_ac) + mne_ph = boxy_data.copy().pick(chans_ph) + + # Check channel names. 
+ first_chans = ['S1_D1', 'S2_D1', 'S3_D1', 'S4_D1', 'S5_D1', + 'S6_D1', 'S7_D1', 'S8_D1', 'S9_D1', 'S10_D1'] + last_chans = ['S1_D8', 'S2_D8', 'S3_D8', 'S4_D8', 'S5_D8', + 'S6_D8', 'S7_D8', 'S8_D8', 'S9_D8', 'S10_D8'] + + assert mne_dc.info['ch_names'][:10] == [i_chan + ' ' + 'DC' + for i_chan in first_chans] + assert mne_ac.info['ch_names'][:10] == [i_chan + ' ' + 'AC' + for i_chan in first_chans] + assert mne_ph.info['ch_names'][:10] == [i_chan + ' ' + 'Ph' + for i_chan in first_chans] + + assert mne_dc.info['ch_names'][70::] == [i_chan + ' ' + 'DC' + for i_chan in last_chans] + assert mne_ac.info['ch_names'][70::] == [i_chan + ' ' + 'AC' + for i_chan in last_chans] + assert mne_ph.info['ch_names'][70::] == [i_chan + ' ' + 'Ph' + for i_chan in last_chans] + + # Since this data set has no 'digaux' for creating trigger annotations, + # let's make sure our Raw object has no annotations. + + # Check description. + assert mne_dc._annotations.description.size == 0 + assert mne_ac._annotations.description.size == 0 + assert mne_ph._annotations.description.size == 0 + + # Check duration. + assert mne_dc._annotations.duration.size == 0 + assert mne_ac._annotations.duration.size == 0 + assert mne_ph._annotations.duration.size == 0 + + # Check onset. + assert mne_dc._annotations.onset.size == 0 + assert mne_ac._annotations.onset.size == 0 + assert mne_ph._annotations.onset.size == 0 # Load p_pod data. p_pod_file = os.path.join(data_path(download=False), @@ -47,7 +95,7 @@ def test_boxy_load(): @requires_testing_data def test_boxy_filetypes(): """Test reading parsed and unparsed BOXY data files.""" - # BOXY data files can be saved in two types (parsed and unparsed) which + # BOXY data files can be saved in two formats (parsed and unparsed) which # mostly determines how the data is organised. # For parsed files, each row is a single timepoint and all # source/detector combinations are represented as columns. 
@@ -70,9 +118,30 @@ def test_boxy_filetypes(): 'BOXY', 'boxy_0_84_digaux_recording', 'boxy_0_84_triggers_unparsed.txt') - unp_dc = mne.io.read_raw_boxy(boxy_file, 'DC', verbose=True).load_data() - unp_ac = mne.io.read_raw_boxy(boxy_file, 'AC', verbose=True).load_data() - unp_ph = mne.io.read_raw_boxy(boxy_file, 'Ph', verbose=True).load_data() + boxy_data = mne.io.read_raw_boxy(boxy_file, verbose=True).load_data() + + # Test sampling rate. + assert boxy_data.info['sfreq'] == 79.4722 + + # Grab our different data types. + chans_dc = np.arange(0, 8) * 3 + 0 + chans_ac = np.arange(0, 8) * 3 + 1 + chans_ph = np.arange(0, 8) * 3 + 2 + + unp_dc = boxy_data.copy().pick(chans_dc) + unp_ac = boxy_data.copy().pick(chans_ac) + unp_ph = boxy_data.copy().pick(chans_ph) + + # Check channel names. + chans = ['S1_D1', 'S2_D1', 'S3_D1', 'S4_D1', + 'S5_D1', 'S6_D1', 'S7_D1', 'S8_D1'] + + assert unp_dc.info['ch_names'] == [i_chan + ' ' + 'DC' + for i_chan in chans] + assert unp_ac.info['ch_names'] == [i_chan + ' ' + 'AC' + for i_chan in chans] + assert unp_ph.info['ch_names'] == [i_chan + ' ' + 'Ph' + for i_chan in chans] # Load p_pod data. p_pod_file = os.path.join(data_path(download=False), @@ -95,9 +164,23 @@ def test_boxy_filetypes(): 'BOXY', 'boxy_0_84_digaux_recording', 'boxy_0_84_triggers_unparsed.txt') - par_dc = mne.io.read_raw_boxy(boxy_file, 'DC', verbose=True).load_data() - par_ac = mne.io.read_raw_boxy(boxy_file, 'AC', verbose=True).load_data() - par_ph = mne.io.read_raw_boxy(boxy_file, 'Ph', verbose=True).load_data() + boxy_data = mne.io.read_raw_boxy(boxy_file, verbose=True).load_data() + + # Test sampling rate. + assert boxy_data.info['sfreq'] == 79.4722 + + # Grab our different data types. + par_dc = boxy_data.copy().pick(chans_dc) + par_ac = boxy_data.copy().pick(chans_ac) + par_ph = boxy_data.copy().pick(chans_ph) + + # Check channel names. 
+ assert par_dc.info['ch_names'] == [i_chan + ' ' + 'DC' + for i_chan in chans] + assert par_ac.info['ch_names'] == [i_chan + ' ' + 'AC' + for i_chan in chans] + assert par_ph.info['ch_names'] == [i_chan + ' ' + 'Ph' + for i_chan in chans] # Compare parsed and unparsed data. assert (abs(unp_dc._data - par_dc._data) == 0).all() @@ -118,10 +201,16 @@ def test_boxy_digaux(): 'BOXY', 'boxy_0_84_digaux_recording', 'boxy_0_84_triggers_parsed.txt') - # The type of data shouldn't matter, but we'll test all three. - par_dc = mne.io.read_raw_boxy(boxy_file, 'DC', verbose=True).load_data() - par_ac = mne.io.read_raw_boxy(boxy_file, 'AC', verbose=True).load_data() - par_ph = mne.io.read_raw_boxy(boxy_file, 'Ph', verbose=True).load_data() + boxy_data = mne.io.read_raw_boxy(boxy_file, verbose=True).load_data() + + # Grab our different data types. + chans_dc = np.arange(0, 8) * 3 + 0 + chans_ac = np.arange(0, 8) * 3 + 1 + chans_ph = np.arange(0, 8) * 3 + 2 + + par_dc = boxy_data.copy().pick(chans_dc) + par_ac = boxy_data.copy().pick(chans_ac) + par_ph = boxy_data.copy().pick(chans_ph) # Check that our event order matches what we expect. event_list = ['1.0', '2.0', '3.0', '4.0', '5.0'] @@ -141,10 +230,12 @@ def test_boxy_digaux(): 'BOXY', 'boxy_0_84_digaux_recording', 'boxy_0_84_triggers_unparsed.txt') - # The type of data shouldn't matter, but we'll test all three. - unp_dc = mne.io.read_raw_boxy(boxy_file, 'DC', verbose=True).load_data() - unp_ac = mne.io.read_raw_boxy(boxy_file, 'AC', verbose=True).load_data() - unp_ph = mne.io.read_raw_boxy(boxy_file, 'Ph', verbose=True).load_data() + boxy_data = mne.io.read_raw_boxy(boxy_file, verbose=True).load_data() + + # Grab our different data types. + unp_dc = boxy_data.copy().pick(chans_dc) + unp_ac = boxy_data.copy().pick(chans_ac) + unp_ph = boxy_data.copy().pick(chans_ph) # Check that our event order matches what we expect. 
event_list = ['1.0', '2.0', '3.0', '4.0', '5.0'] diff --git a/mne/io/meas_info.py b/mne/io/meas_info.py index fa8e2dba6d3..e79f972db8f 100644 --- a/mne/io/meas_info.py +++ b/mne/io/meas_info.py @@ -56,8 +56,6 @@ ecog=(FIFF.FIFFV_ECOG_CH, FIFF.FIFFV_COIL_EEG, FIFF.FIFF_UNIT_V), fnirs_cw_amplitude=(FIFF.FIFFV_FNIRS_CH, FIFF.FIFFV_COIL_FNIRS_CW_AMPLITUDE, FIFF.FIFF_UNIT_V), - fnirs_fd_phase=(FIFF.FIFFV_FNIRS_CH, FIFF.FIFFV_COIL_FNIRS_FD_PHASE, - FIFF.FIFF_UNIT_V), fnirs_od=(FIFF.FIFFV_FNIRS_CH, FIFF.FIFFV_COIL_FNIRS_OD, FIFF.FIFF_UNIT_NONE), hbo=(FIFF.FIFFV_FNIRS_CH, FIFF.FIFFV_COIL_FNIRS_HBO, FIFF.FIFF_UNIT_MOL), diff --git a/mne/io/pick.py b/mne/io/pick.py index 20996f24d94..a0dff269df0 100644 --- a/mne/io/pick.py +++ b/mne/io/pick.py @@ -54,8 +54,6 @@ def get_channel_type_constants(): coil_type=FIFF.FIFFV_COIL_FNIRS_CW_AMPLITUDE), fnirs_od=dict(kind=FIFF.FIFFV_FNIRS_CH, coil_type=FIFF.FIFFV_COIL_FNIRS_OD), - fnirs_fd_phase=dict(kind=FIFF.FIFFV_FNIRS_CH, - coil_type=FIFF.FIFFV_COIL_FNIRS_FD_PHASE), hbo=dict(kind=FIFF.FIFFV_FNIRS_CH, coil_type=FIFF.FIFFV_COIL_FNIRS_HBO), hbr=dict(kind=FIFF.FIFFV_FNIRS_CH, @@ -103,7 +101,6 @@ def get_channel_type_constants(): FIFF.FIFFV_COIL_FNIRS_CW_AMPLITUDE: 'fnirs_cw_amplitude', FIFF.FIFFV_COIL_FNIRS_OD: 'fnirs_od', - FIFF.FIFFV_COIL_FNIRS_FD_PHASE: 'fnirs_fd_phase', }), 'eeg': ('coil_type', {FIFF.FIFFV_COIL_EEG: 'eeg', FIFF.FIFFV_COIL_EEG_BIPOLAR: 'eeg', @@ -278,9 +275,6 @@ def _triage_fnirs_pick(ch, fnirs, warned): return True elif ch['coil_type'] == FIFF.FIFFV_COIL_FNIRS_OD and fnirs == 'fnirs_od': return True - elif ch['coil_type'] == FIFF.FIFFV_COIL_FNIRS_FD_PHASE and \ - fnirs == 'fnirs_fd_phase': - return True return False @@ -417,8 +411,7 @@ def pick_types(info, meg=None, eeg=False, stim=False, eog=False, ecg=False, for key in ('grad', 'mag'): param_dict[key] = meg if isinstance(fnirs, bool): - for key in ('hbo', 'hbr', 'fnirs_cw_amplitude', 'fnirs_fd_phase', - 'fnirs_od'): + for key in ('hbo', 'hbr', 'fnirs_cw_amplitude', 
'fnirs_od'): param_dict[key] = fnirs warned = [False] for k in range(nchan): @@ -429,8 +422,7 @@ def pick_types(info, meg=None, eeg=False, stim=False, eog=False, ecg=False, pick[k] = param_dict[ch_type] except KeyError: # not so simple assert ch_type in ('grad', 'mag', 'hbo', 'hbr', 'ref_meg', - 'fnirs_cw_amplitude', 'fnirs_fd_phase', - 'fnirs_od') + 'fnirs_cw_amplitude', 'fnirs_od') if ch_type in ('grad', 'mag'): pick[k] = _triage_meg_pick(info['chs'][k], meg) if meg_default_arg: @@ -728,8 +720,7 @@ def channel_indices_by_type(info, picks=None): idx_by_type = {key: list() for key in _PICK_TYPES_KEYS if key not in ('meg', 'fnirs')} idx_by_type.update(mag=list(), grad=list(), hbo=list(), hbr=list(), - fnirs_cw_amplitude=list(), fnirs_fd_phase=list(), - fnirs_od=list()) + fnirs_cw_amplitude=list(), fnirs_od=list()) picks = _picks_to_idx(info, picks, none='all', exclude=(), allow_empty=True) for k in picks: @@ -829,8 +820,7 @@ def _contains_ch_type(info, ch_type): _validate_type(ch_type, 'str', "ch_type") meg_extras = ['mag', 'grad', 'planar1', 'planar2'] - fnirs_extras = ['hbo', 'hbr', 'fnirs_cw_amplitude', 'fnirs_fd_phase', - 'fnirs_od'] + fnirs_extras = ['hbo', 'hbr', 'fnirs_cw_amplitude', 'fnirs_od'] ch_type = _fnirs_raw_dep(ch_type, [False]) valid_channel_types = sorted([key for key in _PICK_TYPES_KEYS if key != 'meg'] + meg_extras + fnirs_extras) @@ -936,23 +926,20 @@ def _check_excludes_includes(chs, info=None, allow_bads=False): seeg=True, dipole=False, gof=False, bio=False, ecog=True, fnirs=True) _PICK_TYPES_KEYS = tuple(list(_PICK_TYPES_DATA_DICT) + ['ref_meg']) _DATA_CH_TYPES_SPLIT = ('mag', 'grad', 'eeg', 'csd', 'seeg', 'ecog', - 'hbo', 'hbr', 'fnirs_cw_amplitude', 'fnirs_fd_phase', - 'fnirs_od') + 'hbo', 'hbr', 'fnirs_cw_amplitude', 'fnirs_od') _DATA_CH_TYPES_ORDER_DEFAULT = ('mag', 'grad', 'eeg', 'csd', 'eog', 'ecg', 'emg', 'ref_meg', 'misc', 'stim', 'resp', 'chpi', 'exci', 'ias', 'syst', 'seeg', 'bio', 'ecog', 'hbo', 'hbr', 'fnirs_cw_amplitude', - 
'fnirs_fd_phase', 'fnirs_od', 'whitened') + 'fnirs_od', 'whitened') # Valid data types, ordered for consistency, used in viz/evoked. _VALID_CHANNEL_TYPES = ('eeg', 'grad', 'mag', 'seeg', 'eog', 'ecg', 'emg', 'dipole', 'gof', 'bio', 'ecog', 'hbo', 'hbr', - 'fnirs_cw_amplitude', 'fnirs_fd_phase', 'fnirs_od', - 'misc', 'csd') + 'fnirs_cw_amplitude', 'fnirs_od', 'misc', 'csd') _MEG_CH_TYPES_SPLIT = ('mag', 'grad', 'planar1', 'planar2') -_FNIRS_CH_TYPES_SPLIT = ('hbo', 'hbr', 'fnirs_cw_amplitude', 'fnirs_fd_phase', - 'fnirs_od') +_FNIRS_CH_TYPES_SPLIT = ('hbo', 'hbr', 'fnirs_cw_amplitude', 'fnirs_od') def _pick_data_channels(info, exclude='bads', with_ref_meg=True): diff --git a/mne/viz/raw.py b/mne/viz/raw.py index 2df9a6183ad..861965d2e3b 100644 --- a/mne/viz/raw.py +++ b/mne/viz/raw.py @@ -350,8 +350,7 @@ def plot_raw(raw, events=None, duration=10.0, start=0.0, n_channels=20, for t in ['grad', 'mag']: inds += [pick_types(info, meg=t, ref_meg=False, exclude=[])] types += [t] * len(inds[-1]) - for t in ['hbo', 'hbr', 'fnirs_cw_amplitude', 'fnirs_fd_phase', - 'fnirs_od']: + for t in ['hbo', 'hbr', 'fnirs_cw_amplitude', 'fnirs_od']: inds += [pick_types(info, meg=False, ref_meg=False, fnirs=t, exclude=[])] types += [t] * len(inds[-1]) diff --git a/mne/viz/topo.py b/mne/viz/topo.py index 0a5b6f17901..d2f95aca3d5 100644 --- a/mne/viz/topo.py +++ b/mne/viz/topo.py @@ -714,8 +714,7 @@ def _plot_evoked_topo(evoked, layout=None, layout_scale=0.945, color=None, # one check for all vendors meg_types = {'mag', 'grad'} is_meg = len(set.intersection(types_used, meg_types)) > 0 - nirs_types = {'hbo', 'hbr', 'fnirs_cw_amplitude', 'fnirs_fd_phase', - 'fnirs_od'} + nirs_types = {'hbo', 'hbr', 'fnirs_cw_amplitude', 'fnirs_od'} is_nirs = len(set.intersection(types_used, nirs_types)) > 0 if is_meg: types_used = list(types_used)[::-1] # -> restore kwarg order diff --git a/mne/viz/topomap.py b/mne/viz/topomap.py index 8622d7169f4..3b7399210df 100644 --- a/mne/viz/topomap.py +++ 
b/mne/viz/topomap.py @@ -37,8 +37,7 @@ from ..io.proj import Projection -_fnirs_types = ('hbo', 'hbr', 'fnirs_cw_amplitude', 'fnirs_fd_phase', - 'fnirs_od') +_fnirs_types = ('hbo', 'hbr', 'fnirs_cw_amplitude', 'fnirs_od') def _adjust_meg_sphere(sphere, info, ch_type): diff --git a/mne/viz/utils.py b/mne/viz/utils.py index 1a66d5f3d67..85479ae9f34 100644 --- a/mne/viz/utils.py +++ b/mne/viz/utils.py @@ -3063,8 +3063,7 @@ def _set_psd_plot_params(info, proj, picks, ax, area_mode): kwargs = dict(meg=False, ref_meg=False, exclude=[]) if name in ('mag', 'grad'): kwargs['meg'] = name - elif name in ('fnirs_cw_amplitude', 'fnirs_fd_phase', - 'fnirs_od', 'hbo', 'hbr'): + elif name in ('fnirs_cw_amplitude', 'fnirs_od', 'hbo', 'hbr'): kwargs['fnirs'] = name else: kwargs[name] = True @@ -3233,7 +3232,7 @@ def _plot_psd(inst, fig, freqs, psd_list, picks_list, titles_list, valid_channel_types = [ 'mag', 'grad', 'eeg', 'csd', 'seeg', 'eog', 'ecg', 'emg', 'dipole', 'gof', 'bio', 'ecog', 'hbo', - 'hbr', 'misc', 'fnirs_cw_amplitude', 'fnirs_fd_phase', 'fnirs_od'] + 'hbr', 'misc', 'fnirs_cw_amplitude', 'fnirs_od'] ch_types_used = list() for this_type in valid_channel_types: if this_type in types: diff --git a/tutorials/io/plot_30_reading_fnirs_data.py b/tutorials/io/plot_30_reading_fnirs_data.py index 0b6d603adfd..2cc8b9af4f7 100644 --- a/tutorials/io/plot_30_reading_fnirs_data.py +++ b/tutorials/io/plot_30_reading_fnirs_data.py @@ -43,10 +43,11 @@ ================================ BOXY recordings can be read in using :func:`mne.io.read_raw_boxy`. -The BOXY software and Imagent devices store data in a single .txt file -containing DC, AC, and Phase information for each source and detector -combination. These raw data files can be saved as parsed or unparsed .txt -files, which affects how the data in the file is organised. 
+The BOXY software and ISS Imagent I and II devices store data in a single .txt +file containing DC (overall background light intensity), +AC (modulated light intensity), and Phase information for each source and +detector combination. These raw data files can be saved as parsed or unparsed +.txt files, which affects how the data in the file is organised. MNE will read either file type and extract the raw DC, AC, and Phase data. If triggers are sent using the 'digaux' port of the recording hardware, MNE will also read the 'digaux' data and create annotations for any triggers. From a402c15b8ea3eb5fb1518d2816bf465ca2f0eb1f Mon Sep 17 00:00:00 2001 From: Jonathan Kuziek Date: Mon, 31 Aug 2020 16:00:58 -0600 Subject: [PATCH 145/167] removed reference to fnirs_fd_phase --- mne/cov.py | 4 ++-- 1 file changed, 2 insertions(+), 2 deletions(-) diff --git a/mne/cov.py b/mne/cov.py index cde84a271c8..2a4f72b0df9 100644 --- a/mne/cov.py +++ b/mne/cov.py @@ -1472,8 +1472,8 @@ def _smart_eigh(C, info, rank, scalings=None, projs=None, @verbose def regularize(cov, info, mag=0.1, grad=0.1, eeg=0.1, exclude='bads', proj=True, seeg=0.1, ecog=0.1, hbo=0.1, hbr=0.1, - fnirs_cw_amplitude=0.1, fnirs_fd_phase=0.1, fnirs_od=0.1, - csd=0.1, rank=None, scalings=None, verbose=None): + fnirs_cw_amplitude=0.1, fnirs_od=0.1, csd=0.1, + rank=None, scalings=None, verbose=None): """Regularize noise covariance matrix. 
This method works by adding a constant to the diagonal for each From a02c20cab3f303ca7d7654224df7d0d36117df6f Mon Sep 17 00:00:00 2001 From: Jonathan Kuziek Date: Mon, 31 Aug 2020 16:04:12 -0600 Subject: [PATCH 146/167] removed reference to degrees for fnirs_fd_phase --- mne/tests/test_defaults.py | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/mne/tests/test_defaults.py b/mne/tests/test_defaults.py index fc603a20dd7..aa3eee4af7c 100644 --- a/mne/tests/test_defaults.py +++ b/mne/tests/test_defaults.py @@ -39,7 +39,7 @@ def test_si_units(): 'n': 1e-9, 'f': 1e-15, } - known_SI = {'V', 'T', 'Am', 'm', 'M', u'\N{DEGREE SIGN}', + known_SI = {'V', 'T', 'Am', 'm', 'M', 'AU', 'GOF'} # not really SI but we tolerate them powers = '²' From 32dfa3ad92f58334b9eb89df0692d7bdd0873900 Mon Sep 17 00:00:00 2001 From: Kyle Mathewson Date: Mon, 16 Nov 2020 17:16:11 -0700 Subject: [PATCH 147/167] added line to return channel type --- mne/io/boxy/boxy.py | 10 +++++++++- 1 file changed, 9 insertions(+), 1 deletion(-) diff --git a/mne/io/boxy/boxy.py b/mne/io/boxy/boxy.py index 9bfc36c8f14..b7d86e421b7 100644 --- a/mne/io/boxy/boxy.py +++ b/mne/io/boxy/boxy.py @@ -131,8 +131,16 @@ def __init__(self, fname, preload=False, verbose=None): boxy_labels.append('S' + str(src_num + 1) + '_D' + str(det_num + 1) + ' ' + i_type) + # Determine channel types. + if datatype == 'Ph': + chan_type = 'fnirs_fd_phase' + else: + chan_type = 'fnirs_cw_amplitude' + + ch_types = ([chan_type for i_chan in boxy_labels]) + # Create info structure. 
- info = create_info(boxy_labels, srate) + info = create_info(boxy_labels, srate, ch_types=ch_types) raw_extras = {'source_num': source_num, 'detect_num': detect_num, From e91a2ea111c052ea5e0b93a5fb9b24377dea1aac Mon Sep 17 00:00:00 2001 From: Kyle Mathewson Date: Tue, 17 Nov 2020 18:17:38 -0700 Subject: [PATCH 148/167] working through review --- mne/io/boxy/boxy.py | 4 +++- mne/io/boxy/tests/test_boxy.py | 15 ++++++++------- 2 files changed, 11 insertions(+), 8 deletions(-) diff --git a/mne/io/boxy/boxy.py b/mne/io/boxy/boxy.py index b7d86e421b7..237aed76c7c 100644 --- a/mne/io/boxy/boxy.py +++ b/mne/io/boxy/boxy.py @@ -134,8 +134,10 @@ def __init__(self, fname, preload=False, verbose=None): # Determine channel types. if datatype == 'Ph': chan_type = 'fnirs_fd_phase' + elif datatype == 'DC': + chan_type = 'fnirs_fd_dc_amplitude' else: - chan_type = 'fnirs_cw_amplitude' + chan_type = 'fnirs_fd_ac_amplitude' ch_types = ([chan_type for i_chan in boxy_labels]) diff --git a/mne/io/boxy/tests/test_boxy.py b/mne/io/boxy/tests/test_boxy.py index 93ce7a7f682..d03dc33c982 100644 --- a/mne/io/boxy/tests/test_boxy.py +++ b/mne/io/boxy/tests/test_boxy.py @@ -27,15 +27,16 @@ def test_boxy_load(): # Test sampling rate. assert boxy_data.info['sfreq'] == 62.5 + + #Test the returned types + assert 'fnirs_fd_phase' in raw + assert 'fnirs_fd_dc_amplitude' in raw + assert 'fnirs_fd_ac_amplitude' in raw # Grab our different data types. - chans_dc = np.arange(0, 80) * 3 + 0 - chans_ac = np.arange(0, 80) * 3 + 1 - chans_ph = np.arange(0, 80) * 3 + 2 - - mne_dc = boxy_data.copy().pick(chans_dc) - mne_ac = boxy_data.copy().pick(chans_ac) - mne_ph = boxy_data.copy().pick(chans_ph) + mne_ph = boxy_data.copy().pick(picks='fnirs_fd_ph') + mne_dc = boxy_data.copy().pick(picks='fnirs_fd_dc_amplitude') + mne_ac = boxy_data.copy().pick(picks='fnirs_fd_ac_amplitude') # Check channel names. 
first_chans = ['S1_D1', 'S2_D1', 'S3_D1', 'S4_D1', 'S5_D1', From c51bb0d05b6442d0abc1e637ef714a7ab42eeadc Mon Sep 17 00:00:00 2001 From: Kyle Mathewson Date: Tue, 17 Nov 2020 19:05:38 -0700 Subject: [PATCH 149/167] fixed indent bug --- mne/io/boxy/boxy.py | 19 +++++++++---------- 1 file changed, 9 insertions(+), 10 deletions(-) diff --git a/mne/io/boxy/boxy.py b/mne/io/boxy/boxy.py index 237aed76c7c..2e3e24e9698 100644 --- a/mne/io/boxy/boxy.py +++ b/mne/io/boxy/boxy.py @@ -130,16 +130,15 @@ def __init__(self, fname, preload=False, verbose=None): for i_type in ['DC', 'AC', 'Ph']: boxy_labels.append('S' + str(src_num + 1) + '_D' + str(det_num + 1) + ' ' + i_type) - - # Determine channel types. - if datatype == 'Ph': - chan_type = 'fnirs_fd_phase' - elif datatype == 'DC': - chan_type = 'fnirs_fd_dc_amplitude' - else: - chan_type = 'fnirs_fd_ac_amplitude' - - ch_types = ([chan_type for i_chan in boxy_labels]) + # Determine channel types. + if i_type == 'Ph': + chan_type = 'fnirs_fd_phase' + elif i_type == 'DC': + chan_type = 'fnirs_fd_dc_amplitude' + else: + chan_type = 'fnirs_fd_ac_amplitude' + + ch_types = ([chan_type for i_chan in boxy_labels]) # Create info structure. 
info = create_info(boxy_labels, srate, ch_types=ch_types) From 7b0468f805c027818e864b92137664364d2a9a1b Mon Sep 17 00:00:00 2001 From: Eric Larson Date: Wed, 18 Nov 2020 04:59:31 -0500 Subject: [PATCH 150/167] MRG, MAINT: Test 3.9 (#8533) * MAINT: Test 3.9 * FIX: No dipy or statsmodels yet * FIX: Deps * FIX: Try again * FIX: GLIBC_2.29 * FIX: Missing * FIX: sys_info * FIX: Comments * FIX: No windows --- .../{main.yml => circle_artifacts.yml} | 0 .github/workflows/linux_conda.yml | 4 +--- .github/workflows/linux_pip.yml | 10 ++++----- README.rst | 9 +++++--- mne/preprocessing/tests/test_xdawn.py | 12 +++++------ mne/utils/config.py | 2 +- mne/viz/backends/_pyvista.py | 2 +- tools/github_actions_dependencies.sh | 21 ++++++++++++------- tools/setup_xvfb.sh | 4 ++++ 9 files changed, 36 insertions(+), 28 deletions(-) rename .github/workflows/{main.yml => circle_artifacts.yml} (100%) create mode 100755 tools/setup_xvfb.sh diff --git a/.github/workflows/main.yml b/.github/workflows/circle_artifacts.yml similarity index 100% rename from .github/workflows/main.yml rename to .github/workflows/circle_artifacts.yml diff --git a/.github/workflows/linux_conda.yml b/.github/workflows/linux_conda.yml index 3123aa67d8c..28132a94e83 100644 --- a/.github/workflows/linux_conda.yml +++ b/.github/workflows/linux_conda.yml @@ -27,9 +27,7 @@ jobs: - uses: actions/checkout@v2 with: fetch-depth: 0 - - run: | - sudo apt-get install -y libxkbcommon-x11-0 libxcb-icccm4 libxcb-image0 libxcb-keysyms1 libxcb-randr0 libxcb-render-util0 libxcb-xinerama0 libxcb-xfixes0 libopengl0 - /sbin/start-stop-daemon --start --quiet --pidfile /tmp/custom_xvfb_99.pid --make-pidfile --background --exec /usr/bin/Xvfb -- :99 -screen 0 1400x900x24 -ac +extension GLX +render -noreset; + - run: ./tools/setup_xvfb.sh name: 'Setup xvfb' - uses: conda-incubator/setup-miniconda@v2 with: diff --git a/.github/workflows/linux_pip.yml b/.github/workflows/linux_pip.yml index f35345c5a02..b0b5f890c38 100644 --- 
a/.github/workflows/linux_pip.yml +++ b/.github/workflows/linux_pip.yml @@ -11,8 +11,8 @@ jobs: # PIP + non-default stim channel + log level info job: if: "github.repository == 'mne-tools/mne-python' && !contains(github.event.head_commit.message, '[ci skip]') && !contains(github.event.head_commit.message, '[skip ci]') && !contains(github.event.head_commit.message, '[skip github]')" - name: 'py3.8' - runs-on: ubuntu-latest + name: 'py3.9' + runs-on: ubuntu-18.04 # same as ubuntu-latest, but more precise name defaults: run: shell: bash @@ -22,14 +22,12 @@ jobs: MNE_STIM_CHANNEL: 'STI101' OPENBLAS_NUM_THREADS: '1' PYTHONUNBUFFERED: '1' - PYTHON_VERSION: '3.8' + PYTHON_VERSION: '3.9' steps: - uses: actions/checkout@v2 with: fetch-depth: 0 - - run: | - sudo apt-get install -y libxkbcommon-x11-0 libxcb-icccm4 libxcb-image0 libxcb-keysyms1 libxcb-randr0 libxcb-render-util0 libxcb-xinerama0 libxcb-xfixes0 libopengl0 - /sbin/start-stop-daemon --start --quiet --pidfile /tmp/custom_xvfb_99.pid --make-pidfile --background --exec /usr/bin/Xvfb -- :99 -screen 0 1400x900x24 -ac +extension GLX +render -noreset; + - run: ./tools/setup_xvfb.sh name: 'Setup xvfb' - uses: actions/setup-python@v2 with: diff --git a/README.rst b/README.rst index 537342019a2..ee494dcbb34 100644 --- a/README.rst +++ b/README.rst @@ -1,11 +1,14 @@ .. -*- mode: rst -*- -|Travis|_ |Azure|_ |Circle|_ |Codecov|_ |PyPI|_ |conda-forge|_ |Zenodo|_ +|GH-Linux|_ |GH-macOS|_ |Azure|_ |Circle|_ |Codecov|_ |PyPI|_ |conda-forge|_ |Zenodo|_ |MNE|_ -.. |Travis| image:: https://api.travis-ci.org/mne-tools/mne-python.svg?branch=master -.. _Travis: https://travis-ci.org/mne-tools/mne-python/branches +.. |GH-Linux| image:: https://github.com/mne-tools/mne-python/workflows/linux%20/%20conda/badge.svg?branch=master +.. _GH-Linux: https://github.com/mne-tools/mne-python/actions?query=branch:master+event:push + +.. |GH-macOS| image:: https://github.com/mne-tools/mne-python/workflows/macos%20/%20conda/badge.svg?branch=master +.. 
_GH-macOS: https://github.com/mne-tools/mne-python/actions?query=branch:master+event:push .. |Azure| image:: https://dev.azure.com/mne-tools/mne-python/_apis/build/status/mne-tools.mne-python?branchName=master .. _Azure: https://dev.azure.com/mne-tools/mne-python/_build/latest?definitionId=1&branchName=master diff --git a/mne/preprocessing/tests/test_xdawn.py b/mne/preprocessing/tests/test_xdawn.py index e0c905547d2..9f7d9a05d4f 100644 --- a/mne/preprocessing/tests/test_xdawn.py +++ b/mne/preprocessing/tests/test_xdawn.py @@ -15,7 +15,7 @@ create_info, EpochsArray) from mne.decoding import Vectorizer from mne.io import read_raw_fif -from mne.utils import requires_sklearn, check_version +from mne.utils import requires_sklearn from mne.preprocessing.xdawn import Xdawn, _XdawnTransformer base_dir = op.join(op.dirname(__file__), '..', '..', 'io', 'tests', 'data') @@ -193,12 +193,10 @@ def test_xdawn_regularization(): xd.fit(epochs) xd = Xdawn(correct_overlap=False, reg='diagonal_fixed') xd.fit(epochs) - bad_eig = check_version('numpy', '1.16.5') # some problem with newer NumPy - if bad_eig: - pytest.skip('Unknown MKL+Windows error fails for eig check') - xd = Xdawn(correct_overlap=False, reg=None) - with pytest.raises(ValueError, match='Could not compute eigenvalues'): - xd.fit(epochs) + # XXX in principle this should maybe raise an error due to deficiency? 
+ # xd = Xdawn(correct_overlap=False, reg=None) + # with pytest.raises(ValueError, match='Could not compute eigenvalues'): + # xd.fit(epochs) @requires_sklearn diff --git a/mne/utils/config.py b/mne/utils/config.py index a0168d92098..6a537ab668e 100644 --- a/mne/utils/config.py +++ b/mne/utils/config.py @@ -562,7 +562,7 @@ def sys_info(fid=None, show_paths=False): elif mod_name in ('mayavi', 'vtk'): has_3d = True if mod_name == 'vtk': - version = mod.VTK_VERSION + version = getattr(mod, 'VTK_VERSION', 'VTK_VERSION missing') elif mod_name == 'PyQt5': version = _check_pyqt5_version() else: diff --git a/mne/viz/backends/_pyvista.py b/mne/viz/backends/_pyvista.py index 4b8fa1ae041..e55c620f680 100644 --- a/mne/viz/backends/_pyvista.py +++ b/mne/viz/backends/_pyvista.py @@ -37,7 +37,7 @@ from pyvista import BackgroundPlotter from pyvista.utilities import try_callback from pyvista.plotting.plotting import _ALL_PLOTTERS -VTK9 = LooseVersion(vtk.VTK_VERSION) >= LooseVersion('9.0') +VTK9 = LooseVersion(getattr(vtk, 'VTK_VERSION', '9.0')) >= LooseVersion('9.0') _FIGURES = dict() diff --git a/tools/github_actions_dependencies.sh b/tools/github_actions_dependencies.sh index 407c4a29730..2f537f5abb2 100755 --- a/tools/github_actions_dependencies.sh +++ b/tools/github_actions_dependencies.sh @@ -4,15 +4,22 @@ if [ ! -z "$CONDA_ENV" ]; then pip uninstall -yq mne elif [ ! 
-z "$CONDA_DEPENDENCIES" ]; then conda install -y $CONDA_DEPENDENCIES -else # pip - python -m pip install --upgrade pip setuptools wheel +else # pip 3.9 (missing statsmodels and dipy) + python -m pip install --progress-bar off --upgrade pip setuptools wheel pip uninstall -yq numpy - pip install -i "https://pypi.anaconda.org/scipy-wheels-nightly/simple" --pre "numpy!=1.20.0.dev0+20201111233731.0ffaaf8,!=1.20.0.dev0+20201111232921.0ffaaf8" - pip install -f "https://7933911d6844c6c53a7d-47bd50c35cd79bd838daf386af554a83.ssl.cf2.rackcdn.com" scipy pandas scikit-learn matplotlib h5py Pillow - pip install https://github.com/pyvista/pyvista/zipball/master - pip install https://github.com/pyvista/pyvistaqt/zipball/master + pip install --progress-bar off --upgrade --pre --only-binary ":all:" python-dateutil pytz joblib threadpoolctl + pip install --progress-bar off --upgrade --pre --only-binary ":all:" -i "https://pypi.anaconda.org/scipy-wheels-nightly/simple" numpy scipy pandas scikit-learn + pip install --progress-bar off --upgrade --pre --only-binary ":all:" -f "https://7933911d6844c6c53a7d-47bd50c35cd79bd838daf386af554a83.ssl.cf2.rackcdn.com" matplotlib + # built using vtk master branch on an Ubuntu 18.04.5 VM and uploaded to OSF: + wget -q https://osf.io/kej3v/download -O vtk-9.0.20201117-cp39-cp39-linux_x86_64.whl + pip install vtk-9.0.20201117-cp39-cp39-linux_x86_64.whl + pip install --progress-bar off https://github.com/pyvista/pyvista/zipball/master + pip install --progress-bar off https://github.com/pyvista/pyvistaqt/zipball/master + pip install --progress-bar off --upgrade --pre PyQt5 + python -c "import vtk" + python -c "import pyvistaqt" fi -pip install --upgrade -r requirements_testing.txt +pip install --progress-bar off --upgrade -r requirements_testing.txt if [ "${DEPS}" != "minimal" ]; then pip install nitime fi diff --git a/tools/setup_xvfb.sh b/tools/setup_xvfb.sh new file mode 100755 index 00000000000..b7cac5ea025 --- /dev/null +++ b/tools/setup_xvfb.sh 
@@ -0,0 +1,4 @@ +#!/bin/bash -ef + +sudo apt-get install -yq libxkbcommon-x11-0 libxcb-icccm4 libxcb-image0 libxcb-keysyms1 libxcb-randr0 libxcb-render-util0 libxcb-xinerama0 libxcb-xfixes0 libopengl0 +/sbin/start-stop-daemon --start --quiet --pidfile /tmp/custom_xvfb_99.pid --make-pidfile --background --exec /usr/bin/Xvfb -- :99 -screen 0 1400x900x24 -ac +extension GLX +render -noreset From 5305feef94a1b8df9355900cb5281f90eeb762f4 Mon Sep 17 00:00:00 2001 From: Daniel McCloy Date: Wed, 18 Nov 2020 08:19:45 -0600 Subject: [PATCH 151/167] MRG, VIZ, FIX: plot_sensors title and interactivity (#8536) * fix: fig.suptitle() -> ax.set(title) * fix: only update the axes that were clicked in --- mne/viz/utils.py | 5 ++++- 1 file changed, 4 insertions(+), 1 deletion(-) diff --git a/mne/viz/utils.py b/mne/viz/utils.py index 18fc54763a0..6cf9c4feefc 100644 --- a/mne/viz/utils.py +++ b/mne/viz/utils.py @@ -964,6 +964,9 @@ def plot_sensors(info, kind='topomap', ch_type=None, title=None, def _onpick_sensor(event, fig, ax, pos, ch_names, show_names): """Pick a channel in plot_sensors.""" + if event.mouseevent.inaxes != ax: + return + if event.mouseevent.key == 'control' and fig.lasso is not None: for ind in event.ind: fig.lasso.select_one(ind) @@ -1064,7 +1067,7 @@ def _plot_sensors(pos, info, picks, colors, bads, ch_names, title, show_names, ch_names=ch_names, show_names=show_names) fig.canvas.mpl_connect('pick_event', picker) - fig.suptitle(title) + ax.set(title=title) closed = partial(_close_event, fig=fig) fig.canvas.mpl_connect('close_event', closed) plt_show(show, block=block) From d120a32dca2a1dc66e2827a1586d86ab3212d94e Mon Sep 17 00:00:00 2001 From: Daniel McCloy Date: Wed, 18 Nov 2020 11:40:50 -0600 Subject: [PATCH 152/167] fix example (#8539) --- examples/visualization/plot_eeglab_head_sphere.py | 1 - 1 file changed, 1 deletion(-) diff --git a/examples/visualization/plot_eeglab_head_sphere.py b/examples/visualization/plot_eeglab_head_sphere.py index 
e1b2896eb15..488c14edd57 100644 --- a/examples/visualization/plot_eeglab_head_sphere.py +++ b/examples/visualization/plot_eeglab_head_sphere.py @@ -99,7 +99,6 @@ fake_evoked.plot_sensors(sphere=(x, y, z, radius), axes=ax[1], show=False) # add titles -fig.texts[0].remove() ax[0].set_title('MNE channel projection', fontweight='bold') ax[1].set_title('EEGLAB channel projection', fontweight='bold') From b6bd62e0d511edb6b9973951d5c94c705f372702 Mon Sep 17 00:00:00 2001 From: Kyle Mathewson Date: Wed, 18 Nov 2020 11:46:17 -0700 Subject: [PATCH 153/167] adding new channel info throughout codebase --- mne/channels/channels.py | 19 ++++++++++++-- mne/channels/layout.py | 4 ++- mne/cov.py | 22 +++++++++++++--- mne/defaults.py | 16 +++++++++--- mne/evoked.py | 14 ++++++++--- mne/io/meas_info.py | 8 ++++++ mne/io/pick.py | 54 ++++++++++++++++++++++++++++++++++------ 7 files changed, 117 insertions(+), 20 deletions(-) diff --git a/mne/channels/channels.py b/mne/channels/channels.py index a4ab98e03ee..96ef36ad0f7 100644 --- a/mne/channels/channels.py +++ b/mne/channels/channels.py @@ -80,7 +80,9 @@ def _get_ch_type(inst, ch_type, allow_ref_meg=False): """ if ch_type is None: allowed_types = ['mag', 'grad', 'planar1', 'planar2', 'eeg', 'csd', - 'fnirs_cw_amplitude', 'fnirs_od', 'hbo', 'hbr', + 'fnirs_cw_amplitude', 'fnirs_fd_dc_amplitude', + 'fnirs_fd_ac_amplitude', 'fnirs_fd_phase', 'fnirs_od', + 'hbo', 'hbr', 'ecog', 'seeg'] allowed_types += ['ref_meg'] if allow_ref_meg else [] for type_ in allowed_types: @@ -290,6 +292,9 @@ def get_montage(self): 'bio': FIFF.FIFFV_BIO_CH, 'ecog': FIFF.FIFFV_ECOG_CH, 'fnirs_cw_amplitude': FIFF.FIFFV_FNIRS_CH, + 'fnirs_fd_dc_amplitude': FIFF.FIFFV_FNIRS_CH, + 'fnirs_fd_ac_amplitude': FIFF.FIFFV_FNIRS_CH, + 'fnirs_fd_phase': FIFF.FIFFV_FNIRS_CH, 'fnirs_od': FIFF.FIFFV_FNIRS_CH, 'hbo': FIFF.FIFFV_FNIRS_CH, 'hbr': FIFF.FIFFV_FNIRS_CH} @@ -307,6 +312,9 @@ def get_montage(self): 'bio': FIFF.FIFF_UNIT_V, 'ecog': FIFF.FIFF_UNIT_V, 'fnirs_cw_amplitude': 
FIFF.FIFF_UNIT_V, + 'fnirs_fd_dc_amplitude': FIFF.FIFF_UNIT_V, + 'fnirs_fd_ac_amplitude': FIFF.FIFF_UNIT_V, + 'fnirs_fd_phase': FIFF.FIFF_UNIT_V, 'fnirs_od': FIFF.FIFF_UNIT_NONE, 'hbo': FIFF.FIFF_UNIT_MOL, 'hbr': FIFF.FIFF_UNIT_MOL} @@ -440,7 +448,8 @@ def set_channel_types(self, mapping, verbose=None): The following sensor types are accepted: ecg, eeg, emg, eog, exci, ias, misc, resp, seeg, stim, syst, ecog, - hbo, hbr, fnirs_cw_amplitude, fnirs_od + hbo, hbr, fnirs_cw_amplitude, fnirs_fd_dc_amplitude, + fnirs_fd_ac_amplitude, fnirs_fd_phase, fnirs_od .. versionadded:: 0.9.0 """ @@ -482,6 +491,12 @@ def set_channel_types(self, mapping, verbose=None): coil_type = FIFF.FIFFV_COIL_FNIRS_HBR elif ch_type == 'fnirs_cw_amplitude': coil_type = FIFF.FIFFV_COIL_FNIRS_CW_AMPLITUDE + elif ch_type == 'fnirs_fd_dc_amplitude': + coil_type = FIFF.FIFFV_COIL_FNIRS_FD_DC_AMPLITUDE + elif ch_type == 'fnirs_fd_ac_amplitude': + coil_type = FIFF.FIFFV_COIL_FNIRS_FD_AC_AMPLITUDE + elif ch_type == 'fnirs_fd_phase': + coil_type = FIFF.FIFFV_COIL_FNIRS_FD_PHASE elif ch_type == 'fnirs_od': coil_type = FIFF.FIFFV_COIL_FNIRS_OD else: diff --git a/mne/channels/layout.py b/mne/channels/layout.py index a69d9e5a586..a3fc1a51021 100644 --- a/mne/channels/layout.py +++ b/mne/channels/layout.py @@ -917,7 +917,9 @@ def _merge_ch_data(data, ch_type, names, method='rms'): if ch_type == 'grad': data = _merge_grad_data(data, method) else: - assert ch_type in ('hbo', 'hbr', 'fnirs_cw_amplitude', 'fnirs_od') + assert ch_type in ('hbo', 'hbr', 'fnirs_cw_amplitude', + 'fnirs_fd_dc_amplitude', 'fnirs_fd_ac_amplitude', + 'fnirs_fd_phase', 'fnirs_od') data, names = _merge_nirs_data(data, names) return data, names diff --git a/mne/cov.py b/mne/cov.py index 2b96cd40b1f..0e75975b57d 100644 --- a/mne/cov.py +++ b/mne/cov.py @@ -1253,7 +1253,9 @@ class _RegCovariance(BaseEstimator): """Aux class.""" def __init__(self, info, grad=0.1, mag=0.1, eeg=0.1, seeg=0.1, ecog=0.1, - hbo=0.1, hbr=0.1, fnirs_cw_amplitude=0.1, 
fnirs_od=0.1, + hbo=0.1, hbr=0.1, fnirs_cw_amplitude=0.1, + fnirs_fd_dc_amplitude=0.1, fnirs_fd_ac_amplitude=0.1, + fnirs_fd_phase=0.1, fnirs_od=0.1, csd=0.1, store_precision=False, assume_centered=False): self.info = info # For sklearn compat, these cannot (easily?) be combined into @@ -1266,6 +1268,9 @@ def __init__(self, info, grad=0.1, mag=0.1, eeg=0.1, seeg=0.1, ecog=0.1, self.hbo = hbo self.hbr = hbr self.fnirs_cw_amplitude = fnirs_cw_amplitude + self.fnirs_fd_dc_amplitude = fnirs_fd_dc_amplitude + self.fnirs_fd_ac_amplitude = fnirs_ac_ac_amplitude + self.fnirs_fd_phase = fnirs_fd_phase self.fnirs_od = fnirs_od self.csd = csd self.store_precision = store_precision @@ -1545,7 +1550,9 @@ def _smart_eigh(C, info, rank, scalings=None, projs=None, @verbose def regularize(cov, info, mag=0.1, grad=0.1, eeg=0.1, exclude='bads', proj=True, seeg=0.1, ecog=0.1, hbo=0.1, hbr=0.1, - fnirs_cw_amplitude=0.1, fnirs_od=0.1, csd=0.1, + fnirs_cw_amplitude=0.1, fnirs_fd_dc_amplitude=0.1, + fnirs_fd_ac_amplitude=0.1, fnirs_fd_phase=0.1, + fnirs_od=0.1, csd=0.1, rank=None, scalings=None, verbose=None): """Regularize noise covariance matrix. @@ -1587,7 +1594,13 @@ def regularize(cov, info, mag=0.1, grad=0.1, eeg=0.1, exclude='bads', hbr : float (default 0.1) Regularization factor for HBR signals. fnirs_cw_amplitude : float (default 0.1) - Regularization factor for fNIRS raw signals. + Regularization factor for fNIRS CW raw signals. + fnirs_fd_dc_amplitude : float (default 0.1) + Regularization factor for fNIRS FD DC raw signals. + fnirs_fd_ac_amplitude : float (default 0.1) + Regularization factor for fNIRS FD AC raw signals. + fnirs_fd_phase : float (default 0.1) + Regularization factor for fNIRS raw phase signals. fnirs_od : float (default 0.1) Regularization factor for fNIRS optical density signals. 
csd : float (default 0.1) @@ -1619,6 +1632,9 @@ def regularize(cov, info, mag=0.1, grad=0.1, eeg=0.1, exclude='bads', scalings = _handle_default('scalings_cov_rank', scalings) regs = dict(eeg=eeg, seeg=seeg, ecog=ecog, hbo=hbo, hbr=hbr, fnirs_cw_amplitude=fnirs_cw_amplitude, + fnirs_fd_dc_amplitude=fnirs_fd_dc_amplitude, + fnirs_fd_ac_amplitude=fnirs_fd_ac_amplitude, + fnirs_fd_phase=fnirs_fd_phase, fnirs_od=fnirs_od, csd=csd) if exclude is None: diff --git a/mne/defaults.py b/mne/defaults.py index 82b9d4a58bd..5067150f038 100644 --- a/mne/defaults.py +++ b/mne/defaults.py @@ -11,22 +11,29 @@ ref_meg='steelblue', misc='k', stim='k', resp='k', chpi='k', exci='k', ias='k', syst='k', seeg='saddlebrown', dipole='k', gof='k', bio='k', ecog='k', hbo='#AA3377', hbr='b', - fnirs_cw_amplitude='k', fnirs_od='k', csd='k'), + fnirs_cw_amplitude='k', fnirs_fd_dc_amplitude='k', + fnirs_fd_ac_amplitude='k', fnirs_fd_phase='k', + fnirs_od='k', csd='k'), units=dict(mag='fT', grad='fT/cm', eeg='µV', eog='µV', ecg='µV', emg='µV', misc='AU', seeg='mV', dipole='nAm', gof='GOF', bio='µV', ecog='µV', hbo='µM', hbr='µM', ref_meg='fT', - fnirs_cw_amplitude='V', fnirs_od='V', csd='mV/m²'), + fnirs_cw_amplitude='V', fnirs_fd_dc_amplitude='V', + fnirs_fd_ac_amplitude='V', fnirs_fd_phase='V', + fnirs_od='V', csd='mV/m²'), # scalings for the units scalings=dict(mag=1e15, grad=1e13, eeg=1e6, eog=1e6, emg=1e6, ecg=1e6, misc=1.0, seeg=1e3, dipole=1e9, gof=1.0, bio=1e6, ecog=1e6, hbo=1e6, hbr=1e6, ref_meg=1e15, fnirs_cw_amplitude=1.0, - fnirs_od=1.0, csd=1e3), + fnirs_fd_dc_amplitude=1.0, fnirs_fd_ac_amplitude=1.0, + fnirs_fd_phase=1.0, fnirs_od=1.0, csd=1e3), # rough guess for a good plot scalings_plot_raw=dict(mag=1e-12, grad=4e-11, eeg=20e-6, eog=150e-6, ecg=5e-4, emg=1e-3, ref_meg=1e-12, misc='auto', stim=1, resp=1, chpi=1e-4, exci=1, ias=1, syst=1, seeg=1e-4, bio=1e-6, ecog=1e-4, hbo=10e-6, hbr=10e-6, whitened=10., fnirs_cw_amplitude=2e-2, + fnirs_fd_dc_amplitude=2e-2, + 
fnirs_fd_ac_amplitude=2e-2, fnirs_fd_phase=2e-2, fnirs_od=2e-2, csd=200e-4), scalings_cov_rank=dict(mag=1e12, grad=1e11, eeg=1e5, # ~100x scalings seeg=1e1, ecog=1e4, hbo=1e4, hbr=1e4), @@ -39,6 +46,9 @@ dipole='Dipole', ecog='ECoG', hbo='Oxyhemoglobin', ref_meg='Reference Magnetometers', fnirs_cw_amplitude='fNIRS (CW amplitude)', + fnirs_fd_dc_amplitude='fNIRS (FD DC amplitude)', + fnirs_fd_ac_amplitude='fNIRS (FD AC amplitude)', + fnirs_fd_phase='fNIRS (FD phase)', fnirs_od='fNIRS (OD)', hbr='Deoxyhemoglobin', gof='Goodness of fit', csd='Current source density'), mask_params=dict(marker='o', diff --git a/mne/evoked.py b/mne/evoked.py index d98743a1f44..701e38d7fbd 100644 --- a/mne/evoked.py +++ b/mne/evoked.py @@ -392,9 +392,11 @@ def animate_topomap(self, ch_type=None, times=None, frame_rate=None, ---------- ch_type : str | None Channel type to plot. Accepted data types: 'mag', 'grad', 'eeg', - 'hbo', 'hbr', 'fnirs_od, and 'fnirs_cw_amplitude'. + 'hbo', 'hbr', 'fnirs_cw_amplitude', 'fnirs_fd_dc_amplitude', + 'fnirs_fd_ac_amplitude', 'fnirs_fd_phase', and 'fnirs_od'. If None, first available channel type from ('mag', 'grad', 'eeg', - 'hbo', 'hbr', 'fnirs_od, 'fnirs_cw_amplitude') is used. + 'hbo', 'hbr', 'fnirs_cw_amplitude', 'fnirs_fd_dc_amplitude', + 'fnirs_fd_ac_amplitude', 'fnirs_fd_phase', and 'fnirs_od') is used. Defaults to None. times : array of float | None The time points to plot. If None, 10 evenly spaced samples are @@ -559,7 +561,9 @@ def get_peak(self, ch_type=None, tmin=None, tmax=None, .. 
versionadded:: 0.16 """ # noqa: E501 supported = ('mag', 'grad', 'eeg', 'seeg', 'ecog', 'misc', 'hbo', - 'hbr', 'None', 'fnirs_cw_amplitude', 'fnirs_od') + 'hbr', 'None', 'fnirs_cw_amplitude', + 'fnirs_fd_dc_amplitude', 'fnirs_fd_ac_amplitude', + 'fnirs_fd_phase', 'fnirs_od') types_used = self.get_channel_types(unique=True, only_data_chs=True) _check_option('ch_type', str(ch_type), supported) @@ -592,7 +596,9 @@ def get_peak(self, ch_type=None, tmin=None, tmax=None, seeg = True elif ch_type == 'ecog': ecog = True - elif ch_type in ('hbo', 'hbr', 'fnirs_cw_amplitude', 'fnirs_od'): + elif ch_type in ('hbo', 'hbr', 'fnirs_cw_amplitude', + 'fnirs_fd_dc_amplitude', 'fnirs_fd_ac_amplitude', + 'fnirs_fd_phase', 'fnirs_od'): fnirs = ch_type if ch_type is not None: diff --git a/mne/io/meas_info.py b/mne/io/meas_info.py index 3e676afdd55..bbc4d7e5ce1 100644 --- a/mne/io/meas_info.py +++ b/mne/io/meas_info.py @@ -56,6 +56,14 @@ ecog=(FIFF.FIFFV_ECOG_CH, FIFF.FIFFV_COIL_EEG, FIFF.FIFF_UNIT_V), fnirs_cw_amplitude=(FIFF.FIFFV_FNIRS_CH, FIFF.FIFFV_COIL_FNIRS_CW_AMPLITUDE, FIFF.FIFF_UNIT_V), + fnirs_fd_dc_amplitude=(FIFF.FIFFV_FNIRS_CH, + FIFF.FIFFV_COIL_FNIRS_FD_DC_AMPLITUDE, + FIFF.FIFFV_UNIT_V), + fnirs_fd_ac_amplitude=(FIFF.FIFFV_FNIRS_CH, + FIFF.FIFFV_COIL_FNIRS_FD_AC_AMPLITUDE, + FIFF.FIFFV_UNIT_V), + fnirs_fd_phase=(FIFF.FIFFV_FNIRS_CH, FIFF.FIFFV_COIL_FNIRS_FD_PHASE, + FIFF.FIFFV_UNIT_V), fnirs_od=(FIFF.FIFFV_FNIRS_CH, FIFF.FIFFV_COIL_FNIRS_OD, FIFF.FIFF_UNIT_NONE), hbo=(FIFF.FIFFV_FNIRS_CH, FIFF.FIFFV_COIL_FNIRS_HBO, FIFF.FIFF_UNIT_MOL), diff --git a/mne/io/pick.py b/mne/io/pick.py index b00edcee4fa..369aa907e76 100644 --- a/mne/io/pick.py +++ b/mne/io/pick.py @@ -52,6 +52,15 @@ def get_channel_type_constants(): fnirs_cw_amplitude=dict( kind=FIFF.FIFFV_FNIRS_CH, coil_type=FIFF.FIFFV_COIL_FNIRS_CW_AMPLITUDE), + fnirs_fd_dc_amplitude=dict( + kind=FIFF.FIFFV_FNIRS_CH, + coil_type=FIFF.FIFFV_COIL_FNIRS_FD_DC_AMPLITUDE), + fnirs_fd_ac_amplitude=dict( + 
kind=FIFF.FIFFV_FNIRS_CH, + coil_type=FIFF.FIFFV_COIL_FNIRS_FD_AC_AMPLITUDE), + fnirs_fd_phase=dict( + kind=FIFF.FIFFV_FNIRS_CH, + coil_type=FIFF.FIFFV_COIL_FNIRS_FD_PHASE), fnirs_od=dict(kind=FIFF.FIFFV_FNIRS_CH, coil_type=FIFF.FIFFV_COIL_FNIRS_OD), hbo=dict(kind=FIFF.FIFFV_FNIRS_CH, @@ -100,6 +109,12 @@ def get_channel_type_constants(): FIFF.FIFFV_COIL_FNIRS_HBR: 'hbr', FIFF.FIFFV_COIL_FNIRS_CW_AMPLITUDE: 'fnirs_cw_amplitude', + FIFF.FIFFV_COIL_FNIRS_FD_DC_AMPLITUDE: + 'fnirs_fd_dc_amplitude', + FIFF.FIFFV_COIL_FNIRS_FD_AC_AMPLITUDE: + 'fnirs_fd_ac_amplitude', + FIFF.FIFFV_COIL_FNIRS_FD_PHASE: + 'fnirs_fd_phase', FIFF.FIFFV_COIL_FNIRS_OD: 'fnirs_od', }), 'eeg': ('coil_type', {FIFF.FIFFV_COIL_EEG: 'eeg', @@ -272,6 +287,15 @@ def _triage_fnirs_pick(ch, fnirs, warned): elif ch['coil_type'] == FIFF.FIFFV_COIL_FNIRS_CW_AMPLITUDE and \ fnirs == 'fnirs_cw_amplitude': return True + elif ch['coil_type'] == FIFF.FIFFV_COIL_FNIRS_FD_DC_AMPLITUDE and \ + fnirs == 'fnirs_fd_dc_amplitude': + return True + elif ch['coil_type'] == FIFF.FIFFV_COIL_FNIRS_FD_AC_AMPLITUDE and \ + fnirs == 'fnirs_fd_ac_amplitude': + return True + elif ch['coil_type'] == FIFF.FIFFV_COIL_FNIRS_PHASE and \ + fnirs == 'fnirs_fd_phase': + return True elif ch['coil_type'] == FIFF.FIFFV_COIL_FNIRS_OD and fnirs == 'fnirs_od': return True return False @@ -403,7 +427,9 @@ def pick_types(info, meg=False, eeg=False, stim=False, eog=False, ecg=False, for key in ('grad', 'mag'): param_dict[key] = meg if isinstance(fnirs, bool): - for key in ('hbo', 'hbr', 'fnirs_cw_amplitude', 'fnirs_od'): + for key in ('hbo', 'hbr', 'fnirs_cw_amplitude', + 'fnirs_fd_dc_amplitude', 'fnirs_fd_ac_amplitude', + 'fnirs_fd_phase', 'fnirs_od'): param_dict[key] = fnirs warned = [False] for k in range(nchan): @@ -412,7 +438,9 @@ def pick_types(info, meg=False, eeg=False, stim=False, eog=False, ecg=False, pick[k] = param_dict[ch_type] except KeyError: # not so simple assert ch_type in ('grad', 'mag', 'hbo', 'hbr', 'ref_meg', - 
'fnirs_cw_amplitude', 'fnirs_od') + 'fnirs_cw_amplitude', 'fnirs_fd_dc_amplitude', + 'fnirs_fd_ac_amplitude', 'fnirs_fd_phase', + 'fnirs_od') if ch_type in ('grad', 'mag'): pick[k] = _triage_meg_pick(info['chs'][k], meg) elif ch_type == 'ref_meg': @@ -703,7 +731,9 @@ def channel_indices_by_type(info, picks=None): idx_by_type = {key: list() for key in _PICK_TYPES_KEYS if key not in ('meg', 'fnirs')} idx_by_type.update(mag=list(), grad=list(), hbo=list(), hbr=list(), - fnirs_cw_amplitude=list(), fnirs_od=list()) + fnirs_cw_amplitude=list(), fnirs_fd_dc_amplitude=list(), + fnirs_fd_ac_amplitude=list(), fnirs_fd_phase=list(), + fnirs_od=list()) picks = _picks_to_idx(info, picks, none='all', exclude=(), allow_empty=True) for k in picks: @@ -792,7 +822,9 @@ def _contains_ch_type(info, ch_type): _validate_type(ch_type, 'str', "ch_type") meg_extras = ['mag', 'grad', 'planar1', 'planar2'] - fnirs_extras = ['hbo', 'hbr', 'fnirs_cw_amplitude', 'fnirs_od'] + fnirs_extras = ['hbo', 'hbr', 'fnirs_cw_amplitude', + 'fnirs_fd_dc_amplitude', 'fnirs_fd_ac_amplitude', + 'fnirs_fd_phase', 'fnirs_od'] valid_channel_types = sorted([key for key in _PICK_TYPES_KEYS if key != 'meg'] + meg_extras + fnirs_extras) _check_option('ch_type', ch_type, valid_channel_types) @@ -897,20 +929,28 @@ def _check_excludes_includes(chs, info=None, allow_bads=False): seeg=True, dipole=False, gof=False, bio=False, ecog=True, fnirs=True) _PICK_TYPES_KEYS = tuple(list(_PICK_TYPES_DATA_DICT) + ['ref_meg']) _DATA_CH_TYPES_SPLIT = ('mag', 'grad', 'eeg', 'csd', 'seeg', 'ecog', - 'hbo', 'hbr', 'fnirs_cw_amplitude', 'fnirs_od') + 'hbo', 'hbr', 'fnirs_cw_amplitude', + 'fnirs_fd_dc_amplitude', 'fnirs_fd_ac_amplitude', + 'fnirs_fd_phase', 'fnirs_od') _DATA_CH_TYPES_ORDER_DEFAULT = ('mag', 'grad', 'eeg', 'csd', 'eog', 'ecg', 'emg', 'ref_meg', 'misc', 'stim', 'resp', 'chpi', 'exci', 'ias', 'syst', 'seeg', 'bio', 'ecog', 'hbo', 'hbr', 'fnirs_cw_amplitude', + 'fnirs_fd_dc_amplitude', + 'fnirs_fd_ac_amplitude', 
'fnirs_fd_phase', 'fnirs_od', 'whitened') # Valid data types, ordered for consistency, used in viz/evoked. _VALID_CHANNEL_TYPES = ('eeg', 'grad', 'mag', 'seeg', 'eog', 'ecg', 'emg', 'dipole', 'gof', 'bio', 'ecog', 'hbo', 'hbr', - 'fnirs_cw_amplitude', 'fnirs_od', 'misc', 'csd') + 'fnirs_cw_amplitude', 'fnirs_fd_dc_amplitude', + 'fnirs_fd_ac_amplitude', 'fnirs_fd_phase', + 'fnirs_od', 'misc', 'csd') _MEG_CH_TYPES_SPLIT = ('mag', 'grad', 'planar1', 'planar2') -_FNIRS_CH_TYPES_SPLIT = ('hbo', 'hbr', 'fnirs_cw_amplitude', 'fnirs_od') +_FNIRS_CH_TYPES_SPLIT = ('hbo', 'hbr', 'fnirs_cw_amplitude', + 'fnirs_fd_dc_amplitude', 'fnirs_fd_ac_amplitude', + 'fnirs_fd_phase', 'fnirs_od') def _pick_data_channels(info, exclude='bads', with_ref_meg=True): From d8e8955888865022274067ae9dc27dc124ae0ec3 Mon Sep 17 00:00:00 2001 From: Kyle Mathewson Date: Wed, 18 Nov 2020 11:50:13 -0700 Subject: [PATCH 154/167] fixing merging errors --- doc/changes/latest.inc | 4 +--- doc/conf.py | 4 ---- doc/python_reference.rst | 3 --- 3 files changed, 1 insertion(+), 10 deletions(-) diff --git a/doc/changes/latest.inc b/doc/changes/latest.inc index 05e150f52e8..4090e9f7201 100644 --- a/doc/changes/latest.inc +++ b/doc/changes/latest.inc @@ -155,7 +155,6 @@ API changes - Add ``group_by`` parameter to `mne.viz.plot_epochs` and `mne.Epochs.plot` to allow displaying channel data by sensor position (:gh:`8381` by `Daniel McCloy`_) -<<<<<<< HEAD - Add ``proj='reconstruct'`` to :meth:`mne.Evoked.plot` and related functions to apply projectors and then undo the signal bias using field mapping by `Eric Larson`_ - When picking a subset of channels, or when dropping channels from `~mne.io.Raw`, `~mne.Epochs`, or `~mne.Evoked`, projectors that can only be applied to the removed channels will now be dropped automatically by `Richard Höchenberger`_ @@ -357,6 +356,5 @@ API - The ``threshold`` argument in :meth:`mne.preprocessing.ICA.find_bads_ecg` defaults to ``None`` in version 0.21 but will change to ``'auto'`` 
in 0.22 by `Yu-Han Luo`_ - The default argument ``meg=True`` in :func:`mne.pick_types` will change to ``meg=False`` in version 0.22 by `Clemens Brunner`_ -======= + - Parameter ``event_colors`` in `mne.viz.plot_epochs` and `mne.Epochs.plot` is deprecated, replaced by ``event_color`` which is consistent with `mne.viz.plot_raw` and provides greater flexibility (:gh:`8381` by `Daniel McCloy`_) ->>>>>>> master diff --git a/doc/conf.py b/doc/conf.py index c2cf4a174a7..df315811271 100644 --- a/doc/conf.py +++ b/doc/conf.py @@ -677,12 +677,8 @@ def reset_warnings(gallery_conf, fname): 'n_dipoles_fwd', 'n_picks_ref', 'n_coords', # Undocumented (on purpose) 'RawKIT', 'RawEximia', 'RawEGI', 'RawEEGLAB', 'RawEDF', 'RawCTF', 'RawBTi', -<<<<<<< HEAD 'RawBrainVision', 'RawCurry', 'RawNIRX', 'RawGDF', 'RawSNIRF', 'RawBOXY', -======= - 'RawBrainVision', 'RawCurry', 'RawNIRX', 'RawGDF', 'RawSNIRF', 'RawPersyst', 'RawNihon', ->>>>>>> master # sklearn subclasses 'mapping', 'to', 'any', # unlinkable diff --git a/doc/python_reference.rst b/doc/python_reference.rst index 70c869d4ea1..e87bc137eb6 100644 --- a/doc/python_reference.rst +++ b/doc/python_reference.rst @@ -72,12 +72,9 @@ Reading raw data read_raw_fif read_raw_eximia read_raw_fieldtrip -<<<<<<< HEAD read_raw_boxy -======= read_raw_persyst read_raw_nihon ->>>>>>> master Base class: From 344f5ab46d3dee155837cf3299fecfa4a884040c Mon Sep 17 00:00:00 2001 From: Eric Larson Date: Wed, 18 Nov 2020 15:25:58 -0500 Subject: [PATCH 155/167] CI: Use 20.04 (#8541) * CI: Use 20.04 * FIX: Actual fix * WIP: Really show it * FIX: Restore --- .github/workflows/circle_artifacts.yml | 2 +- .github/workflows/codespell_and_flake.yml | 2 +- .github/workflows/compat_minimal.yml | 2 +- .github/workflows/compat_old.yml | 2 +- .github/workflows/linux_conda.yml | 2 +- .github/workflows/linux_pip.yml | 2 +- tools/github_actions_dependencies.sh | 2 +- tools/setup_xvfb.sh | 2 +- 8 files changed, 8 insertions(+), 8 deletions(-) diff --git 
a/.github/workflows/circle_artifacts.yml b/.github/workflows/circle_artifacts.yml index 7153fe66a06..b4b246e595f 100644 --- a/.github/workflows/circle_artifacts.yml +++ b/.github/workflows/circle_artifacts.yml @@ -1,7 +1,7 @@ on: [status] jobs: circleci_artifacts_redirector_job: - runs-on: ubuntu-latest + runs-on: ubuntu-20.04 name: Run CircleCI artifacts redirector steps: - name: GitHub Action step diff --git a/.github/workflows/codespell_and_flake.yml b/.github/workflows/codespell_and_flake.yml index d3a38a82ac0..82159f70fab 100644 --- a/.github/workflows/codespell_and_flake.yml +++ b/.github/workflows/codespell_and_flake.yml @@ -10,7 +10,7 @@ on: jobs: style: if: "github.repository == 'mne-tools/mne-python' && !contains(github.event.head_commit.message, '[ci skip]') && !contains(github.event.head_commit.message, '[skip ci]') && !contains(github.event.head_commit.message, '[skip github]')" - runs-on: ubuntu-latest + runs-on: ubuntu-20.04 env: CODESPELL_DIRS: 'mne/ doc/ tutorials/ examples/' CODESPELL_SKIPS: 'doc/auto_*,*.fif,*.eve,*.gz,*.tgz,*.zip,*.mat,*.stc,*.label,*.w,*.bz2,*.annot,*.sulc,*.log,*.local-copy,*.orig_avg,*.inflated_avg,*.gii,*.pyc,*.doctree,*.pickle,*.inv,*.png,*.edf,*.touch,*.thickness,*.nofix,*.volume,*.defect_borders,*.mgh,lh.*,rh.*,COR-*,FreeSurferColorLUT.txt,*.examples,.xdebug_mris_calc,bad.segments,BadChannels,*.hist,empty_file,*.orig,*.js,*.map,*.ipynb,searchindex.dat,install_mne_c.rst,plot_*.rst,*.rst.txt,c_EULA.rst*,*.html,gdf_encodes.txt,*.svg' diff --git a/.github/workflows/compat_minimal.yml b/.github/workflows/compat_minimal.yml index 8e7eafc5c29..f81220b8482 100644 --- a/.github/workflows/compat_minimal.yml +++ b/.github/workflows/compat_minimal.yml @@ -12,7 +12,7 @@ jobs: job: if: "github.repository == 'mne-tools/mne-python' && !contains(github.event.head_commit.message, '[ci skip]') && !contains(github.event.head_commit.message, '[skip ci]') && !contains(github.event.head_commit.message, '[skip github]')" name: 'py3.7' - runs-on: 
ubuntu-latest + runs-on: ubuntu-20.04 defaults: run: shell: bash diff --git a/.github/workflows/compat_old.yml b/.github/workflows/compat_old.yml index 04cbf08d00d..91a1493c620 100644 --- a/.github/workflows/compat_old.yml +++ b/.github/workflows/compat_old.yml @@ -12,7 +12,7 @@ jobs: job: if: "github.repository == 'mne-tools/mne-python' && !contains(github.event.head_commit.message, '[ci skip]') && !contains(github.event.head_commit.message, '[skip ci]') && !contains(github.event.head_commit.message, '[skip github]')" name: 'py3.6' - runs-on: ubuntu-latest + runs-on: ubuntu-18.04 defaults: run: shell: bash diff --git a/.github/workflows/linux_conda.yml b/.github/workflows/linux_conda.yml index 28132a94e83..676c3a01a4b 100644 --- a/.github/workflows/linux_conda.yml +++ b/.github/workflows/linux_conda.yml @@ -12,7 +12,7 @@ jobs: job: if: "github.repository == 'mne-tools/mne-python' && !contains(github.event.head_commit.message, '[ci skip]') && !contains(github.event.head_commit.message, '[skip ci]') && !contains(github.event.head_commit.message, '[skip github]')" name: 'py3.8' - runs-on: ubuntu-latest + runs-on: ubuntu-20.04 defaults: run: shell: bash diff --git a/.github/workflows/linux_pip.yml b/.github/workflows/linux_pip.yml index b0b5f890c38..07cfb18c0b8 100644 --- a/.github/workflows/linux_pip.yml +++ b/.github/workflows/linux_pip.yml @@ -12,7 +12,7 @@ jobs: job: if: "github.repository == 'mne-tools/mne-python' && !contains(github.event.head_commit.message, '[ci skip]') && !contains(github.event.head_commit.message, '[skip ci]') && !contains(github.event.head_commit.message, '[skip github]')" name: 'py3.9' - runs-on: ubuntu-18.04 # same as ubuntu-latest, but more precise name + runs-on: ubuntu-20.04 defaults: run: shell: bash diff --git a/tools/github_actions_dependencies.sh b/tools/github_actions_dependencies.sh index 2f537f5abb2..7fa42126bfc 100755 --- a/tools/github_actions_dependencies.sh +++ b/tools/github_actions_dependencies.sh @@ -13,7 +13,7 @@ else # 
pip 3.9 (missing statsmodels and dipy) # built using vtk master branch on an Ubuntu 18.04.5 VM and uploaded to OSF: wget -q https://osf.io/kej3v/download -O vtk-9.0.20201117-cp39-cp39-linux_x86_64.whl pip install vtk-9.0.20201117-cp39-cp39-linux_x86_64.whl - pip install --progress-bar off https://github.com/pyvista/pyvista/zipball/master + pip install --progress-bar off https://github.com/pyvista/pyvista/zipball/5ee02e2f295f667e33f11e71946e774cca40256c pip install --progress-bar off https://github.com/pyvista/pyvistaqt/zipball/master pip install --progress-bar off --upgrade --pre PyQt5 python -c "import vtk" diff --git a/tools/setup_xvfb.sh b/tools/setup_xvfb.sh index b7cac5ea025..cfeb6a0bd92 100755 --- a/tools/setup_xvfb.sh +++ b/tools/setup_xvfb.sh @@ -1,4 +1,4 @@ #!/bin/bash -ef -sudo apt-get install -yq libxkbcommon-x11-0 libxcb-icccm4 libxcb-image0 libxcb-keysyms1 libxcb-randr0 libxcb-render-util0 libxcb-xinerama0 libxcb-xfixes0 libopengl0 +sudo apt-get install -yqq libxkbcommon-x11-0 libxcb-icccm4 libxcb-image0 libxcb-keysyms1 libxcb-randr0 libxcb-render-util0 libxcb-xinerama0 libxcb-xfixes0 libopengl0 /sbin/start-stop-daemon --start --quiet --pidfile /tmp/custom_xvfb_99.pid --make-pidfile --background --exec /usr/bin/Xvfb -- :99 -screen 0 1400x900x24 -ac +extension GLX +render -noreset From 50d5863e1a823db06b369547a5452b9cd9a67a85 Mon Sep 17 00:00:00 2001 From: Eric Larson Date: Wed, 18 Nov 2020 15:50:37 -0500 Subject: [PATCH 156/167] ENH: Add realign_raw (#8540) --- doc/changes/latest.inc | 2 + doc/python_reference.rst | 1 + mne/epochs.py | 2 +- mne/preprocessing/__init__.py | 1 + mne/preprocessing/realign.py | 107 ++++++++++++++++++++++ mne/preprocessing/tests/test_realign.py | 116 ++++++++++++++++++++++++ 6 files changed, 228 insertions(+), 1 deletion(-) create mode 100644 mne/preprocessing/realign.py create mode 100644 mne/preprocessing/tests/test_realign.py diff --git a/doc/changes/latest.inc b/doc/changes/latest.inc index 5b79925a788..4b2001860e8 
100644 --- a/doc/changes/latest.inc +++ b/doc/changes/latest.inc @@ -35,6 +35,8 @@ Enhancements - Add ``proj`` argument to :func:`mne.make_fixed_length_epochs` (:gh:`8351` by `Eric Larson`_) +- Add :func:`mne.preprocessing.realign_raw` to realign simultaneous raw recordings in the presence of clock drift (:gh:`8539` by `Eric Larson`_) + - Reduce memory usage of volume source spaces (:gh:`8379` by `Eric Larson`_) - Speed up heavy use of :meth:`mne.SourceMorph.apply` for volumetric source spaces by use of the method :meth:`mne.SourceMorph.compute_vol_morph_mat` (:gh:`8366` by `Eric Larson`_) diff --git a/doc/python_reference.rst b/doc/python_reference.rst index 5782ef73c9d..31d625f8c65 100644 --- a/doc/python_reference.rst +++ b/doc/python_reference.rst @@ -380,6 +380,7 @@ Projections: oversampled_temporal_projection peak_finder read_ica + realign_raw regress_artifact corrmap read_ica_eeglab diff --git a/mne/epochs.py b/mne/epochs.py index 4721078227d..d00c7ded4d6 100644 --- a/mne/epochs.py +++ b/mne/epochs.py @@ -408,7 +408,7 @@ def __init__(self, info, data, events, event_id=None, tmin=-0.2, tmax=0.5, f'got {events_type}') if events.ndim != 2 or events.shape[1] != 3: raise ValueError( - 'events must be of shape (N, 3), got {events.shape}') + f'events must be of shape (N, 3), got {events.shape}') events_max = events.max() if events_max > INT32_MAX: raise ValueError( diff --git a/mne/preprocessing/__init__.py b/mne/preprocessing/__init__.py index 0d48c102775..dddf063846c 100644 --- a/mne/preprocessing/__init__.py +++ b/mne/preprocessing/__init__.py @@ -19,6 +19,7 @@ from .infomax_ import infomax from .stim import fix_stim_artifact from .maxwell import maxwell_filter, find_bad_channels_maxwell +from .realign import realign_raw from .xdawn import Xdawn from ._csd import compute_current_source_density from . 
import nirs diff --git a/mne/preprocessing/realign.py b/mne/preprocessing/realign.py new file mode 100644 index 00000000000..1f5987f11e9 --- /dev/null +++ b/mne/preprocessing/realign.py @@ -0,0 +1,107 @@ +# -*- coding: utf-8 -*- +# Authors: Eric Larson + +# License: BSD (3-clause) + +import numpy as np + +from ..io import BaseRaw +from ..utils import _validate_type, warn, logger, verbose + + +@verbose +def realign_raw(raw, other, t_raw, t_other, verbose=None): + """Realign two simultaneous recordings. + + Due to clock drift, recordings at a given same sample rate made by two + separate devices simultaneously can become out of sync over time. This + function uses event times captured by both acquisition devices to resample + ``other`` to match ``raw``. + + Parameters + ---------- + raw : instance of Raw + The first raw instance. + other : instance of Raw + The second raw instance. It will be resampled to match ``raw``. + t_raw : array-like, shape (n_events,) + The times of shared events in ``raw`` relative to ``raw.times[0]`` (0). + Typically these could be events on some TTL channel like + ``find_events(raw)[:, 0] - raw.first_event``. + t_other : array-like, shape (n_events,) + The times of shared events in ``other`` relative to ``other.times[0]``. + %(verbose)s + + Notes + ----- + This function operates inplace. It will: + + 1. Estimate the zero-order (start offset) and first-order (clock drift) + correction. + 2. Crop the start of ``raw`` or ``other``, depending on which started + recording first. + 3. Resample ``other`` to match ``raw`` based on the clock drift. + 4. Crop the end of ``raw`` or ``other``, depending on which stopped + recording first (and the clock drift rate). + + This function is primarily designed to work on recordings made at the same + sample rate, but it can also operate on recordings made at different + sample rates to resample and deal with clock drift simultaneously. + + .. 
versionadded:: 0.22 + """ + from scipy import stats + _validate_type(raw, BaseRaw, 'raw') + _validate_type(other, BaseRaw, 'other') + t_raw = np.array(t_raw, float) + t_other = np.array(t_other, float) + if t_raw.ndim != 1 or t_raw.shape != t_other.shape: + raise ValueError('t_raw and t_other must be 1D with the same shape, ' + f'got shapes {t_raw.shape} and {t_other.shape}') + if len(t_raw) < 20: + warn('Fewer than 20 times passed, results may be unreliable') + + # 1. Compute correction factors + coef = np.polyfit(t_other, t_raw, deg=1) + r, p = stats.pearsonr(t_other, t_raw) + msg = f'Linear correlation computed as R={r:0.3f} and p={p:0.2e}' + if p > 0.05 or r <= 0: + raise ValueError(msg + ', cannot resample safely') + if p > 1e-6: + warn(msg + ', results may be unreliable') + else: + logger.info(msg) + dr_ms_s = 1000 * abs(1 - coef[0]) + logger.info( + f'Drift rate: {1000 * dr_ms_s:0.1f} μs/sec ' + f'(total drift over {raw.times[-1]:0.1f} sec recording: ' + f'{raw.times[-1] * dr_ms_s:0.1f} ms)') + + # 2. Crop start of recordings to match using the zero-order term + msg = f'Cropping {coef[1]:0.3f} sec from the start of ' + if coef[1] > 0: # need to crop start of raw to match other + logger.info(msg + 'raw') + raw.crop(coef[1], None) + t_raw -= coef[1] + else: # need to crop start of other to match raw + logger.info(msg + 'other') + other.crop(-coef[1], None) + t_other += coef[1] + + # 3. Resample data using the first-order term + logger.info('Resampling other') + coef = coef[0] + sfreq_new = raw.info['sfreq'] * coef + other.load_data().resample(sfreq_new, verbose=True) + other.info['sfreq'] = raw.info['sfreq'] + other._update_times() + + # 4. 
Crop the end of one of the recordings if necessary + delta = raw.times[-1] - other.times[-1] + msg = f'Cropping {abs(delta):0.3f} sec from the end of ' + if delta > 0: + logger.info(msg + 'raw') + raw.crop(0, other.times[-1]) + elif delta < 0: + logger.info(msg + 'other') + other.crop(0, raw.times[-1]) diff --git a/mne/preprocessing/tests/test_realign.py b/mne/preprocessing/tests/test_realign.py new file mode 100644 index 00000000000..7434f597348 --- /dev/null +++ b/mne/preprocessing/tests/test_realign.py @@ -0,0 +1,116 @@ +# Author: Mark Wronkiewicz +# +# License: BSD (3-clause) + +import numpy as np +from numpy.testing import assert_allclose +from scipy.interpolate import interp1d +import pytest + +from mne import create_info, find_events, Epochs +from mne.io import RawArray +from mne.preprocessing import realign_raw + + +@pytest.mark.parametrize('ratio_other', (1., 0.999, 1.001)) # drifts +@pytest.mark.parametrize('start_raw, start_other', [(0, 0), (0, 3), (3, 0)]) +@pytest.mark.parametrize('stop_raw, stop_other', [(0, 0), (0, 3), (3, 0)]) +def test_realign(ratio_other, start_raw, start_other, stop_raw, stop_other): + """Test realigning raw.""" + # construct a true signal + sfreq = 100. + duration = 50 + stop_raw = duration - stop_raw + stop_other = duration - stop_other + signal = np.zeros(int(round((duration + 1) * sfreq))) + orig_events = np.round( + np.arange(max(start_raw, start_other) + 2, + min(stop_raw, stop_other) - 2) * sfreq).astype(int) + signal[orig_events] = 1. + n_events = len(orig_events) + times = np.arange(len(signal)) / sfreq + stim = np.convolve(signal, np.ones(int(round(0.02 * sfreq))))[:len(times)] + signal = np.convolve( + signal, np.hanning(int(round(0.2 * sfreq))))[:len(times)] + + # construct our sampled versions of these signals (linear interp is fine) + sfreq_raw = sfreq + sfreq_other = ratio_other * sfreq + raw_times = np.arange(start_raw, stop_raw, 1. / sfreq_raw) + other_times = np.arange(start_other, stop_other, 1. 
/ sfreq_other) + assert raw_times[0] >= times[0] + assert raw_times[-1] <= times[-1] + assert other_times[0] >= times[0] + assert other_times[-1] <= times[-1] + data_raw = np.array( + [interp1d(times, d, kind)(raw_times) + for d, kind in ((signal, 'linear'), (stim, 'nearest'))]) + data_other = np.array( + [interp1d(times, d, kind)(other_times) + for d, kind in ((signal, 'linear'), (stim, 'nearest'))]) + info_raw = create_info( + ['raw_data', 'raw_stim'], sfreq, ['eeg', 'stim']) + info_other = create_info( + ['other_data', 'other_stim'], sfreq, ['eeg', 'stim']) + raw = RawArray(data_raw, info_raw, first_samp=111) + other = RawArray(data_other, info_other, first_samp=222) + + # naive processing + evoked_raw, events_raw, _, events_other = _assert_similarity( + raw, other, n_events) + if start_raw == start_other: # can just naively crop + a, b = data_raw[0], data_other[0] + n = min(len(a), len(b)) + corr = np.corrcoef(a[:n], b[:n])[0, 1] + min_, max_ = (0.99999, 1.) if sfreq_raw == sfreq_other else (0.8, 0.9) + assert min_ <= corr <= max_ + + # realign + t_raw = (events_raw[:, 0] - raw.first_samp) / other.info['sfreq'] + t_other = (events_other[:, 0] - other.first_samp) / other.info['sfreq'] + assert duration - 10 <= len(events_raw) < duration + raw_orig, other_orig = raw.copy(), other.copy() + realign_raw(raw, other, t_raw, t_other) + + # old events should still work for raw and produce the same result + evoked_raw_2, _, _, _ = _assert_similarity( + raw, other, n_events, events_raw=events_raw) + assert_allclose(evoked_raw.data, evoked_raw_2.data) + assert_allclose(raw.times, other.times) + # raw data now aligned + corr = np.corrcoef(raw.get_data([0])[0], other.get_data([0])[0])[0, 1] + assert 0.99 < corr <= 1. 
+ + # Degenerate conditions -- only test in one run + test_degenerate = (start_raw == start_other and + stop_raw == stop_other and + ratio_other == 1) + if not test_degenerate: + return + # these alignments will not be correct but it shouldn't matter + with pytest.warns(RuntimeWarning, match='^Fewer.*may be unreliable.*'): + realign_raw(raw, other, raw_times[:5], other_times[:5]) + with pytest.raises(ValueError, match='same shape'): + realign_raw(raw_orig, other_orig, raw_times[:5], other_times) + rand_times = np.random.RandomState(0).randn(len(other_times)) + with pytest.raises(ValueError, match='cannot resample safely'): + realign_raw(raw_orig, other_orig, rand_times, other_times) + with pytest.warns(RuntimeWarning, match='.*computed as R=.*unreliable'): + realign_raw( + raw_orig, other_orig, raw_times + rand_times * 1000, other_times) + + +def _assert_similarity(raw, other, n_events, events_raw=None): + if events_raw is None: + events_raw = find_events(raw) + events_other = find_events(other) + assert len(events_raw) == n_events + assert len(events_other) == n_events + kwargs = dict(baseline=None, tmin=0, tmax=0.2) + evoked_raw = Epochs(raw, events_raw, **kwargs).average() + evoked_other = Epochs(other, events_other, **kwargs).average() + assert evoked_raw.nave == evoked_other.nave == len(events_raw) + assert len(evoked_raw.data) == len(evoked_other.data) == 1 # just EEG + corr = np.corrcoef(evoked_raw.data[0], evoked_other.data[0])[0, 1] + assert 0.9 <= corr <= 1. 
+ return evoked_raw, events_raw, evoked_other, events_other From 02db0b82d3cc42c1f30fc27a850340d890c4a538 Mon Sep 17 00:00:00 2001 From: Daniel McCloy Date: Wed, 18 Nov 2020 20:23:00 -0600 Subject: [PATCH 157/167] MRG, MAINT: deduplicate definition of FIFF constants (#8537) * closes #4851 * fix tests * modernize some tests along the way * more flexible * nest call within test --- mne/channels/channels.py | 42 +++------------ mne/channels/tests/test_channels.py | 15 +++--- mne/io/array/tests/test_array.py | 6 ++- mne/io/edf/tests/test_edf.py | 7 +-- mne/io/meas_info.py | 55 +++++++------------- mne/io/pick.py | 81 ++++++++++++++++++++++------- mne/io/tests/test_pick.py | 12 +++-- mne/preprocessing/tests/test_ica.py | 5 +- 8 files changed, 112 insertions(+), 111 deletions(-) diff --git a/mne/channels/channels.py b/mne/channels/channels.py index a4ab98e03ee..b098a949dc8 100644 --- a/mne/channels/channels.py +++ b/mne/channels/channels.py @@ -27,7 +27,7 @@ from ..io.pick import (channel_type, pick_info, pick_types, _picks_by_type, _check_excludes_includes, _contains_ch_type, channel_indices_by_type, pick_channels, _picks_to_idx, - _get_channel_types) + _get_channel_types, get_channel_type_constants) from ..io.write import DATE_NONE from ..io._digitization import _get_data_as_dict_from_dig @@ -275,41 +275,11 @@ def get_montage(self): return montage -# XXX Eventually de-duplicate with _kind_dict of mne/io/meas_info.py -_human2fiff = {'ecg': FIFF.FIFFV_ECG_CH, - 'eeg': FIFF.FIFFV_EEG_CH, - 'emg': FIFF.FIFFV_EMG_CH, - 'eog': FIFF.FIFFV_EOG_CH, - 'exci': FIFF.FIFFV_EXCI_CH, - 'ias': FIFF.FIFFV_IAS_CH, - 'misc': FIFF.FIFFV_MISC_CH, - 'resp': FIFF.FIFFV_RESP_CH, - 'seeg': FIFF.FIFFV_SEEG_CH, - 'stim': FIFF.FIFFV_STIM_CH, - 'syst': FIFF.FIFFV_SYST_CH, - 'bio': FIFF.FIFFV_BIO_CH, - 'ecog': FIFF.FIFFV_ECOG_CH, - 'fnirs_cw_amplitude': FIFF.FIFFV_FNIRS_CH, - 'fnirs_od': FIFF.FIFFV_FNIRS_CH, - 'hbo': FIFF.FIFFV_FNIRS_CH, - 'hbr': FIFF.FIFFV_FNIRS_CH} -_human2unit = {'ecg': 
FIFF.FIFF_UNIT_V, - 'eeg': FIFF.FIFF_UNIT_V, - 'emg': FIFF.FIFF_UNIT_V, - 'eog': FIFF.FIFF_UNIT_V, - 'exci': FIFF.FIFF_UNIT_NONE, - 'ias': FIFF.FIFF_UNIT_NONE, - 'misc': FIFF.FIFF_UNIT_V, - 'resp': FIFF.FIFF_UNIT_NONE, - 'seeg': FIFF.FIFF_UNIT_V, - 'stim': FIFF.FIFF_UNIT_NONE, - 'syst': FIFF.FIFF_UNIT_NONE, - 'bio': FIFF.FIFF_UNIT_V, - 'ecog': FIFF.FIFF_UNIT_V, - 'fnirs_cw_amplitude': FIFF.FIFF_UNIT_V, - 'fnirs_od': FIFF.FIFF_UNIT_NONE, - 'hbo': FIFF.FIFF_UNIT_MOL, - 'hbr': FIFF.FIFF_UNIT_MOL} +channel_type_constants = get_channel_type_constants() +_human2fiff = {k: v.get('kind', FIFF.FIFFV_COIL_NONE) for k, v in + channel_type_constants.items()} +_human2unit = {k: v.get('unit', FIFF.FIFF_UNIT_NONE) for k, v in + channel_type_constants.items()} _unit2human = {FIFF.FIFF_UNIT_V: 'V', FIFF.FIFF_UNIT_T: 'T', FIFF.FIFF_UNIT_T_M: 'T/m', diff --git a/mne/channels/tests/test_channels.py b/mne/channels/tests/test_channels.py index a2a2edbfaf4..06dec3155ec 100644 --- a/mne/channels/tests/test_channels.py +++ b/mne/channels/tests/test_channels.py @@ -114,21 +114,22 @@ def test_set_channel_types(): # Error Tests # Test channel name exists in ch_names mapping = {'EEG 160': 'EEG060'} - pytest.raises(ValueError, raw.set_channel_types, mapping) + with pytest.raises(ValueError, match=r"name \(EEG 160\) doesn't exist"): + raw.set_channel_types(mapping) # Test change to illegal channel type mapping = {'EOG 061': 'xxx'} - pytest.raises(ValueError, raw.set_channel_types, mapping) - # Test changing type if in proj (avg eeg ref here) + with pytest.raises(ValueError, match='cannot change to this channel type'): + raw.set_channel_types(mapping) + # Test changing type if in proj mapping = {'EEG 058': 'ecog', 'EEG 059': 'ecg', 'EEG 060': 'eog', 'EOG 061': 'seeg', 'MEG 2441': 'eeg', 'MEG 2443': 'eeg', 'MEG 2442': 'hbo'} - pytest.raises(RuntimeError, raw.set_channel_types, mapping) - # Test type change raw2 = read_raw_fif(raw_fname) raw2.info['bads'] = ['EEG 059', 'EEG 060', 'EOG 061'] - 
pytest.raises(RuntimeError, raw2.set_channel_types, mapping) # has prj + with pytest.raises(RuntimeError, match='type .* in projector "PCA-v1"'): + raw2.set_channel_types(mapping) # has prj raw2.add_proj([], remove_existing=True) - with pytest.warns(RuntimeWarning, match='The unit for channel'): + with pytest.warns(RuntimeWarning, match='unit for channel.* has changed'): raw2 = raw2.set_channel_types(mapping) info = raw2.info assert info['chs'][372]['ch_name'] == 'EEG 058' diff --git a/mne/io/array/tests/test_array.py b/mne/io/array/tests/test_array.py index df1d790cd8f..4d6966ed67e 100644 --- a/mne/io/array/tests/test_array.py +++ b/mne/io/array/tests/test_array.py @@ -14,7 +14,8 @@ from mne.io import read_raw_fif from mne.io.array import RawArray from mne.io.tests.test_raw import _test_raw_reader -from mne.io.meas_info import create_info, _kind_dict +from mne.io.meas_info import create_info +from mne.io.pick import get_channel_type_constants from mne.utils import run_tests_if_main from mne.channels import make_dig_montage @@ -101,7 +102,8 @@ def test_array_raw(): types[-1] = 'eog' # default type info = create_info(ch_names, sfreq) - assert_equal(info['chs'][0]['kind'], _kind_dict['misc'][0]) + assert_equal(info['chs'][0]['kind'], + get_channel_type_constants()['misc']['kind']) # use real types info = create_info(ch_names, sfreq, types) raw2 = _test_raw_reader(RawArray, test_preloading=False, diff --git a/mne/io/edf/tests/test_edf.py b/mne/io/edf/tests/test_edf.py index d4507d47a53..a96203c7c2d 100644 --- a/mne/io/edf/tests/test_edf.py +++ b/mne/io/edf/tests/test_edf.py @@ -27,9 +27,8 @@ from mne.io.edf.edf import _read_annotations_edf from mne.io.edf.edf import _read_ch from mne.io.edf.edf import _parse_prefilter_string -from mne.io.pick import channel_indices_by_type +from mne.io.pick import channel_indices_by_type, get_channel_type_constants from mne.annotations import events_from_annotations, read_annotations -from mne.io.meas_info import _kind_dict as 
_KIND_DICT FILE = inspect.getfile(inspect.currentframe()) @@ -363,7 +362,9 @@ def test_load_generator(fname, recwarn): def test_edf_stim_ch_pick_up(test_input, EXPECTED): """Test stim_channel.""" # This is fragile for EEG/EEG-CSD, so just omit csd - TYPE_LUT = {v[0]: k for k, v in _KIND_DICT.items() if k != 'csd'} + KIND_DICT = get_channel_type_constants() + TYPE_LUT = {v['kind']: k for k, v in KIND_DICT.items() if k not in + ('csd', 'chpi')} # chpi not needed, and unhashable (a list) fname = op.join(data_dir, 'test_stim_channel.edf') raw = read_raw_edf(fname, stim_channel=test_input) diff --git a/mne/io/meas_info.py b/mne/io/meas_info.py index 3e676afdd55..135334e77a2 100644 --- a/mne/io/meas_info.py +++ b/mne/io/meas_info.py @@ -17,7 +17,8 @@ import numpy as np from scipy import linalg -from .pick import channel_type, pick_channels, pick_info +from .pick import (channel_type, pick_channels, pick_info, + get_channel_type_constants) from .constants import FIFF, _coord_frame_named from .open import fiff_open from .tree import dir_tree_find @@ -40,30 +41,6 @@ b = bytes # alias -_kind_dict = dict( - eeg=(FIFF.FIFFV_EEG_CH, FIFF.FIFFV_COIL_EEG, FIFF.FIFF_UNIT_V), - mag=(FIFF.FIFFV_MEG_CH, FIFF.FIFFV_COIL_VV_MAG_T3, FIFF.FIFF_UNIT_T), - grad=(FIFF.FIFFV_MEG_CH, FIFF.FIFFV_COIL_VV_PLANAR_T1, FIFF.FIFF_UNIT_T_M), - ref_meg=(FIFF.FIFFV_REF_MEG_CH, FIFF.FIFFV_COIL_VV_MAG_T3, - FIFF.FIFF_UNIT_T), - misc=(FIFF.FIFFV_MISC_CH, FIFF.FIFFV_COIL_NONE, FIFF.FIFF_UNIT_NONE), - stim=(FIFF.FIFFV_STIM_CH, FIFF.FIFFV_COIL_NONE, FIFF.FIFF_UNIT_V), - eog=(FIFF.FIFFV_EOG_CH, FIFF.FIFFV_COIL_NONE, FIFF.FIFF_UNIT_V), - ecg=(FIFF.FIFFV_ECG_CH, FIFF.FIFFV_COIL_NONE, FIFF.FIFF_UNIT_V), - emg=(FIFF.FIFFV_EMG_CH, FIFF.FIFFV_COIL_NONE, FIFF.FIFF_UNIT_V), - seeg=(FIFF.FIFFV_SEEG_CH, FIFF.FIFFV_COIL_EEG, FIFF.FIFF_UNIT_V), - bio=(FIFF.FIFFV_BIO_CH, FIFF.FIFFV_COIL_NONE, FIFF.FIFF_UNIT_V), - ecog=(FIFF.FIFFV_ECOG_CH, FIFF.FIFFV_COIL_EEG, FIFF.FIFF_UNIT_V), - fnirs_cw_amplitude=(FIFF.FIFFV_FNIRS_CH, - 
FIFF.FIFFV_COIL_FNIRS_CW_AMPLITUDE, FIFF.FIFF_UNIT_V), - fnirs_od=(FIFF.FIFFV_FNIRS_CH, FIFF.FIFFV_COIL_FNIRS_OD, - FIFF.FIFF_UNIT_NONE), - hbo=(FIFF.FIFFV_FNIRS_CH, FIFF.FIFFV_COIL_FNIRS_HBO, FIFF.FIFF_UNIT_MOL), - hbr=(FIFF.FIFFV_FNIRS_CH, FIFF.FIFFV_COIL_FNIRS_HBR, FIFF.FIFF_UNIT_MOL), - csd=(FIFF.FIFFV_EEG_CH, FIFF.FIFFV_COIL_EEG_CSD, FIFF.FIFF_UNIT_V_M2), -) - - _SCALAR_CH_KEYS = ('scanno', 'logno', 'kind', 'range', 'cal', 'coil_type', 'unit', 'unit_mul', 'coord_frame') _ALL_CH_KEYS_SET = set(_SCALAR_CH_KEYS + ('loc', 'ch_name')) @@ -2018,20 +1995,26 @@ def create_info(ch_names, sfreq, ch_types='misc', verbose=None): '(%s != %s) for ch_types=%s' % (len(ch_types), nchan, ch_types)) info = _empty_info(sfreq) - for ci, (name, kind) in enumerate(zip(ch_names, ch_types)): - _validate_type(name, 'str', "each entry in ch_names") - _validate_type(kind, 'str', "each entry in ch_types") - if kind not in _kind_dict: - raise KeyError('kind must be one of %s, not %s' - % (list(_kind_dict.keys()), kind)) - kind = _kind_dict[kind] + ch_types_dict = get_channel_type_constants(include_defaults=True) + for ci, (ch_name, ch_type) in enumerate(zip(ch_names, ch_types)): + _validate_type(ch_name, 'str', "each entry in ch_names") + _validate_type(ch_type, 'str', "each entry in ch_types") + if ch_type not in ch_types_dict: + raise KeyError(f'kind must be one of {list(ch_types_dict)}, ' + f'not {ch_type}') + this_ch_dict = ch_types_dict[ch_type] + kind = this_ch_dict['kind'] + # handle chpi, where kind is a *list* of FIFF constants: + kind = kind[0] if isinstance(kind, (list, tuple)) else kind # mirror what tag.py does here - coord_frame = _ch_coord_dict.get(kind[0], FIFF.FIFFV_COORD_UNKNOWN) + coord_frame = _ch_coord_dict.get(kind, FIFF.FIFFV_COORD_UNKNOWN) + coil_type = this_ch_dict.get('coil_type', FIFF.FIFFV_COIL_NONE) + unit = this_ch_dict.get('unit', FIFF.FIFF_UNIT_NONE) chan_info = dict(loc=np.full(12, np.nan), unit_mul=FIFF.FIFF_UNITM_NONE, range=1., cal=1., - kind=kind[0], 
coil_type=kind[1], - unit=kind[2], coord_frame=coord_frame, - ch_name=str(name), scanno=ci + 1, logno=ci + 1) + kind=kind, coil_type=coil_type, unit=unit, + coord_frame=coord_frame, ch_name=str(ch_name), + scanno=ci + 1, logno=ci + 1) info['chs'].append(chan_info) info._update_redundant() diff --git a/mne/io/pick.py b/mne/io/pick.py index b00edcee4fa..da46bf7e615 100644 --- a/mne/io/pick.py +++ b/mne/io/pick.py @@ -15,51 +15,94 @@ _check_option) -def get_channel_type_constants(): - """Return all known channel types. +def get_channel_type_constants(include_defaults=False): + """Return all known channel types, and associated FIFF constants. + + Parameters + ---------- + include_defaults : bool + Whether to include default values for "unit" and "coil_type" for all + entries (see Notes). Defaults are generally based on values normally + present for a VectorView MEG system. Defaults to ``False``. Returns ------- channel_types : dict - The keys contain the channel types, and the values contain the - corresponding values in the info['chs'][idx] dictionary. + The keys are channel type strings, and the values are dictionaries of + FIFF constants for "kind", and possibly "unit" and "coil_type". + + Notes + ----- + Values which might vary within a channel type across real data + recordings are excluded unless ``include_defaults=True``. For example, + "ref_meg" channels may have coil type + ``FIFFV_COIL_MAGNES_OFFDIAG_REF_GRAD``, ``FIFFV_COIL_VV_MAG_T3``, etc + (depending on the recording system), so no "coil_type" entry is given + for "ref_meg" unless ``include_defaults`` is requested. 
""" - return dict(grad=dict(kind=FIFF.FIFFV_MEG_CH, - unit=FIFF.FIFF_UNIT_T_M), - mag=dict(kind=FIFF.FIFFV_MEG_CH, - unit=FIFF.FIFF_UNIT_T), + base = dict(grad=dict(kind=FIFF.FIFFV_MEG_CH, unit=FIFF.FIFF_UNIT_T_M), + mag=dict(kind=FIFF.FIFFV_MEG_CH, unit=FIFF.FIFF_UNIT_T), ref_meg=dict(kind=FIFF.FIFFV_REF_MEG_CH), - eeg=dict(kind=FIFF.FIFFV_EEG_CH), + eeg=dict(kind=FIFF.FIFFV_EEG_CH, + unit=FIFF.FIFF_UNIT_V, + coil_type=FIFF.FIFFV_COIL_EEG), + seeg=dict(kind=FIFF.FIFFV_SEEG_CH, + unit=FIFF.FIFF_UNIT_V, + coil_type=FIFF.FIFFV_COIL_EEG), + ecog=dict(kind=FIFF.FIFFV_ECOG_CH, + unit=FIFF.FIFF_UNIT_V, + coil_type=FIFF.FIFFV_COIL_EEG), + eog=dict(kind=FIFF.FIFFV_EOG_CH, unit=FIFF.FIFF_UNIT_V), + emg=dict(kind=FIFF.FIFFV_EMG_CH, unit=FIFF.FIFF_UNIT_V), + ecg=dict(kind=FIFF.FIFFV_ECG_CH, unit=FIFF.FIFF_UNIT_V), + bio=dict(kind=FIFF.FIFFV_BIO_CH, unit=FIFF.FIFF_UNIT_V), + misc=dict(kind=FIFF.FIFFV_MISC_CH, unit=FIFF.FIFF_UNIT_V), stim=dict(kind=FIFF.FIFFV_STIM_CH), - eog=dict(kind=FIFF.FIFFV_EOG_CH), - emg=dict(kind=FIFF.FIFFV_EMG_CH), - ecg=dict(kind=FIFF.FIFFV_ECG_CH), resp=dict(kind=FIFF.FIFFV_RESP_CH), - misc=dict(kind=FIFF.FIFFV_MISC_CH), exci=dict(kind=FIFF.FIFFV_EXCI_CH), - ias=dict(kind=FIFF.FIFFV_IAS_CH), syst=dict(kind=FIFF.FIFFV_SYST_CH), - seeg=dict(kind=FIFF.FIFFV_SEEG_CH), - bio=dict(kind=FIFF.FIFFV_BIO_CH), + ias=dict(kind=FIFF.FIFFV_IAS_CH), + gof=dict(kind=FIFF.FIFFV_GOODNESS_FIT), + dipole=dict(kind=FIFF.FIFFV_DIPOLE_WAVE), chpi=dict(kind=[FIFF.FIFFV_QUAT_0, FIFF.FIFFV_QUAT_1, FIFF.FIFFV_QUAT_2, FIFF.FIFFV_QUAT_3, FIFF.FIFFV_QUAT_4, FIFF.FIFFV_QUAT_5, FIFF.FIFFV_QUAT_6, FIFF.FIFFV_HPI_G, FIFF.FIFFV_HPI_ERR, FIFF.FIFFV_HPI_MOV]), - dipole=dict(kind=FIFF.FIFFV_DIPOLE_WAVE), - gof=dict(kind=FIFF.FIFFV_GOODNESS_FIT), - ecog=dict(kind=FIFF.FIFFV_ECOG_CH), fnirs_cw_amplitude=dict( kind=FIFF.FIFFV_FNIRS_CH, + unit=FIFF.FIFF_UNIT_V, coil_type=FIFF.FIFFV_COIL_FNIRS_CW_AMPLITUDE), fnirs_od=dict(kind=FIFF.FIFFV_FNIRS_CH, coil_type=FIFF.FIFFV_COIL_FNIRS_OD), 
hbo=dict(kind=FIFF.FIFFV_FNIRS_CH, + unit=FIFF.FIFF_UNIT_MOL, coil_type=FIFF.FIFFV_COIL_FNIRS_HBO), hbr=dict(kind=FIFF.FIFFV_FNIRS_CH, + unit=FIFF.FIFF_UNIT_MOL, coil_type=FIFF.FIFFV_COIL_FNIRS_HBR), csd=dict(kind=FIFF.FIFFV_EEG_CH, + unit=FIFF.FIFF_UNIT_V_M2, coil_type=FIFF.FIFFV_COIL_EEG_CSD)) + if include_defaults: + coil_none = dict(coil_type=FIFF.FIFFV_COIL_NONE) + unit_none = dict(unit=FIFF.FIFF_UNIT_NONE) + defaults = dict( + grad=dict(coil_type=FIFF.FIFFV_COIL_VV_PLANAR_T1), + mag=dict(coil_type=FIFF.FIFFV_COIL_VV_MAG_T3), + ref_meg=dict(coil_type=FIFF.FIFFV_COIL_VV_MAG_T3, + unit=FIFF.FIFF_UNIT_T), + misc=dict(**coil_none, **unit_none), # NB: overwrites UNIT_V + stim=dict(unit=FIFF.FIFF_UNIT_V, **coil_none), + eog=coil_none, + ecg=coil_none, + emg=coil_none, + bio=coil_none, + fnirs_od=unit_none, + ) + for key, value in defaults.items(): + base[key].update(value) + return base _first_rule = { diff --git a/mne/io/tests/test_pick.py b/mne/io/tests/test_pick.py index 0741379780a..ed089b23334 100644 --- a/mne/io/tests/test_pick.py +++ b/mne/io/tests/test_pick.py @@ -69,8 +69,8 @@ def _channel_type_old(info, idx): # iterate through all defined channel types until we find a match with ch # go in order from most specific (most rules entries) to least specific - channel_types = sorted( - get_channel_type_constants().items(), key=lambda x: len(x[1]))[::-1] + channel_types = sorted(get_channel_type_constants().items(), + key=lambda x: len(x[1]), reverse=True) for t, rules in channel_types: for key, vals in rules.items(): # all keys must match the values if ch.get(key, None) not in np.array(vals): @@ -78,7 +78,7 @@ def _channel_type_old(info, idx): else: return t - raise ValueError('Unknown channel type for {}'.format(ch["ch_name"])) + raise ValueError(f'Unknown channel type for {ch["ch_name"]}') def _assert_channel_types(info): @@ -112,8 +112,10 @@ def test_pick_refs(): for info in infos: info['bads'] = [] _assert_channel_types(info) - pytest.raises(ValueError, 
pick_types, info, meg='foo') - pytest.raises(ValueError, pick_types, info, ref_meg='foo') + with pytest.raises(ValueError, match="'planar2'] or bool, not foo"): + pick_types(info, meg='foo') + with pytest.raises(ValueError, match="'planar2', 'auto'] or bool,"): + pick_types(info, ref_meg='foo') picks_meg_ref = pick_types(info, meg=True, ref_meg=True) picks_meg = pick_types(info, meg=True, ref_meg=False) picks_ref = pick_types(info, meg=False, ref_meg=True) diff --git a/mne/preprocessing/tests/test_ica.py b/mne/preprocessing/tests/test_ica.py index c510dfb1472..fc21a218d59 100644 --- a/mne/preprocessing/tests/test_ica.py +++ b/mne/preprocessing/tests/test_ica.py @@ -25,8 +25,7 @@ from mne.preprocessing.ica import (get_score_funcs, corrmap, _sort_components, _ica_explained_variance, read_ica_eeglab) from mne.io import read_raw_fif, Info, RawArray, read_raw_ctf, read_raw_eeglab -from mne.io.meas_info import _kind_dict -from mne.io.pick import _DATA_CH_TYPES_SPLIT +from mne.io.pick import _DATA_CH_TYPES_SPLIT, get_channel_type_constants from mne.io.eeglab.eeglab import _check_load_mat from mne.rank import _compute_rank_int from mne.utils import catch_logging, requires_sklearn, run_tests_if_main @@ -1005,7 +1004,7 @@ def test_fit_params(method, tmpdir): def test_bad_channels(method, allow_ref_meg): """Test exception when unsupported channels are used.""" _skip_check_picard(method) - chs = [i for i in _kind_dict] + chs = list(get_channel_type_constants()) info = create_info(len(chs), 500, chs) rng = np.random.RandomState(0) data = rng.rand(len(chs), 50) From 0e56673d9032eee88156ca5af4dd4b95098a9d15 Mon Sep 17 00:00:00 2001 From: Eric Larson Date: Thu, 19 Nov 2020 07:38:03 -0500 Subject: [PATCH 158/167] MRG, MAINT: Try conda-forge (#8046) * MAINT: Use conda-forge * STY: Flake * FIX: Speed up * FIX: Restore * FIX: Restore * FIX: MKL? 
* FIX: Tol --- .github/workflows/linux_conda.yml | 2 +- azure-pipelines.yml | 1 - environment.yml | 29 +++++++++++-------------- mne/beamformer/tests/test_lcmv.py | 4 ++-- mne/preprocessing/tests/test_maxwell.py | 7 +++--- 5 files changed, 20 insertions(+), 23 deletions(-) diff --git a/.github/workflows/linux_conda.yml b/.github/workflows/linux_conda.yml index 676c3a01a4b..fcc5919cbce 100644 --- a/.github/workflows/linux_conda.yml +++ b/.github/workflows/linux_conda.yml @@ -20,7 +20,7 @@ jobs: CONDA_ENV: 'environment.yml' DISPLAY: ':99.0' MNE_LOGGING_LEVEL: 'warning' - OPENBLAS_NUM_THREADS: '1' + MKL_NUM_THREADS: '1' PYTHONUNBUFFERED: '1' PYTHON_VERSION: '3.8' steps: diff --git a/azure-pipelines.yml b/azure-pipelines.yml index 8c06ca8ac03..19cb1192334 100644 --- a/azure-pipelines.yml +++ b/azure-pipelines.yml @@ -174,7 +174,6 @@ stages: OPENBLAS_NUM_THREADS: 1 PYTHONUNBUFFERED: 1 PYTHONIOENCODING: 'utf-8' - MKL_NUM_THREADS: 1 AZURE_CI_WINDOWS: 'true' PYTHON_ARCH: 'x64' strategy: diff --git a/environment.yml b/environment.yml index 87f26985877..39ea020c4a9 100644 --- a/environment.yml +++ b/environment.yml @@ -1,6 +1,6 @@ name: mne channels: -- defaults +- conda-forge dependencies: - python>=3.8 - pip @@ -24,18 +24,15 @@ dependencies: - imageio - tqdm - spyder-kernels -- pip: - - mne - - imageio-ffmpeg>=0.4.1 - - vtk>=9.0.1 - - pyvista>=0.24 - - pyvistaqt>=0.2.0 - - mayavi - - PySurfer[save_movie] - - dipy --only-binary dipy - - nibabel - - nilearn - - neo - - python-picard - - PyQt5>=5.10,<5.14; platform_system == "Darwin" - - PyQt5>=5.10; platform_system != "Darwin" +- imageio-ffmpeg>=0.4.1 +- vtk>=9.0.1 +- pyvista>=0.24 +- pyvistaqt>=0.2.0 +- mayavi +- PySurfer +- dipy +- nibabel +- nilearn +- python-picard +- pyqt +- mne diff --git a/mne/beamformer/tests/test_lcmv.py b/mne/beamformer/tests/test_lcmv.py index 576b97158c8..835f494228d 100644 --- a/mne/beamformer/tests/test_lcmv.py +++ b/mne/beamformer/tests/test_lcmv.py @@ -648,7 +648,7 @@ def 
test_lcmv_reg_proj(proj, weight_norm): (0.05, 'unit-noise-gain', False, None, 83, 86), (0.05, 'unit-noise-gain', False, 0.8, 83, 86), # depth same for wn != None # no reg - (0.00, 'unit-noise-gain', True, None, 45, 99), # TODO: Still not stable + (0.00, 'unit-noise-gain', True, None, 35, 99), # TODO: Still not stable ]) def test_localization_bias_fixed(bias_params_fixed, reg, weight_norm, use_cov, depth, lower, upper): @@ -687,7 +687,7 @@ def test_localization_bias_fixed(bias_params_fixed, reg, weight_norm, use_cov, (0.00, 'vector', 'unit-noise-gain-invariant', True, None, 50, 65), (0.00, 'vector', 'unit-noise-gain', True, None, 42, 65), (0.00, 'vector', 'nai', True, None, 42, 65), - (0.00, 'max-power', None, True, None, 15, 19), + (0.00, 'max-power', None, True, None, 13, 19), (0.00, 'max-power', 'unit-noise-gain-invariant', True, None, 43, 50), (0.00, 'max-power', 'unit-noise-gain', True, None, 43, 50), (0.00, 'max-power', 'nai', True, None, 43, 50), diff --git a/mne/preprocessing/tests/test_maxwell.py b/mne/preprocessing/tests/test_maxwell.py index ccdbe972934..da75146aabf 100644 --- a/mne/preprocessing/tests/test_maxwell.py +++ b/mne/preprocessing/tests/test_maxwell.py @@ -1083,9 +1083,10 @@ def test_shielding_factor(tmpdir): for line in fid: fid_out.write(' '.join(line.strip().split(' ')[:14]) + '\n') with get_n_projected() as counts: - raw_sss = maxwell_filter(raw_erm, calibration=temp_fname, - cross_talk=ctc_fname, st_duration=1., - coord_frame='meg', regularize='in') + with pytest.warns(None): # SVD convergence sometimes + raw_sss = maxwell_filter(raw_erm, calibration=temp_fname, + cross_talk=ctc_fname, st_duration=1., + coord_frame='meg', regularize='in') # Our 3D cal has worse defaults for this ERM than the 1D file _assert_shielding(raw_sss, erm_power, 44, 45) assert counts[0] == 3 From 3e69ffc7cbe0539f7c2d0e38ec311fa030aa97ae Mon Sep 17 00:00:00 2001 From: Eric Larson Date: Thu, 19 Nov 2020 10:14:49 -0500 Subject: [PATCH 159/167] FIX: Fix constants, 
reading, tests --- mne/channels/channels.py | 11 +- mne/channels/layout.py | 6 +- mne/cov.py | 17 +- mne/defaults.py | 16 +- mne/evoked.py | 13 +- mne/io/boxy/boxy.py | 13 +- mne/io/boxy/tests/test_boxy.py | 273 ++++++++++----------------------- mne/io/pick.py | 38 ++--- mne/viz/utils.py | 4 +- 9 files changed, 123 insertions(+), 268 deletions(-) diff --git a/mne/channels/channels.py b/mne/channels/channels.py index ca15abe2b39..cb187842e3d 100644 --- a/mne/channels/channels.py +++ b/mne/channels/channels.py @@ -80,9 +80,8 @@ def _get_ch_type(inst, ch_type, allow_ref_meg=False): """ if ch_type is None: allowed_types = ['mag', 'grad', 'planar1', 'planar2', 'eeg', 'csd', - 'fnirs_cw_amplitude', 'fnirs_fd_dc_amplitude', - 'fnirs_fd_ac_amplitude', 'fnirs_fd_phase', 'fnirs_od', - 'hbo', 'hbr', + 'fnirs_cw_amplitude', 'fnirs_fd_ac_amplitude', + 'fnirs_fd_phase', 'fnirs_od', 'hbo', 'hbr', 'ecog', 'seeg'] allowed_types += ['ref_meg'] if allow_ref_meg else [] for type_ in allowed_types: @@ -412,8 +411,8 @@ def set_channel_types(self, mapping, verbose=None): The following sensor types are accepted: ecg, eeg, emg, eog, exci, ias, misc, resp, seeg, stim, syst, ecog, - hbo, hbr, fnirs_cw_amplitude, fnirs_fd_dc_amplitude, - fnirs_fd_ac_amplitude, fnirs_fd_phase, fnirs_od + hbo, hbr, fnirs_cw_amplitude, fnirs_fd_ac_amplitude, + fnirs_fd_phase, fnirs_od .. 
versionadded:: 0.9.0 """ @@ -455,8 +454,6 @@ def set_channel_types(self, mapping, verbose=None): coil_type = FIFF.FIFFV_COIL_FNIRS_HBR elif ch_type == 'fnirs_cw_amplitude': coil_type = FIFF.FIFFV_COIL_FNIRS_CW_AMPLITUDE - elif ch_type == 'fnirs_fd_dc_amplitude': - coil_type = FIFF.FIFFV_COIL_FNIRS_FD_DC_AMPLITUDE elif ch_type == 'fnirs_fd_ac_amplitude': coil_type = FIFF.FIFFV_COIL_FNIRS_FD_AC_AMPLITUDE elif ch_type == 'fnirs_fd_phase': diff --git a/mne/channels/layout.py b/mne/channels/layout.py index a3fc1a51021..6a20cbf9ed9 100644 --- a/mne/channels/layout.py +++ b/mne/channels/layout.py @@ -17,7 +17,7 @@ import numpy as np from ..transforms import _pol_to_cart, _cart_to_sph -from ..io.pick import pick_types, _picks_to_idx +from ..io.pick import pick_types, _picks_to_idx, _FNIRS_CH_TYPES_SPLIT from ..io.constants import FIFF from ..io.meas_info import Info from ..utils import (_clean_names, warn, _check_ch_locs, fill_doc, @@ -917,9 +917,7 @@ def _merge_ch_data(data, ch_type, names, method='rms'): if ch_type == 'grad': data = _merge_grad_data(data, method) else: - assert ch_type in ('hbo', 'hbr', 'fnirs_cw_amplitude', - 'fnirs_fd_dc_amplitude', 'fnirs_fd_ac_amplitude', - 'fnirs_fd_phase', 'fnirs_od') + assert ch_type in _FNIRS_CH_TYPES_SPLIT data, names = _merge_nirs_data(data, names) return data, names diff --git a/mne/cov.py b/mne/cov.py index 0e75975b57d..72e33bec09f 100644 --- a/mne/cov.py +++ b/mne/cov.py @@ -1254,8 +1254,7 @@ class _RegCovariance(BaseEstimator): def __init__(self, info, grad=0.1, mag=0.1, eeg=0.1, seeg=0.1, ecog=0.1, hbo=0.1, hbr=0.1, fnirs_cw_amplitude=0.1, - fnirs_fd_dc_amplitude=0.1, fnirs_fd_ac_amplitude=0.1, - fnirs_fd_phase=0.1, fnirs_od=0.1, + fnirs_fd_ac_amplitude=0.1, fnirs_fd_phase=0.1, fnirs_od=0.1, csd=0.1, store_precision=False, assume_centered=False): self.info = info # For sklearn compat, these cannot (easily?) 
be combined into @@ -1268,8 +1267,7 @@ def __init__(self, info, grad=0.1, mag=0.1, eeg=0.1, seeg=0.1, ecog=0.1, self.hbo = hbo self.hbr = hbr self.fnirs_cw_amplitude = fnirs_cw_amplitude - self.fnirs_fd_dc_amplitude = fnirs_fd_dc_amplitude - self.fnirs_fd_ac_amplitude = fnirs_ac_ac_amplitude + self.fnirs_fd_ac_amplitude = fnirs_fd_ac_amplitude self.fnirs_fd_phase = fnirs_fd_phase self.fnirs_od = fnirs_od self.csd = csd @@ -1550,9 +1548,8 @@ def _smart_eigh(C, info, rank, scalings=None, projs=None, @verbose def regularize(cov, info, mag=0.1, grad=0.1, eeg=0.1, exclude='bads', proj=True, seeg=0.1, ecog=0.1, hbo=0.1, hbr=0.1, - fnirs_cw_amplitude=0.1, fnirs_fd_dc_amplitude=0.1, - fnirs_fd_ac_amplitude=0.1, fnirs_fd_phase=0.1, - fnirs_od=0.1, csd=0.1, + fnirs_cw_amplitude=0.1, fnirs_fd_ac_amplitude=0.1, + fnirs_fd_phase=0.1, fnirs_od=0.1, csd=0.1, rank=None, scalings=None, verbose=None): """Regularize noise covariance matrix. @@ -1595,8 +1592,6 @@ def regularize(cov, info, mag=0.1, grad=0.1, eeg=0.1, exclude='bads', Regularization factor for HBR signals. fnirs_cw_amplitude : float (default 0.1) Regularization factor for fNIRS CW raw signals. - fnirs_fd_dc_amplitude : float (default 0.1) - Regularization factor for fNIRS FD DC raw signals. fnirs_fd_ac_amplitude : float (default 0.1) Regularization factor for fNIRS FD AC raw signals. 
fnirs_fd_phase : float (default 0.1) @@ -1632,10 +1627,8 @@ def regularize(cov, info, mag=0.1, grad=0.1, eeg=0.1, exclude='bads', scalings = _handle_default('scalings_cov_rank', scalings) regs = dict(eeg=eeg, seeg=seeg, ecog=ecog, hbo=hbo, hbr=hbr, fnirs_cw_amplitude=fnirs_cw_amplitude, - fnirs_fd_dc_amplitude=fnirs_fd_dc_amplitude, fnirs_fd_ac_amplitude=fnirs_fd_ac_amplitude, - fnirs_fd_phase=fnirs_fd_phase, - fnirs_od=fnirs_od, csd=csd) + fnirs_fd_phase=fnirs_fd_phase, fnirs_od=fnirs_od, csd=csd) if exclude is None: raise ValueError('exclude must be a list of strings or "bads"') diff --git a/mne/defaults.py b/mne/defaults.py index 5067150f038..22f206d4b08 100644 --- a/mne/defaults.py +++ b/mne/defaults.py @@ -11,28 +11,25 @@ ref_meg='steelblue', misc='k', stim='k', resp='k', chpi='k', exci='k', ias='k', syst='k', seeg='saddlebrown', dipole='k', gof='k', bio='k', ecog='k', hbo='#AA3377', hbr='b', - fnirs_cw_amplitude='k', fnirs_fd_dc_amplitude='k', - fnirs_fd_ac_amplitude='k', fnirs_fd_phase='k', - fnirs_od='k', csd='k'), + fnirs_cw_amplitude='k', fnirs_fd_ac_amplitude='k', + fnirs_fd_phase='k', fnirs_od='k', csd='k'), units=dict(mag='fT', grad='fT/cm', eeg='µV', eog='µV', ecg='µV', emg='µV', misc='AU', seeg='mV', dipole='nAm', gof='GOF', bio='µV', ecog='µV', hbo='µM', hbr='µM', ref_meg='fT', - fnirs_cw_amplitude='V', fnirs_fd_dc_amplitude='V', - fnirs_fd_ac_amplitude='V', fnirs_fd_phase='V', - fnirs_od='V', csd='mV/m²'), + fnirs_cw_amplitude='V', fnirs_fd_ac_amplitude='V', + fnirs_fd_phase='V', fnirs_od='V', csd='mV/m²'), # scalings for the units scalings=dict(mag=1e15, grad=1e13, eeg=1e6, eog=1e6, emg=1e6, ecg=1e6, misc=1.0, seeg=1e3, dipole=1e9, gof=1.0, bio=1e6, ecog=1e6, hbo=1e6, hbr=1e6, ref_meg=1e15, fnirs_cw_amplitude=1.0, - fnirs_fd_dc_amplitude=1.0, fnirs_fd_ac_amplitude=1.0, - fnirs_fd_phase=1.0, fnirs_od=1.0, csd=1e3), + fnirs_fd_ac_amplitude=1.0, fnirs_fd_phase=1.0, fnirs_od=1.0, + csd=1e3), # rough guess for a good plot 
scalings_plot_raw=dict(mag=1e-12, grad=4e-11, eeg=20e-6, eog=150e-6, ecg=5e-4, emg=1e-3, ref_meg=1e-12, misc='auto', stim=1, resp=1, chpi=1e-4, exci=1, ias=1, syst=1, seeg=1e-4, bio=1e-6, ecog=1e-4, hbo=10e-6, hbr=10e-6, whitened=10., fnirs_cw_amplitude=2e-2, - fnirs_fd_dc_amplitude=2e-2, fnirs_fd_ac_amplitude=2e-2, fnirs_fd_phase=2e-2, fnirs_od=2e-2, csd=200e-4), scalings_cov_rank=dict(mag=1e12, grad=1e11, eeg=1e5, # ~100x scalings @@ -46,7 +43,6 @@ dipole='Dipole', ecog='ECoG', hbo='Oxyhemoglobin', ref_meg='Reference Magnetometers', fnirs_cw_amplitude='fNIRS (CW amplitude)', - fnirs_fd_dc_amplitude='fNIRS (FD DC amplitude)', fnirs_fd_ac_amplitude='fNIRS (FD AC amplitude)', fnirs_fd_phase='fNIRS (FD phase)', fnirs_od='fNIRS (OD)', hbr='Deoxyhemoglobin', diff --git a/mne/evoked.py b/mne/evoked.py index 701e38d7fbd..102e6becf43 100644 --- a/mne/evoked.py +++ b/mne/evoked.py @@ -31,7 +31,7 @@ from .io.open import fiff_open from .io.tag import read_tag from .io.tree import dir_tree_find -from .io.pick import pick_types, _picks_to_idx +from .io.pick import pick_types, _picks_to_idx, _FNIRS_CH_TYPES_SPLIT from .io.meas_info import read_meas_info, write_meas_info from .io.proj import ProjMixin from .io.write import (start_file, start_block, end_file, end_block, @@ -392,10 +392,10 @@ def animate_topomap(self, ch_type=None, times=None, frame_rate=None, ---------- ch_type : str | None Channel type to plot. Accepted data types: 'mag', 'grad', 'eeg', - 'hbo', 'hbr', 'fnirs_cw_amplitude', 'fnirs_fd_dc_amplitude', + 'hbo', 'hbr', 'fnirs_cw_amplitude', 'fnirs_fd_ac_amplitude', 'fnirs_fd_phase', and 'fnirs_od'. If None, first available channel type from ('mag', 'grad', 'eeg', - 'hbo', 'hbr', 'fnirs_cw_amplitude', 'fnirs_fd_dc_amplitude', + 'hbo', 'hbr', 'fnirs_cw_amplitude', 'fnirs_fd_ac_amplitude', 'fnirs_fd_phase', and 'fnirs_od') is used. Defaults to None. 
times : array of float | None @@ -562,8 +562,7 @@ def get_peak(self, ch_type=None, tmin=None, tmax=None, """ # noqa: E501 supported = ('mag', 'grad', 'eeg', 'seeg', 'ecog', 'misc', 'hbo', 'hbr', 'None', 'fnirs_cw_amplitude', - 'fnirs_fd_dc_amplitude', 'fnirs_fd_ac_amplitude', - 'fnirs_fd_phase', 'fnirs_od') + 'fnirs_fd_ac_amplitude', 'fnirs_fd_phase', 'fnirs_od') types_used = self.get_channel_types(unique=True, only_data_chs=True) _check_option('ch_type', str(ch_type), supported) @@ -596,9 +595,7 @@ def get_peak(self, ch_type=None, tmin=None, tmax=None, seeg = True elif ch_type == 'ecog': ecog = True - elif ch_type in ('hbo', 'hbr', 'fnirs_cw_amplitude', - 'fnirs_fd_dc_amplitude', 'fnirs_fd_ac_amplitude', - 'fnirs_fd_phase', 'fnirs_od'): + elif ch_type in _FNIRS_CH_TYPES_SPLIT: fnirs = ch_type if ch_type is not None: diff --git a/mne/io/boxy/boxy.py b/mne/io/boxy/boxy.py index 2e3e24e9698..7c3058ace04 100644 --- a/mne/io/boxy/boxy.py +++ b/mne/io/boxy/boxy.py @@ -8,6 +8,7 @@ from ..base import BaseRaw from ..meas_info import create_info +from ..utils import _mult_cal_one from ...utils import logger, verbose, fill_doc from ...annotations import Annotations @@ -125,6 +126,7 @@ def __init__(self, fname, preload=False, verbose=None): # 'source_num' rows correspond to the first detector, the next # 'source_num' rows correspond to the second detector, and so on. boxy_labels = list() + ch_types = list() for det_num in range(detect_num): for src_num in range(source_num): for i_type in ['DC', 'AC', 'Ph']: @@ -134,11 +136,10 @@ def __init__(self, fname, preload=False, verbose=None): if i_type == 'Ph': chan_type = 'fnirs_fd_phase' elif i_type == 'DC': - chan_type = 'fnirs_fd_dc_amplitude' + chan_type = 'fnirs_cw_amplitude' else: chan_type = 'fnirs_fd_ac_amplitude' - - ch_types = ([chan_type for i_chan in boxy_labels]) + ch_types.append(chan_type) # Create info structure. 
info = create_info(boxy_labels, srate, ch_types=ch_types) @@ -310,6 +311,6 @@ def _read_segment_file(self, data, idx, fi, start, stop, cals, mult): all_data[index_loc, :] = boxy_array[:, channel] # Place our data into the data object in place. - data[:] = all_data - - return data + # XXX we only use the sub-block from start:stop, so this is pretty + # inefficient. + _mult_cal_one(data, all_data[:, start:stop], idx, cals, mult) diff --git a/mne/io/boxy/tests/test_boxy.py b/mne/io/boxy/tests/test_boxy.py index d03dc33c982..011ded4a706 100644 --- a/mne/io/boxy/tests/test_boxy.py +++ b/mne/io/boxy/tests/test_boxy.py @@ -2,41 +2,67 @@ # # License: BSD (3-clause) -import os +import os.path as op +import pytest import numpy as np from numpy.testing import assert_allclose, assert_array_equal import scipy.io as spio -import mne -from mne.datasets.testing import data_path, requires_testing_data - +from mne import pick_types +from mne.datasets import testing +from mne.io import read_raw_boxy +from mne.io.tests.test_raw import _test_raw_reader + +data_path = testing.data_path(download=False) +boxy_0_40 = op.join( + data_path, 'BOXY', 'boxy_0_40_recording', + 'boxy_0_40_notriggers_unparsed.txt') +p_pod_0_40 = op.join( + data_path, 'BOXY', 'boxy_0_40_recording', 'p_pod_10_6_3_loaded_data', + 'p_pod_10_6_3_notriggers_unparsed.mat') +boxy_0_84 = op.join( + data_path, 'BOXY', 'boxy_0_84_digaux_recording', + 'boxy_0_84_triggers_unparsed.txt') +boxy_0_84_parsed = op.join( + data_path, 'BOXY', 'boxy_0_84_digaux_recording', + 'boxy_0_84_triggers_parsed.txt') +p_pod_0_84 = op.join( + data_path, 'BOXY', 'boxy_0_84_digaux_recording', + 'p_pod_10_6_3_loaded_data', 'p_pod_10_6_3_triggers_unparsed.mat') + + +def _assert_ppod(raw, p_pod_file): + __tracebackhide__ = True + have_types = raw.get_channel_types(unique=True) + assert 'fnirs_fd_phase' in raw, have_types + assert 'fnirs_cw_amplitude' in raw, have_types + assert 'fnirs_fd_ac_amplitude' in raw, have_types + ppod_data = 
spio.loadmat(p_pod_file) -@requires_testing_data + # Compare MNE loaded data to p_pod loaded data. + map_ = dict(dc='fnirs_cw_amplitude', ac='fnirs_fd_ac_amplitude', + ph='fnirs_fd_phase') + for key, value in map_.items(): + ppod = ppod_data[key].T + m = np.median(np.abs(ppod)) + assert 1e-1 < m < 1e5, key # our atol is meaningful + atol = m * 1e-10 + py = raw.get_data(value) + assert_allclose(py, ppod, atol=atol, err_msg=key) + + +@testing.requires_testing_data def test_boxy_load(): """Test reading BOXY files.""" - # Determine to which decimal place we will compare. - thresh = 1e-10 - - # Load AC, DC, and Phase data. - boxy_file = os.path.join(data_path(download=False), - 'BOXY', 'boxy_0_40_recording', - 'boxy_0_40_notriggers_unparsed.txt') - - boxy_data = mne.io.read_raw_boxy(boxy_file, verbose=True).load_data() - - # Test sampling rate. - assert boxy_data.info['sfreq'] == 62.5 - - #Test the returned types - assert 'fnirs_fd_phase' in raw - assert 'fnirs_fd_dc_amplitude' in raw - assert 'fnirs_fd_ac_amplitude' in raw + raw = read_raw_boxy(boxy_0_40, verbose=True) + assert raw.info['sfreq'] == 62.5 + _assert_ppod(raw, p_pod_0_40) # Grab our different data types. - mne_ph = boxy_data.copy().pick(picks='fnirs_fd_ph') - mne_dc = boxy_data.copy().pick(picks='fnirs_fd_dc_amplitude') - mne_ac = boxy_data.copy().pick(picks='fnirs_fd_ac_amplitude') + mne_ph = raw.copy().pick(picks='fnirs_fd_phase') + mne_dc = raw.copy().pick(picks='fnirs_cw_amplitude') + mne_ac = raw.copy().pick(picks='fnirs_fd_ac_amplitude') # Check channel names. first_chans = ['S1_D1', 'S2_D1', 'S3_D1', 'S4_D1', 'S5_D1', @@ -60,41 +86,12 @@ def test_boxy_load(): # Since this data set has no 'digaux' for creating trigger annotations, # let's make sure our Raw object has no annotations. - - # Check description. - assert mne_dc._annotations.description.size == 0 - assert mne_ac._annotations.description.size == 0 - assert mne_ph._annotations.description.size == 0 - - # Check duration. 
- assert mne_dc._annotations.duration.size == 0 - assert mne_ac._annotations.duration.size == 0 - assert mne_ph._annotations.duration.size == 0 - - # Check onset. - assert mne_dc._annotations.onset.size == 0 - assert mne_ac._annotations.onset.size == 0 - assert mne_ph._annotations.onset.size == 0 - - # Load p_pod data. - p_pod_file = os.path.join(data_path(download=False), - 'BOXY', 'boxy_0_40_recording', - 'p_pod_10_6_3_loaded_data', - 'p_pod_10_6_3_notriggers_unparsed.mat') - ppod_data = spio.loadmat(p_pod_file) - - ppod_ac = np.transpose(ppod_data['ac']) - ppod_dc = np.transpose(ppod_data['dc']) - ppod_ph = np.transpose(ppod_data['ph']) - - # Compare MNE loaded data to p_pod loaded data. - assert (abs(ppod_ac - mne_ac._data) <= thresh).all() - assert (abs(ppod_dc - mne_dc._data) <= thresh).all() - assert (abs(ppod_ph - mne_ph._data) <= thresh).all() + assert len(raw.annotations) == 0 -@requires_testing_data -def test_boxy_filetypes(): +@testing.requires_testing_data +@pytest.mark.parametrize('fname', (boxy_0_84, boxy_0_84_parsed)) +def test_boxy_filetypes(fname): """Test reading parsed and unparsed BOXY data files.""" # BOXY data files can be saved in two formats (parsed and unparsed) which # mostly determines how the data is organised. @@ -110,28 +107,14 @@ def test_boxy_filetypes(): # compare MNE and p_pod loaded data from an unparsed data file. If those # files are comparable, then we will compare the MNE loaded data between # parsed and unparsed files. - - # Determine to which decimal place we will compare. - thresh = 1e-10 - - # Load AC, DC, and Phase data. - boxy_file = os.path.join(data_path(download=False), - 'BOXY', 'boxy_0_84_digaux_recording', - 'boxy_0_84_triggers_unparsed.txt') - - boxy_data = mne.io.read_raw_boxy(boxy_file, verbose=True).load_data() - - # Test sampling rate. 
- assert boxy_data.info['sfreq'] == 79.4722 + raw = read_raw_boxy(fname, verbose=True) + assert raw.info['sfreq'] == 79.4722 + _assert_ppod(raw, p_pod_0_84) # Grab our different data types. - chans_dc = np.arange(0, 8) * 3 + 0 - chans_ac = np.arange(0, 8) * 3 + 1 - chans_ph = np.arange(0, 8) * 3 + 2 - - unp_dc = boxy_data.copy().pick(chans_dc) - unp_ac = boxy_data.copy().pick(chans_ac) - unp_ph = boxy_data.copy().pick(chans_ph) + unp_dc = raw.copy().pick('fnirs_cw_amplitude') + unp_ac = raw.copy().pick('fnirs_fd_ac_amplitude') + unp_ph = raw.copy().pick('fnirs_fd_phase') # Check channel names. chans = ['S1_D1', 'S2_D1', 'S3_D1', 'S4_D1', @@ -144,121 +127,34 @@ def test_boxy_filetypes(): assert unp_ph.info['ch_names'] == [i_chan + ' ' + 'Ph' for i_chan in chans] - # Load p_pod data. - p_pod_file = os.path.join(data_path(download=False), - 'BOXY', 'boxy_0_84_digaux_recording', - 'p_pod_10_6_3_loaded_data', - 'p_pod_10_6_3_triggers_unparsed.mat') - ppod_data = spio.loadmat(p_pod_file) - - ppod_ac = np.transpose(ppod_data['ac']) - ppod_dc = np.transpose(ppod_data['dc']) - ppod_ph = np.transpose(ppod_data['ph']) - - # Compare MNE loaded data to p_pod loaded data. - assert (abs(ppod_ac - unp_ac._data) <= thresh).all() - assert (abs(ppod_dc - unp_dc._data) <= thresh).all() - assert (abs(ppod_ph - unp_ph._data) <= thresh).all() - - # Now let's load our parsed data. - boxy_file = os.path.join(data_path(download=False), - 'BOXY', 'boxy_0_84_digaux_recording', - 'boxy_0_84_triggers_unparsed.txt') - - boxy_data = mne.io.read_raw_boxy(boxy_file, verbose=True).load_data() - - # Test sampling rate. - assert boxy_data.info['sfreq'] == 79.4722 - # Grab our different data types. - par_dc = boxy_data.copy().pick(chans_dc) - par_ac = boxy_data.copy().pick(chans_ac) - par_ph = boxy_data.copy().pick(chans_ph) - - # Check channel names. 
- assert par_dc.info['ch_names'] == [i_chan + ' ' + 'DC' - for i_chan in chans] - assert par_ac.info['ch_names'] == [i_chan + ' ' + 'AC' - for i_chan in chans] - assert par_ph.info['ch_names'] == [i_chan + ' ' + 'Ph' - for i_chan in chans] - - # Compare parsed and unparsed data. - assert (abs(unp_dc._data - par_dc._data) == 0).all() - assert (abs(unp_ac._data - par_ac._data) == 0).all() - assert (abs(unp_ph._data - par_ph._data) == 0).all() - - -@requires_testing_data -def test_boxy_digaux(): +@testing.requires_testing_data +@pytest.mark.parametrize('fname', (boxy_0_84, boxy_0_84_parsed)) +def test_boxy_digaux(fname): """Test reading BOXY files and generating annotations from digaux.""" - # We'll test both parsed and unparsed boxy data files. - # Set our comparison threshold and sampling rate. - thresh = 1e-6 srate = 79.4722 - - # Load AC, DC, and Phase data from a parsed file first. - boxy_file = os.path.join(data_path(download=False), - 'BOXY', 'boxy_0_84_digaux_recording', - 'boxy_0_84_triggers_parsed.txt') - - boxy_data = mne.io.read_raw_boxy(boxy_file, verbose=True).load_data() + raw = read_raw_boxy(fname, verbose=True) # Grab our different data types. - chans_dc = np.arange(0, 8) * 3 + 0 - chans_ac = np.arange(0, 8) * 3 + 1 - chans_ph = np.arange(0, 8) * 3 + 2 - - par_dc = boxy_data.copy().pick(chans_dc) - par_ac = boxy_data.copy().pick(chans_ac) - par_ph = boxy_data.copy().pick(chans_ph) + picks_dc = pick_types(raw.info, fnirs='fnirs_cw_amplitude') + picks_ac = pick_types(raw.info, fnirs='fnirs_fd_ac_amplitude') + picks_ph = pick_types(raw.info, fnirs='fnirs_fd_phase') + assert_array_equal(picks_dc, np.arange(0, 8) * 3 + 0) + assert_array_equal(picks_ac, np.arange(0, 8) * 3 + 1) + assert_array_equal(picks_ph, np.arange(0, 8) * 3 + 2) # Check that our event order matches what we expect. 
event_list = ['1.0', '2.0', '3.0', '4.0', '5.0'] - assert_array_equal(par_dc.annotations.description, event_list) - assert_array_equal(par_ac.annotations.description, event_list) - assert_array_equal(par_ph.annotations.description, event_list) + assert_array_equal(raw.annotations.description, event_list) # Check that our event timings are what we expect. event_onset = [i_time * (1.0 / srate) for i_time in [105, 185, 265, 344, 424]] - assert_allclose(par_dc.annotations.onset, event_onset, atol=thresh) - assert_allclose(par_ac.annotations.onset, event_onset, atol=thresh) - assert_allclose(par_ph.annotations.onset, event_onset, atol=thresh) - - # Now we'll load data from an unparsed file. - boxy_file = os.path.join(data_path(download=False), - 'BOXY', 'boxy_0_84_digaux_recording', - 'boxy_0_84_triggers_unparsed.txt') - - boxy_data = mne.io.read_raw_boxy(boxy_file, verbose=True).load_data() - - # Grab our different data types. - unp_dc = boxy_data.copy().pick(chans_dc) - unp_ac = boxy_data.copy().pick(chans_ac) - unp_ph = boxy_data.copy().pick(chans_ph) - - # Check that our event order matches what we expect. - event_list = ['1.0', '2.0', '3.0', '4.0', '5.0'] - assert_array_equal(unp_dc.annotations.description, event_list) - assert_array_equal(unp_ac.annotations.description, event_list) - assert_array_equal(unp_ph.annotations.description, event_list) - - # Check that our event timings are what we expect. - event_onset = [i_time * (1.0 / srate) for i_time in - [105, 185, 265, 344, 424]] - assert_allclose(unp_dc.annotations.onset, event_onset, atol=thresh) - assert_allclose(unp_ac.annotations.onset, event_onset, atol=thresh) - assert_allclose(unp_ph.annotations.onset, event_onset, atol=thresh) + assert_allclose(raw.annotations.onset, event_onset, atol=1e-6) # Now let's compare parsed and unparsed events to p_pod loaded digaux. # Load our p_pod data. 
- p_pod_file = os.path.join(data_path(download=False), - 'BOXY', 'boxy_0_84_digaux_recording', - 'p_pod_10_6_3_loaded_data', - 'p_pod_10_6_3_triggers_unparsed.mat') - - ppod_data = spio.loadmat(p_pod_file) + ppod_data = spio.loadmat(p_pod_0_84) ppod_digaux = np.transpose(ppod_data['digaux'])[0] # Now let's get our triggers from the p_pod digaux. @@ -279,19 +175,12 @@ def test_boxy_digaux(): onset = np.asarray([i_mrk * (1.0 / srate) for i_mrk in mrk_idx]) description = np.asarray([str(float(i_mrk))for i_mrk in ppod_digaux[mrk_idx]]) + assert_array_equal(raw.annotations.description, description) + assert_allclose(raw.annotations.onset, onset, atol=1e-6) - # Check that our event orders match. - assert_array_equal(par_dc.annotations.description, description) - assert_array_equal(par_ac.annotations.description, description) - assert_array_equal(par_ph.annotations.description, description) - assert_array_equal(unp_dc.annotations.description, description) - assert_array_equal(unp_ac.annotations.description, description) - assert_array_equal(unp_ph.annotations.description, description) - # Check that our event timings match. 
- assert_allclose(par_dc.annotations.onset, onset, atol=thresh) - assert_allclose(par_ac.annotations.onset, onset, atol=thresh) - assert_allclose(par_ph.annotations.onset, onset, atol=thresh) - assert_allclose(unp_dc.annotations.onset, onset, atol=thresh) - assert_allclose(unp_ac.annotations.onset, onset, atol=thresh) - assert_allclose(unp_ph.annotations.onset, onset, atol=thresh) +@testing.requires_testing_data +@pytest.mark.parametrize('fname', (boxy_0_40, boxy_0_84, boxy_0_84_parsed)) +def test_raw_properties(fname): + """Test raw reader properties.""" + _test_raw_reader(read_raw_boxy, fname=fname, boundary_decimal=1) diff --git a/mne/io/pick.py b/mne/io/pick.py index f3fce917681..42ce3eee86c 100644 --- a/mne/io/pick.py +++ b/mne/io/pick.py @@ -73,9 +73,6 @@ def get_channel_type_constants(include_defaults=False): kind=FIFF.FIFFV_FNIRS_CH, unit=FIFF.FIFF_UNIT_V, coil_type=FIFF.FIFFV_COIL_FNIRS_CW_AMPLITUDE), - fnirs_fd_dc_amplitude=dict( - kind=FIFF.FIFFV_FNIRS_CH, - coil_type=FIFF.FIFFV_COIL_FNIRS_FD_DC_AMPLITUDE), fnirs_fd_ac_amplitude=dict( kind=FIFF.FIFFV_FNIRS_CH, coil_type=FIFF.FIFFV_COIL_FNIRS_FD_AC_AMPLITUDE), @@ -152,8 +149,6 @@ def get_channel_type_constants(include_defaults=False): FIFF.FIFFV_COIL_FNIRS_HBR: 'hbr', FIFF.FIFFV_COIL_FNIRS_CW_AMPLITUDE: 'fnirs_cw_amplitude', - FIFF.FIFFV_COIL_FNIRS_FD_DC_AMPLITUDE: - 'fnirs_fd_dc_amplitude', FIFF.FIFFV_COIL_FNIRS_FD_AC_AMPLITUDE: 'fnirs_fd_ac_amplitude', FIFF.FIFFV_COIL_FNIRS_FD_PHASE: @@ -330,13 +325,10 @@ def _triage_fnirs_pick(ch, fnirs, warned): elif ch['coil_type'] == FIFF.FIFFV_COIL_FNIRS_CW_AMPLITUDE and \ fnirs == 'fnirs_cw_amplitude': return True - elif ch['coil_type'] == FIFF.FIFFV_COIL_FNIRS_FD_DC_AMPLITUDE and \ - fnirs == 'fnirs_fd_dc_amplitude': - return True elif ch['coil_type'] == FIFF.FIFFV_COIL_FNIRS_FD_AC_AMPLITUDE and \ fnirs == 'fnirs_fd_ac_amplitude': return True - elif ch['coil_type'] == FIFF.FIFFV_COIL_FNIRS_PHASE and \ + elif ch['coil_type'] == FIFF.FIFFV_COIL_FNIRS_FD_PHASE and \ 
fnirs == 'fnirs_fd_phase': return True elif ch['coil_type'] == FIFF.FIFFV_COIL_FNIRS_OD and fnirs == 'fnirs_od': @@ -471,8 +463,7 @@ def pick_types(info, meg=False, eeg=False, stim=False, eog=False, ecg=False, param_dict[key] = meg if isinstance(fnirs, bool): for key in ('hbo', 'hbr', 'fnirs_cw_amplitude', - 'fnirs_fd_dc_amplitude', 'fnirs_fd_ac_amplitude', - 'fnirs_fd_phase', 'fnirs_od'): + 'fnirs_fd_ac_amplitude', 'fnirs_fd_phase', 'fnirs_od'): param_dict[key] = fnirs warned = [False] for k in range(nchan): @@ -481,9 +472,8 @@ def pick_types(info, meg=False, eeg=False, stim=False, eog=False, ecg=False, pick[k] = param_dict[ch_type] except KeyError: # not so simple assert ch_type in ('grad', 'mag', 'hbo', 'hbr', 'ref_meg', - 'fnirs_cw_amplitude', 'fnirs_fd_dc_amplitude', - 'fnirs_fd_ac_amplitude', 'fnirs_fd_phase', - 'fnirs_od') + 'fnirs_cw_amplitude', 'fnirs_fd_ac_amplitude', + 'fnirs_fd_phase', 'fnirs_od') if ch_type in ('grad', 'mag'): pick[k] = _triage_meg_pick(info['chs'][k], meg) elif ch_type == 'ref_meg': @@ -774,9 +764,8 @@ def channel_indices_by_type(info, picks=None): idx_by_type = {key: list() for key in _PICK_TYPES_KEYS if key not in ('meg', 'fnirs')} idx_by_type.update(mag=list(), grad=list(), hbo=list(), hbr=list(), - fnirs_cw_amplitude=list(), fnirs_fd_dc_amplitude=list(), - fnirs_fd_ac_amplitude=list(), fnirs_fd_phase=list(), - fnirs_od=list()) + fnirs_cw_amplitude=list(), fnirs_fd_ac_amplitude=list(), + fnirs_fd_phase=list(), fnirs_od=list()) picks = _picks_to_idx(info, picks, none='all', exclude=(), allow_empty=True) for k in picks: @@ -866,8 +855,7 @@ def _contains_ch_type(info, ch_type): meg_extras = ['mag', 'grad', 'planar1', 'planar2'] fnirs_extras = ['hbo', 'hbr', 'fnirs_cw_amplitude', - 'fnirs_fd_dc_amplitude', 'fnirs_fd_ac_amplitude', - 'fnirs_fd_phase', 'fnirs_od'] + 'fnirs_fd_ac_amplitude', 'fnirs_fd_phase', 'fnirs_od'] valid_channel_types = sorted([key for key in _PICK_TYPES_KEYS if key != 'meg'] + meg_extras + fnirs_extras) 
_check_option('ch_type', ch_type, valid_channel_types) @@ -973,27 +961,23 @@ def _check_excludes_includes(chs, info=None, allow_bads=False): _PICK_TYPES_KEYS = tuple(list(_PICK_TYPES_DATA_DICT) + ['ref_meg']) _DATA_CH_TYPES_SPLIT = ('mag', 'grad', 'eeg', 'csd', 'seeg', 'ecog', 'hbo', 'hbr', 'fnirs_cw_amplitude', - 'fnirs_fd_dc_amplitude', 'fnirs_fd_ac_amplitude', - 'fnirs_fd_phase', 'fnirs_od') + 'fnirs_fd_ac_amplitude', 'fnirs_fd_phase', 'fnirs_od') _DATA_CH_TYPES_ORDER_DEFAULT = ('mag', 'grad', 'eeg', 'csd', 'eog', 'ecg', 'emg', 'ref_meg', 'misc', 'stim', 'resp', 'chpi', 'exci', 'ias', 'syst', 'seeg', 'bio', 'ecog', 'hbo', 'hbr', 'fnirs_cw_amplitude', - 'fnirs_fd_dc_amplitude', 'fnirs_fd_ac_amplitude', 'fnirs_fd_phase', 'fnirs_od', 'whitened') # Valid data types, ordered for consistency, used in viz/evoked. _VALID_CHANNEL_TYPES = ('eeg', 'grad', 'mag', 'seeg', 'eog', 'ecg', 'emg', 'dipole', 'gof', 'bio', 'ecog', 'hbo', 'hbr', - 'fnirs_cw_amplitude', 'fnirs_fd_dc_amplitude', - 'fnirs_fd_ac_amplitude', 'fnirs_fd_phase', - 'fnirs_od', 'misc', 'csd') + 'fnirs_cw_amplitude', 'fnirs_fd_ac_amplitude', + 'fnirs_fd_phase', 'fnirs_od', 'misc', 'csd') _MEG_CH_TYPES_SPLIT = ('mag', 'grad', 'planar1', 'planar2') _FNIRS_CH_TYPES_SPLIT = ('hbo', 'hbr', 'fnirs_cw_amplitude', - 'fnirs_fd_dc_amplitude', 'fnirs_fd_ac_amplitude', - 'fnirs_fd_phase', 'fnirs_od') + 'fnirs_fd_ac_amplitude', 'fnirs_fd_phase', 'fnirs_od') def _pick_data_channels(info, exclude='bads', with_ref_meg=True): diff --git a/mne/viz/utils.py b/mne/viz/utils.py index 6cf9c4feefc..f6ec8788aa0 100644 --- a/mne/viz/utils.py +++ b/mne/viz/utils.py @@ -32,7 +32,7 @@ _pick_data_channels, _DATA_CH_TYPES_SPLIT, pick_types, _DATA_CH_TYPES_ORDER_DEFAULT, _VALID_CHANNEL_TYPES, pick_info, _picks_by_type, pick_channels_cov, - _picks_to_idx, _contains_ch_type) + _picks_to_idx, _contains_ch_type, _FNIRS_CH_TYPES_SPLIT) from ..io.meas_info import create_info from ..rank import compute_rank from ..io.proj import setup_proj @@ 
-2073,7 +2073,7 @@ def _set_psd_plot_params(info, proj, picks, ax, area_mode): kwargs = dict(meg=False, ref_meg=False, exclude=[]) if name in ('mag', 'grad'): kwargs['meg'] = name - elif name in ('fnirs_cw_amplitude', 'fnirs_od', 'hbo', 'hbr'): + elif name in _FNIRS_CH_TYPES_SPLIT: kwargs['fnirs'] = name else: kwargs[name] = True From cfdfa46f931b735d82b2c337372d3d5b674d8a53 Mon Sep 17 00:00:00 2001 From: Eric Larson Date: Thu, 19 Nov 2020 11:16:15 -0500 Subject: [PATCH 160/167] ENH: Efficient read --- doc/changes/latest.inc | 206 +------------------------- mne/evoked.py | 11 +- mne/io/boxy/boxy.py | 326 +++++++++++++++++------------------------ mne/io/pick.py | 39 ++--- mne/io/utils.py | 3 +- 5 files changed, 159 insertions(+), 426 deletions(-) diff --git a/doc/changes/latest.inc b/doc/changes/latest.inc index 8d9f59dc638..156cf30c015 100644 --- a/doc/changes/latest.inc +++ b/doc/changes/latest.inc @@ -20,11 +20,15 @@ Current (0.22.dev0) .. |Victoria Peterson| replace:: **Victoria Peterson** +.. 
|Jonathan Kuziek| replace:: **Jonathan Kuziek** + Enhancements ~~~~~~~~~~~~ - Add :class:`mne.decoding.SSD` for spatial filtering with spatio-spectral-decomposition (:gh:`7070` **by new contributor** |Victoria Peterson|_ and `Denis Engemann`_) +- Add reader for optical imaging data recorded using ISS Imgagent I/II hardware and BOXY recording software in :func:`mne.io.read_raw_boxy` (:gh:`7717` **by new contributor** |Jonathan Kuziek|_ and `Kyle Mathewson`_) + - Add options to use labels in :func:`mne.minimum_norm.get_point_spread` and :func:`mne.minimum_norm.get_cross_talk` (:gh:`8275` by `Olaf Hauk`_) - Update ``surfaces`` argument in :func:`mne.viz.plot_alignment` to allow dict for transparency values, and set default for sEEG data to have transparency (:gh:`8445` by `Keith Doelling`_) @@ -157,206 +161,4 @@ API changes - Add ``group_by`` parameter to `mne.viz.plot_epochs` and `mne.Epochs.plot` to allow displaying channel data by sensor position (:gh:`8381` by `Daniel McCloy`_) -- Add ``proj='reconstruct'`` to :meth:`mne.Evoked.plot` and related functions to apply projectors and then undo the signal bias using field mapping by `Eric Larson`_ - -- When picking a subset of channels, or when dropping channels from `~mne.io.Raw`, `~mne.Epochs`, or `~mne.Evoked`, projectors that can only be applied to the removed channels will now be dropped automatically by `Richard Höchenberger`_ - -- :class:`mne.Report` now can add topomaps of SSP projectors to the generated report. 
This behavior can be toggled via the new ``projs`` argument by `Richard Höchenberger`_ - -- Add function :func:`mne.channels.combine_channels` to combine channels from Raw, Epochs, or Evoked according to ROIs (combinations including mean, median, or standard deviation; can also use a callable) by `Johann Benerradi`_ - -- Improved documentation building instructions and execution on Windows by `Eric Larson`_, `kalenkovich`_ and `Martin Schulz`_ - -- When passing a list of `~mne.Evoked` objects to `~mne.viz.plot_compare_evokeds`, each evoked's ``.comment`` attribute will be used to label the trace. If ``.comment`` is empty, a 1-based index is assigned as the label by `Richard Höchenberger`_ - -- Speed up :func:`mne.stats.summarize_clusters_stc` using Numba by `Yu-Han Luo`_ - -- Add ``reject_by_annotation=True`` to :func:`mne.make_fixed_length_epochs` and :meth:`mne.preprocessing.ICA.plot_properties` to reject bad data segments based on annotation by `Yu-Han Luo`_ - -- Add reader for optical imaging data recorded using ISS Imgagent I/II hardware and BOXY recording software in :func:`mne.io.read_raw_boxy` by `Kyle Mathewson`_ and `Jonathan Kuziek`_ - -- `~mne.Report.parse_folder` now accepts a path-like folder name (it used to work with strings only) by `Alex Gramfort`_ - -Bug -~~~ - -- Fix bug for writing and reading complex evoked data modifying :func:`mne.write_evokeds` and :func:`mne.read_evokeds` by `Lau Møller Andersen`_ - -- Fix bug by adding error message when trying to save complex stc data in a non.-h5 format :meth:`mne.VolSourceEstimate.save` by `Lau Møller Andersen`_ - -- Fix bug with :func:`mne.preprocessing.ICA.find_bads_eog` when more than one EOG components are present by `Christian O'Reilly`_ - -- Fix bug to permit :meth:`stc.project('nn', src) ` to be applied after ``stc`` was restricted to an :class:`mne.Label` by `Luke Bloy`_ - -- Fix bug with :func:`mne.io.Raw.set_meas_date` to support setting ``meas_date`` to ``None``, by `Luke Bloy`_ - -- Fix bug 
with :func:`mne.setup_volume_source_space` when ``volume_label`` was supplied where voxels slightly (in a worst case, about 37% times ``pos`` in distance) outside the voxel-grid-based bounds of regions were errantly included, by `Eric Larson`_ - -- Fix bug with :func:`mne.io.read_raw_ctf` when reference magnetometers have the compensation grade marked by `Eric Larson`_ - -- Fix bug with `mne.SourceSpaces.export_volume` with ``use_lut=False`` where no values were written by `Eric Larson`_ - -- Fix bug with :func:`mne.preprocessing.annotate_movement` where bad data segments, specified in ``raw.annotations``, would be handled incorrectly by `Luke Bloy`_ - -- Fix bug with :func:`mne.compute_source_morph` when more than one volume source space was present (e.g., when using labels) where only the first label would be interpolated when ``mri_resolution=True`` by `Eric Larson`_ - -- Fix bug with :func:`mne.compute_source_morph` when morphing to a volume source space when ``src_to`` is used and the destination subject is not ``fsaverage`` by `Eric Larson`_ - -- Fix bug with :func:`mne.compute_source_morph` where outermost voxels in the destination source space could be errantly omitted by `Eric Larson`_ - -- Fix bug with :func:`mne.compute_source_morph` where complex data was cast to real when doing a volumetric morph by `Eric Larson`_ - -- Fix bug with :func:`mne.minimum_norm.compute_source_psd_epochs` and :func:`mne.minimum_norm.source_band_induced_power` raised errors when ``method='eLORETA'`` by `Eric Larson`_ - -- Fix bug with :func:`mne.minimum_norm.apply_inverse` where the explained variance did not work for complex data by `Eric Larson`_ - -- Fix bug with :func:`mne.preprocessing.compute_current_source_density` where values were not properly computed; maps should now be more focal, by `Alex Rockhill`_ and `Eric Larson`_ - -- Fix bug with :func:`mne.combine_evoked` where equal-weighted averages were wrongly computed as equal-weighted sums, by `Daniel McCloy`_ - -- 
Fix bug with setting HTML classes when using :meth:`mne.Report.add_bem_to_section` by `Eric Larson`_ - -- Fix bug with convex-hull based MEG helmet creation where the face area could be covered up, by `Eric Larson`_ - -- Fix to enable interactive plotting with no colorbar with :func:`mne.viz.plot_evoked_topomap` by `Daniel McCloy`_ - -- Fix plotting with :func:`mne.viz.plot_evoked_topomap` to pre-existing axes by `Daniel McCloy`_ - -- Fix bug with :func:`mne.viz.plot_vector_source_estimates` using the PyVista backend with ``time_viewer=True`` when updating the arrow colormaps by `Eric Larson`_ - -- The default plotting mode for :func:`mne.io.Raw.plot` and :ref:`mne browse_raw` has been changed to ``clipping=3.`` to facilitate data analysis with large deflections, by `Eric Larson`_ - -- PSD plots will now show non-data channels (e.g., ``misc``) if those channels are explicitly passed to ``picks``, by `Daniel McCloy`_. - -- Fix bug with :func:`mne.time_frequency.read_tfrs` where ``info['meas_date']`` was not parsed correctly, by `Eric Larson`_ - -- Fix bug with :func:`mne.time_frequency.tfr_array_stockwell` where inputs were not properly validated by `Eric Larson`_ - -- Fix handling of NaN when using TFCE in clustering functions such as :func:`mne.stats.spatio_temporal_cluster_1samp_test` by `Eric Larson`_ - -- Fix handling of signs when using TFCE by `Eric Larson`_ - -- The :class:`mne.MixedSourceEstimate` class has been clarified to contain two cortical surface source spaces, plus at least one other source space. 
Creating source estimates in other orderings is not supported, by `Eric Larson`_ - -- Fix bug where :class:`VolSourceEstimate.vertices ` was an instance of :class:`~numpy.ndarray` instead of :class:`python:list` of one :class:`~numpy.ndarray`, by `Eric Larson`_ - -- Fix default to be ``foreground=None`` in :func:`mne.viz.plot_source_estimates` to use white or black text based on the background color by `Eric Larson`_ - -- Fix bug with writing EGI and CTF `mne.Info` to H5 format, e.g., with `mne.time_frequency.AverageTFR.save` by `Eric Larson`_ - -- Fix bug with :func:`mne.io.Raw.plot` where toggling all projectors did not actually take effect by `Eric Larson`_ - -- Fix bug with :func:`mne.read_epochs` when loading data in complex format with ``preload=False`` by `Eric Larson`_ - -- Fix bug with :meth:`mne.Epochs.save` where the file splitting calculations did not account for the sizes of non-data writes by `Eric Larson`_ - -- Fix bug with :class:`mne.Epochs` when metadata was not subselected properly when ``event_repeated='drop'`` by `Eric Larson`_ - -- Fix bug with :class:`mne.Epochs` where ``epochs.drop_log`` was a list of list of str rather than an immutable tuple of tuple of str (not meant to be changed by the user) by `Eric Larson`_ - -- Fix bug with :class:`mne.Report` where the BEM section could not be toggled by `Eric Larson`_ - -- Fix bug when using :meth:`mne.Epochs.crop` to exclude the baseline period would break :func:`mne.Epochs.save` / :func:`mne.read_epochs` round-trip by `Eric Larson`_ - -- Fix bug with `mne.Epochs.subtract_evoked` where using decimated epochs would lead to an error by `Eric Larson`_ - -- Fix bug with :func:`mne.viz.plot_bem` and :class:`mne.Report` when plotting BEM contours when MRIs are not in standard FreeSurfer orientation by `Eric Larson`_ - -- Fix bug with :func:`mne.minimum_norm.make_inverse_operator` where it would warn even when an explicit ``rank`` was used by `Eric Larson`_ - -- Fix bugs with 
:func:`mne.beamformer.make_lcmv` and :func:`mne.beamformer.make_dics` where: - - - Noise normalization factors ``weight_norm='unit-noise-gain'`` and ``weight_norm='nai'`` were computed incorrectly - - ``pick_ori='max-power'`` computed the max-power orientation incorrectly - - ``pick_ori='normal'`` did not compute power or noise normalization factors correctly - - :func:`mne.beamformer.apply_lcmv_cov` did not apply whitening and projections properly - -- Fix :ref:`mne setup_forward_model` to have it actually compute the BEM solution in addition to creating the BEM model by `Eric Larson`_ - -- Fix bug with :func:`mne.io.read_raw_edf` where null bytes were not properly handled, causing an error when opening a file by `Eric Larson`_ - -- Fix bug with :func:`mne.Report` where unicode characters were not rendered properly (encoding for HTML was not set) by `Eric Larson`_ - -- Fix bug with :func:`mne.preprocessing.nirs.scalp_coupling_index` where filter transition was incorrectly assigned by `Robert Luke`_ - -- Fix bug with :func:`mne.make_forward_dipole` where :func:`mne.write_forward_solution` could not be used by `Eric Larson`_ - -- Fix bug with :meth:`mne.io.Raw.plot` when ``scalings='auto'`` where bad data would prevent channel plotting by `Eric Larson`_ - -- Default ``border`` and ``extrapolate`` arguments for :func:`mne.Evoked.plot_topomap` and related functions were changed from ``0.`` to ``'mean'`` and ``'box'`` to ``'auto'``, respectively, to help more accurately reflect sensor geometries and boundary conditions. ``extrapolate='auto'`` uses ``extrapolate='local'`` for MEG data and ``extrapolate='head'`` otherwise, by `Eric Larson`_ - -- Fix bug that prevents ``n_jobs`` from being a NumPy integer type, by `Daniel McCloy`_. 
- -- Fix bug with :func:`mne.epochs.average_movements` where epoch weights were computed using all basis vectors instead of the internal basis only by `Eric Larson`_ - -- Fix bug with :func:`mne.io.read_raw_gdf` where birthdays were not parsed properly, leading to an error by `Svea Marie Meyer`_ - -- Fix bug with :func:`mne.io.read_raw_edf` where recording ID was not read properly for non-ASCII characters by `Lx37`_ - -- Fix bug with :func:`mne.get_volume_labels_from_aseg` where the returned labels were alphabetical instead of reflecting their volumetric ID-based order by `Eric Larson`_ - -- Fix bug with :func:`mne.preprocessing.find_bad_channels_maxwell` where good data of exactly ``step`` duration would lead to an error by `Eric Larson`_ - -- Fix bug with :func:`mne.preprocessing.find_bad_channels_maxwell` where indices were not handled properly when MEG channels were not first in the raw instance, and logging messages incorrectly reported the interval used by `Eric Larson`_ - -- Make :func:`mne.set_config` accept path-like input values by `Richard Höchenberger`_ - -- Fix bug with :func:`mne.write_labels_to_annot` and :func:`mne.datasets.fetch_hcp_mmp_parcellation` where label name strings were not properly terminated, leading to problems loading in FreeSurfer by `Eric Larson`_ - -- Fix bug with :func:`mne.beamformer.make_dics` where complex conjugates were not applied properly by `Britta Westner`_ and `Eric Larson`_ - -- Fix bug with :func:`mne.bem.make_watershed_bem` where the RAS coordinates of watershed bem surfaces were not updated correctly from the volume file by `Yu-Han Luo`_ - -- Fix bug with :meth:`mne.io.Raw.get_channel_types` and related methods where the ordering of ``picks`` was not preserved, by `Eric Larson`_ - -- Fix bug with :meth:`mne.io.Raw.plot_psd` with ``average=False`` and multiple channel types where channel locations were not shown properly by `Eric Larson`_ - -- Fix bug in FieldTrip reader functions when channels are missing in the 
``info`` object by `Thomas Hartmann`_ - -- Throw proper error when trying to import FieldTrip Epochs data with non-uniform time for trials by `Thomas Hartmann`_ - -- Throw proper error when trying to import FieldTrip data saved by an old, incompatible version by `Thomas Hartmann`_ - -- Fix bug in :func:`mne.read_epochs_fieldtrip` when importing data without a ``trialinfo`` field by `Thomas Hartmann`_ - -- Fix bug in :meth:`mne.preprocessing.ICA.plot_properties` where time series plot doesn't start at the proper tmin by `Teon Brooks`_ - -- Fix bug with :meth:`mne.preprocessing.ICA.plot_properties` where a :class:`mne.io.Raw` object with annotations would lead to an error by `Yu-Han Luo`_ - -API -~~~ - -- Python 3.5 is no longer supported, Python 3.6+ is required, by `Eric Larson`_ - -- ``adjacency`` has replaced ``connectivity`` in the names of: - - 1. Arguments to clustering functions, such as `mne.stats.permutation_cluster_test`, and - 2. Function names for defining adjacency, such as `mne.spatio_temporal_src_adjacency` replacing ``mne.spatio_temporal_src_connectivity``. - - "connectivity" is now reserved for discussions of functional and effective connectivity of the brain, and "adjacency" for source or sensor neighbor definitions for cluster-based analyses, by `Eric Larson`_. 
- -- The default for the ``standardize_names`` argument of :func:`mne.io.read_raw_kit` will change from ``True`` to ``False`` in 0.22, by `Eric Larson`_ - -- The ``normalize_fwd`` argument of :func:`mne.beamformer.make_dics` has been deprecated in favor of ``depth``, by `Eric Larson`_ - -- Add ``n_cols`` parameter to :meth:`mne.preprocessing.ICA.plot_scores` to allow plotting scores in multiple columns, by `Luke Bloy`_ - -- In :func:`mne.stats.permutation_cluster_test` and :func:`mne.stats.permutation_cluster_1samp_test` the default parameter value ``out_type='mask'`` has changed to ``None``, which in 0.21 means ``'mask'`` but will change to mean ``'indices'`` in the next version, by `Daniel McCloy`_ - -- The default window size set by ``filter_length`` when ``method='spectrum_fit'`` in :meth:`mne.io.Raw.notch_filter` will change from ``None`` (use whole file) to ``'10s'`` in 0.22, by `Eric Larson`_ - -- ``vmin`` and ``vmax`` parameters are deprecated in :meth:`mne.Epochs.plot_psd_topomap` and :func:`mne.viz.plot_epochs_psd_topomap`; use new ``vlim`` parameter instead, by `Daniel McCloy`_. - -- The method ``stc_mixed.plot_surface`` for a :class:`mne.MixedSourceEstimate` has been deprecated in favor of :meth:`stc.surface().plot(...) 
` by `Eric Larson`_ - -- The method ``stc.normal`` for :class:`mne.VectorSourceEstimate` has been deprecated in favor of :meth:`stc.project('nn', src) ` by `Eric Larson`_ - -- Add ``use_dev_head_trans`` parameter to :func:`mne.preprocessing.annotate_movement` to allow choosing the device to head transform is used to define the fixed cHPI coordinates by `Luke Bloy`_ - -- The function ``mne.channels.read_dig_captrack`` will be deprecated in version 0.22 in favor of :func:`mne.channels.read_dig_captrak` to correct the spelling error: "captraCK" -> "captraK", by `Stefan Appelhoff`_ - -- The ``threshold`` argument in :meth:`mne.preprocessing.ICA.find_bads_ecg` defaults to ``None`` in version 0.21 but will change to ``'auto'`` in 0.22 by `Yu-Han Luo`_ - -- The default argument ``meg=True`` in :func:`mne.pick_types` will change to ``meg=False`` in version 0.22 by `Clemens Brunner`_ - - Parameter ``event_colors`` in `mne.viz.plot_epochs` and `mne.Epochs.plot` is deprecated, replaced by ``event_color`` which is consistent with `mne.viz.plot_raw` and provides greater flexibility (:gh:`8381` by `Daniel McCloy`_) diff --git a/mne/evoked.py b/mne/evoked.py index 102e6becf43..c3527aba76e 100644 --- a/mne/evoked.py +++ b/mne/evoked.py @@ -394,9 +394,7 @@ def animate_topomap(self, ch_type=None, times=None, frame_rate=None, Channel type to plot. Accepted data types: 'mag', 'grad', 'eeg', 'hbo', 'hbr', 'fnirs_cw_amplitude', 'fnirs_fd_ac_amplitude', 'fnirs_fd_phase', and 'fnirs_od'. - If None, first available channel type from ('mag', 'grad', 'eeg', - 'hbo', 'hbr', 'fnirs_cw_amplitude', - 'fnirs_fd_ac_amplitude', 'fnirs_fd_phase', and 'fnirs_od') is used. + If None, first available channel type from the above list is used. Defaults to None. times : array of float | None The time points to plot. 
If None, 10 evenly spaced samples are @@ -523,7 +521,7 @@ def get_peak(self, ch_type=None, tmin=None, tmax=None, Parameters ---------- - ch_type : 'mag', 'grad', 'eeg', 'seeg', 'ecog', 'hbo', hbr', 'misc', None + ch_type : str | None The channel type to use. Defaults to None. If more than one sensor Type is present in the data the channel type has to be explicitly set. @@ -560,9 +558,8 @@ def get_peak(self, ch_type=None, tmin=None, tmax=None, .. versionadded:: 0.16 """ # noqa: E501 - supported = ('mag', 'grad', 'eeg', 'seeg', 'ecog', 'misc', 'hbo', - 'hbr', 'None', 'fnirs_cw_amplitude', - 'fnirs_fd_ac_amplitude', 'fnirs_fd_phase', 'fnirs_od') + supported = ('mag', 'grad', 'eeg', 'seeg', 'ecog', 'misc', + 'None') + _FNIRS_CH_TYPES_SPLIT types_used = self.get_channel_types(unique=True, only_data_chs=True) _check_option('ch_type', str(ch_type), supported) diff --git a/mne/io/boxy/boxy.py b/mne/io/boxy/boxy.py index 7c3058ace04..cdcb3faf88d 100644 --- a/mne/io/boxy/boxy.py +++ b/mne/io/boxy/boxy.py @@ -60,119 +60,112 @@ def __init__(self, fname, preload=False, verbose=None): logger.info('Loading %s' % fname) # Read header file and grab some info. - filetype = 'parsed' - start_line = 0 - end_line = 0 - mrk_col = 0 - mrk_data = list() - col_names = list() - with open(fname, 'r') as data: - for line_num, i_line in enumerate(data, 1): - if 'BOXY.EXE:' in i_line: + start_line = np.inf + col_names = mrk_col = filetype = mrk_data = end_line = None + raw_extras = dict() + raw_extras['offsets'] = list() # keep track of our offsets + sfreq = None + with open(fname, 'r') as fid: + line_num = 0 + i_line = fid.readline() + while i_line: + # most of our lines will be data lines, so check that first + if line_num >= start_line: + assert col_names is not None + assert filetype is not None + if '#DATA ENDS' in i_line: + # Data ends just before this. + end_line = line_num + break + if mrk_col is not None: + if filetype == 'non-parsed': + # Non-parsed files have different lines lengths. 
+ crnt_line = i_line.rsplit(' ')[0] + temp_data = re.findall( + r'[-+]?\d*\.?\d+', crnt_line) + if len(temp_data) == len(col_names): + mrk_data.append(float( + re.findall(r'[-+]?\d*\.?\d+', crnt_line) + [mrk_col])) + else: + crnt_line = i_line.rsplit(' ')[0] + mrk_data.append(float(re.findall( + r'[-+]?\d*\.?\d+', crnt_line)[mrk_col])) + raw_extras['offsets'].append(fid.tell()) + # now proceed with more standard header parsing + elif 'BOXY.EXE:' in i_line: boxy_ver = re.findall(r'\d*\.\d+', i_line.rsplit(' ')[-1])[0] # Check that the BOXY version is supported if boxy_ver not in ['0.40', '0.84']: raise RuntimeError('MNE has not been tested with BOXY ' 'version (%s)' % boxy_ver) - if '#DATA ENDS' in i_line: - # Data ends just before this. - end_line = line_num - 1 - break - if 'Detector Channels' in i_line: - detect_num = int(i_line.rsplit(' ')[0]) + elif 'Detector Channels' in i_line: + raw_extras['detect_num'] = int(i_line.rsplit(' ')[0]) elif 'External MUX Channels' in i_line: - source_num = int(i_line.rsplit(' ')[0]) - elif 'Update Rate (Hz)' in i_line: - srate = float(i_line.rsplit(' ')[0]) - elif 'Updata Rate (Hz)' in i_line: + raw_extras['source_num'] = int(i_line.rsplit(' ')[0]) + elif 'Update Rate (Hz)' in i_line or \ + 'Updata Rate (Hz)' in i_line: # Version 0.40 of the BOXY recording software # (and possibly other versions lower than 0.84) contains a # typo in the raw data file where 'Update Rate' is spelled # "Updata Rate. This will account for this typo. - srate = float(i_line.rsplit(' ')[0]) + sfreq = float(i_line.rsplit(' ')[0]) elif '#DATA BEGINS' in i_line: # Data should start a couple lines later. - start_line = line_num + 2 - if start_line > 0 & end_line == 0: - if line_num == start_line - 1: - # Grab names for each column of data. - col_names = np.asarray(re.findall( - r'\w+\-\w+|\w+\-\d+|\w+', i_line.rsplit(' ')[0])) - if 'exmux' in col_names: - # Change filetype based on data organisation. 
- filetype = 'non-parsed' - if 'digaux' in col_names: - mrk_col = np.where(col_names == 'digaux')[0][0] - # Need to treat parsed and non-parsed files differently. - elif (mrk_col > 0 and line_num > start_line and - filetype == 'non-parsed'): - # Non-parsed files have different lines lengths. - crnt_line = i_line.rsplit(' ')[0] - temp_data = re.findall(r'[-+]?\d*\.?\d+', crnt_line) - if len(temp_data) == len(col_names): - mrk_data.append(float( - re.findall(r'[-+]?\d*\.?\d+', crnt_line) - [mrk_col])) - elif (mrk_col > 0 and line_num > start_line - and filetype == 'parsed'): - # Parsed files have the same line lengths for data. - crnt_line = i_line.rsplit(' ')[0] - mrk_data.append(float( - re.findall(r'[-+]?\d*\.?\d+', crnt_line)[mrk_col])) + start_line = line_num + 3 + elif line_num == start_line - 2: + # Grab names for each column of data. + raw_extras['col_names'] = col_names = re.findall( + r'\w+\-\w+|\w+\-\d+|\w+', i_line.rsplit(' ')[0]) + if 'exmux' in col_names: + # Change filetype based on data organisation. + filetype = 'non-parsed' + else: + filetype = 'parsed' + if 'digaux' in col_names: + mrk_col = col_names.index('digaux') + mrk_data = list() + # raw_extras['offsets'].append(fid.tell()) + elif line_num == start_line - 1: + raw_extras['offsets'].append(fid.tell()) + line_num += 1 + i_line = fid.readline() + assert sfreq is not None + raw_extras.update( + filetype=filetype, start_line=start_line, end_line=end_line) # Label each channel in our data, for each data type (DC, AC, Ph). # Data is organised by channels x timepoint, where the first # 'source_num' rows correspond to the first detector, the next # 'source_num' rows correspond to the second detector, and so on. - boxy_labels = list() + ch_names = list() ch_types = list() - for det_num in range(detect_num): - for src_num in range(source_num): - for i_type in ['DC', 'AC', 'Ph']: - boxy_labels.append('S' + str(src_num + 1) + - '_D' + str(det_num + 1) + ' ' + i_type) - # Determine channel types. 
- if i_type == 'Ph': - chan_type = 'fnirs_fd_phase' - elif i_type == 'DC': - chan_type = 'fnirs_cw_amplitude' - else: - chan_type = 'fnirs_fd_ac_amplitude' - ch_types.append(chan_type) + for det_num in range(raw_extras['detect_num']): + for src_num in range(raw_extras['source_num']): + for i_type, ch_type in [ + ('DC', 'fnirs_cw_amplitude'), + ('AC', 'fnirs_fd_ac_amplitude'), + ('Ph', 'fnirs_fd_phase')]: + ch_names.append( + f'S{src_num + 1}_D{det_num + 1} {i_type}') + ch_types.append(ch_type) # Create info structure. - info = create_info(boxy_labels, srate, ch_types=ch_types) - - raw_extras = {'source_num': source_num, - 'detect_num': detect_num, - 'start_line': start_line, - 'end_line': end_line, - 'filetype': filetype, - 'file': fname, - 'srate': srate, - } + info = create_info(ch_names, sfreq, ch_types) # Determine how long our data is. - diff = end_line - (start_line) - - # Number of rows in data file depends on data file type. + delta = end_line - start_line + assert len(raw_extras['offsets']) == delta + 1 if filetype == 'non-parsed': - last_samps = (diff // (source_num)) - elif filetype == 'parsed': - last_samps = diff - - # First sample is technically sample 0, not the start line in the file. - first_samps = 0 - + delta //= (raw_extras['source_num']) super(RawBOXY, self).__init__( - info, preload, filenames=[fname], first_samps=[first_samps], - last_samps=[last_samps - 1], - raw_extras=[raw_extras], verbose=verbose) + info, preload, filenames=[fname], first_samps=[0], + last_samps=[delta - 1], raw_extras=[raw_extras], verbose=verbose) # Now let's grab our markers, if they are present. - if len(mrk_data) != 0: - mrk_data = np.asarray(mrk_data) + if mrk_data is not None: + mrk_data = np.array(mrk_data, float) # We only want the first instance of each trigger. 
prev_mrk = 0 mrk_idx = list() @@ -184,11 +177,11 @@ def __init__(self, fname, preload=False, verbose=None): if i_mrk != 0 and i_mrk == prev_mrk: tmp_dur += 1 if i_mrk == 0 and i_mrk != prev_mrk: - duration.append((tmp_dur + 1) * (1.0 / srate)) + duration.append((tmp_dur + 1) / sfreq) tmp_dur = 0 prev_mrk = i_mrk - onset = [i_mrk * (1.0 / srate) for i_mrk in mrk_idx] - description = [float(i_mrk)for i_mrk in mrk_data[mrk_idx]] + onset = np.array(mrk_idx) / sfreq + description = mrk_data[mrk_idx] annot = Annotations(onset, duration, description) self.set_annotations(annot) @@ -204,113 +197,62 @@ def _read_segment_file(self, data, idx, fi, start, stop, cals, mult): start_line = self._raw_extras[fi]['start_line'] end_line = self._raw_extras[fi]['end_line'] filetype = self._raw_extras[fi]['filetype'] - boxy_file = self._raw_extras[fi]['file'] + col_names = self._raw_extras[fi]['col_names'] + offsets = self._raw_extras[fi]['offsets'] + boxy_file = self._filenames[fi] + + # Non-parsed multiplexes sources, so we need source_num times as many + # lines in that case + if filetype == 'parsed': + start_read = start_line + start + stop_read = start_read + (stop - start) + else: + assert filetype == 'non-parsed' + start_read = start_line + start * source_num + stop_read = start_read + (stop - start) * source_num + assert start_read >= start_line + assert stop_read <= end_line # Possible detector names. - detectors = ['A', 'B', 'C', 'D', 'E', 'F', 'G', 'H', 'I', 'J', 'K', - 'L', 'M', 'N', 'O', 'P', 'Q', 'R', 'S', 'T', 'U', 'V', - 'W', 'X', 'Y', 'Z'] - - # Load our optical data. - boxy_data = list() + detectors = 'ABCDEFGHIJKLMNOPQRSTUVWXYZ'[:detect_num] # Loop through our data. - with open(boxy_file, 'r') as data_file: - for line_num, i_line in enumerate(data_file, 1): - if line_num == (start_line - 1): - - # Grab column names. 
- col_names = np.asarray(re.findall(r'\w+\-\w+|\w+\-\d+|\w+', - i_line.rsplit(' ')[0])) - if (line_num > start_line and line_num <= end_line): - - # Grab actual data. - boxy_data.append(i_line.rsplit(' ')) - - # Get number of sources. - sources = np.arange(1, source_num + 1, 1) - - # Grab the individual data points for each column. - boxy_data = [re.findall(r'[-+]?\d*\.?\d+', i_row[0]) - for i_row in boxy_data] - - # Make variable to store our data as an array - # rather than list of strings. - boxy_length = len(col_names) - boxy_array = np.full((len(boxy_data), boxy_length), np.nan) - for ii, i_data in enumerate(boxy_data): - - # Need to make sure our rows are the same length. - # This is done by padding the shorter ones. - line_diff = boxy_length - len(i_data) - if line_diff == 0: - full_line = i_data - boxy_array[ii] = np.asarray(i_data, dtype=float) - else: - pad = full_line[-line_diff:] - i_data.extend(pad) - boxy_array[ii] = np.asarray(i_data, dtype=float) - - # Grab data from the other columns that aren't AC, DC, or Ph. - meta_data = dict() - keys = ['time', 'record', 'group', 'exmux', 'step', 'mark', 'flag', - 'aux1', 'digaux'] - for i_detect in detectors[0:detect_num]: - keys.append('bias-' + i_detect) - - # Data that isn't in our boxy file will be an empty list. - for key in keys: - meta_data[key] = (boxy_array[:, np.where(col_names == key)[0][0]] - if key in col_names else list()) - - # Make some empty variables to store our data. + one = np.zeros((len(col_names), stop_read - start_read)) + with open(boxy_file, 'r') as fid: + # Just a more efficient version of this: + # ii = 0 + # for line_num, i_line in enumerate(fid): + # if line_num >= start_read: + # if line_num >= stop_read: + # break + # # Grab actual data. 
+ # i_data = i_line.strip().split() + # one[:len(i_data), ii] = i_data + # ii += 1 + fid.seek(offsets[start_read - start_line], 0) + for oo in one.T: + i_data = fid.readline().strip().split() + oo[:len(i_data)] = i_data + + # in theory we could index in the loop above, but it's painfully slow, + # so let's just take a hopefully minor memory hit if filetype == 'non-parsed': - all_data = np.zeros(((detect_num * source_num * 3), - int(len(boxy_data) / source_num))) - elif filetype == 'parsed': - all_data = np.zeros(((detect_num * source_num * 3), - int(len(boxy_data)))) - - # Loop through detectors. - for i_detect in detectors[0:detect_num]: - - # Loop through sources. - for i_source in sources: - - for i_num, i_type in enumerate(['DC', 'AC', 'Ph']): - - # Determine where to store our data. - index_loc = (detectors.index(i_detect) * source_num * 3 + - ((i_source - 1) * 3) + i_num) - - # Need to treat our filetypes differently. - if filetype == 'non-parsed': - - # Non-parsed saves timepoints in groups and - # this should account for that. - time_points = np.arange(i_source - 1, - int(meta_data['record'][-1]) * - source_num, source_num) - - # Determine which channel to - # look for in boxy_array. - channel = np.where(col_names == i_detect + '-' + - i_type)[0][0] - - # Save our data based on data type. - all_data[index_loc, :] = boxy_array[time_points, - channel] - - elif filetype == 'parsed': - - # Which channel to look for in boxy_array. - channel = np.where(col_names == i_detect + '-' + - i_type + str(i_source))[0][0] - - # Save our data based on data type. 
- all_data[index_loc, :] = boxy_array[:, channel] + ch_idxs = [col_names.index(f'{det}-{i_type}') + for det in detectors + for i_type in ['DC', 'AC', 'Ph']] + one = one[ch_idxs].reshape( # each "time point" multiplexes srcs + len(detectors), 3, -1, source_num + ).transpose( # reorganize into (det, source, DC/AC/Ph, t) order + 0, 3, 1, 2 + ).reshape( # reshape the way we store it (det x source x DAP, t) + len(detectors) * source_num * 3, -1) + else: + assert filetype == 'parsed' + ch_idxs = [col_names.index(f'{det}-{i_type}{si + 1}') + for det in detectors + for si in range(source_num) + for i_type in ['DC', 'AC', 'Ph']] + one = one[ch_idxs] # Place our data into the data object in place. - # XXX we only use the sub-block from start:stop, so this is pretty - # inefficient. - _mult_cal_one(data, all_data[:, start:stop], idx, cals, mult) + _mult_cal_one(data, one, idx, cals, mult) diff --git a/mne/io/pick.py b/mne/io/pick.py index 42ce3eee86c..47caf0fdb18 100644 --- a/mne/io/pick.py +++ b/mne/io/pick.py @@ -462,8 +462,7 @@ def pick_types(info, meg=False, eeg=False, stim=False, eog=False, ecg=False, for key in ('grad', 'mag'): param_dict[key] = meg if isinstance(fnirs, bool): - for key in ('hbo', 'hbr', 'fnirs_cw_amplitude', - 'fnirs_fd_ac_amplitude', 'fnirs_fd_phase', 'fnirs_od'): + for key in _FNIRS_CH_TYPES_SPLIT: param_dict[key] = fnirs warned = [False] for k in range(nchan): @@ -471,9 +470,8 @@ def pick_types(info, meg=False, eeg=False, stim=False, eog=False, ecg=False, try: pick[k] = param_dict[ch_type] except KeyError: # not so simple - assert ch_type in ('grad', 'mag', 'hbo', 'hbr', 'ref_meg', - 'fnirs_cw_amplitude', 'fnirs_fd_ac_amplitude', - 'fnirs_fd_phase', 'fnirs_od') + assert ch_type in ( + 'grad', 'mag', 'ref_meg') + _FNIRS_CH_TYPES_SPLIT if ch_type in ('grad', 'mag'): pick[k] = _triage_meg_pick(info['chs'][k], meg) elif ch_type == 'ref_meg': @@ -853,9 +851,8 @@ def _contains_ch_type(info, ch_type): """ _validate_type(ch_type, 'str', "ch_type") - 
meg_extras = ['mag', 'grad', 'planar1', 'planar2'] - fnirs_extras = ['hbo', 'hbr', 'fnirs_cw_amplitude', - 'fnirs_fd_ac_amplitude', 'fnirs_fd_phase', 'fnirs_od'] + meg_extras = list(_MEG_CH_TYPES_SPLIT) + fnirs_extras = list(_FNIRS_CH_TYPES_SPLIT) valid_channel_types = sorted([key for key in _PICK_TYPES_KEYS if key != 'meg'] + meg_extras + fnirs_extras) _check_option('ch_type', ch_type, valid_channel_types) @@ -959,25 +956,19 @@ def _check_excludes_includes(chs, info=None, allow_bads=False): misc=False, resp=False, chpi=False, exci=False, ias=False, syst=False, seeg=True, dipole=False, gof=False, bio=False, ecog=True, fnirs=True) _PICK_TYPES_KEYS = tuple(list(_PICK_TYPES_DATA_DICT) + ['ref_meg']) -_DATA_CH_TYPES_SPLIT = ('mag', 'grad', 'eeg', 'csd', 'seeg', 'ecog', - 'hbo', 'hbr', 'fnirs_cw_amplitude', - 'fnirs_fd_ac_amplitude', 'fnirs_fd_phase', 'fnirs_od') -_DATA_CH_TYPES_ORDER_DEFAULT = ('mag', 'grad', 'eeg', 'csd', 'eog', 'ecg', - 'emg', 'ref_meg', 'misc', 'stim', 'resp', - 'chpi', 'exci', 'ias', 'syst', 'seeg', 'bio', - 'ecog', 'hbo', 'hbr', 'fnirs_cw_amplitude', - 'fnirs_fd_ac_amplitude', 'fnirs_fd_phase', - 'fnirs_od', 'whitened') - -# Valid data types, ordered for consistency, used in viz/evoked. -_VALID_CHANNEL_TYPES = ('eeg', 'grad', 'mag', 'seeg', 'eog', 'ecg', 'emg', - 'dipole', 'gof', 'bio', 'ecog', 'hbo', 'hbr', - 'fnirs_cw_amplitude', 'fnirs_fd_ac_amplitude', - 'fnirs_fd_phase', 'fnirs_od', 'misc', 'csd') - _MEG_CH_TYPES_SPLIT = ('mag', 'grad', 'planar1', 'planar2') _FNIRS_CH_TYPES_SPLIT = ('hbo', 'hbr', 'fnirs_cw_amplitude', 'fnirs_fd_ac_amplitude', 'fnirs_fd_phase', 'fnirs_od') +_DATA_CH_TYPES_ORDER_DEFAULT = ( + 'mag', 'grad', 'eeg', 'csd', 'eog', 'ecg', 'emg', 'ref_meg', 'misc', + 'stim', 'resp', 'chpi', 'exci', 'ias', 'syst', 'seeg', 'bio', + 'ecog') + _FNIRS_CH_TYPES_SPLIT + ('whitened',) +# Valid data types, ordered for consistency, used in viz/evoked. 
+_VALID_CHANNEL_TYPES = ( + 'eeg', 'grad', 'mag', 'seeg', 'eog', 'ecg', 'emg', 'dipole', 'gof', 'bio', + 'ecog') + _FNIRS_CH_TYPES_SPLIT + ('misc', 'csd') +_DATA_CH_TYPES_SPLIT = ( + 'mag', 'grad', 'eeg', 'csd', 'seeg', 'ecog') + _FNIRS_CH_TYPES_SPLIT def _pick_data_channels(info, exclude='bads', with_ref_meg=True): diff --git a/mne/io/utils.py b/mne/io/utils.py index f33a43a2b74..a272cd23065 100644 --- a/mne/io/utils.py +++ b/mne/io/utils.py @@ -77,7 +77,8 @@ def _find_channels(ch_names, ch_type='EOG'): def _mult_cal_one(data_view, one, idx, cals, mult): """Take a chunk of raw data, multiply by mult or cals, and store.""" one = np.asarray(one, dtype=data_view.dtype) - assert data_view.shape[1] == one.shape[1] + assert data_view.shape[1] == one.shape[1], \ + (data_view.shape[1], one.shape[1]) if mult is not None: mult.ndim == one.ndim == 2 data_view[:] = mult @ one[idx] From d0a605060ccbefae42e87a31753958b99cd22d3d Mon Sep 17 00:00:00 2001 From: Kyle Mathewson Date: Thu, 19 Nov 2020 14:31:46 -0700 Subject: [PATCH 161/167] Update tutorials/io/plot_30_reading_fnirs_data.py Co-authored-by: Robert Luke <748691+rob-luke@users.noreply.github.com> --- tutorials/io/plot_30_reading_fnirs_data.py | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/tutorials/io/plot_30_reading_fnirs_data.py b/tutorials/io/plot_30_reading_fnirs_data.py index 2cc8b9af4f7..6fe27cddcf9 100644 --- a/tutorials/io/plot_30_reading_fnirs_data.py +++ b/tutorials/io/plot_30_reading_fnirs_data.py @@ -45,7 +45,7 @@ BOXY recordings can be read in using :func:`mne.io.read_raw_boxy`. The BOXY software and ISS Imagent I and II devices store data in a single .txt file containing DC (overall background light intensity), -AC (modulated light intensity), and Phase information for each source and +AC `fnirs_fd_ac_amplitude` (modulated light intensity), and Phase `fnirs_fd_phase` information for each source and detector combination. 
These raw data files can be saved as parsed or unparsed .txt files, which affects how the data in the file is organised. MNE will read either file type and extract the raw DC, AC, and Phase data. From c96908d582be2de683e678ee32dc0173856b4774 Mon Sep 17 00:00:00 2001 From: Kyle Mathewson Date: Thu, 19 Nov 2020 14:32:03 -0700 Subject: [PATCH 162/167] Update tutorials/io/plot_30_reading_fnirs_data.py Co-authored-by: Robert Luke <748691+rob-luke@users.noreply.github.com> --- tutorials/io/plot_30_reading_fnirs_data.py | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/tutorials/io/plot_30_reading_fnirs_data.py b/tutorials/io/plot_30_reading_fnirs_data.py index 6fe27cddcf9..7e6c01c14ae 100644 --- a/tutorials/io/plot_30_reading_fnirs_data.py +++ b/tutorials/io/plot_30_reading_fnirs_data.py @@ -44,7 +44,7 @@ BOXY recordings can be read in using :func:`mne.io.read_raw_boxy`. The BOXY software and ISS Imagent I and II devices store data in a single .txt -file containing DC (overall background light intensity), +file containing DC `fnirs_cw_amplitude` (overall background light intensity), AC `fnirs_fd_ac_amplitude` (modulated light intensity), and Phase `fnirs_fd_phase` information for each source and detector combination. These raw data files can be saved as parsed or unparsed .txt files, which affects how the data in the file is organised. 
From 759f042afeaff3589f0e27a5a641531917f8d607 Mon Sep 17 00:00:00 2001 From: Kyle Mathewson Date: Thu, 19 Nov 2020 14:37:09 -0700 Subject: [PATCH 163/167] updated plot_30 tutorial boxy info --- tutorials/io/plot_30_reading_fnirs_data.py | 12 ++++++++---- 1 file changed, 8 insertions(+), 4 deletions(-) diff --git a/tutorials/io/plot_30_reading_fnirs_data.py b/tutorials/io/plot_30_reading_fnirs_data.py index 7e6c01c14ae..7fbdfce23fb 100644 --- a/tutorials/io/plot_30_reading_fnirs_data.py +++ b/tutorials/io/plot_30_reading_fnirs_data.py @@ -43,10 +43,14 @@ ================================ BOXY recordings can be read in using :func:`mne.io.read_raw_boxy`. -The BOXY software and ISS Imagent I and II devices store data in a single .txt -file containing DC `fnirs_cw_amplitude` (overall background light intensity), -AC `fnirs_fd_ac_amplitude` (modulated light intensity), and Phase `fnirs_fd_phase` information for each source and -detector combination. These raw data files can be saved as parsed or unparsed +The BOXY software and ISS Imagent I and II devices are frequency domain +systems that store data in a single .txt file containing +DC `fnirs_cw_amplitude` (all light collected by the detector), +AC `fnirs_fd_ac_amplitude` (modulated light intensity), and +Phase `fnirs_fd_phase` information for each source and detector +combination. DC data is stored as the type 'fnirs_cw_amplitude to since +it collects both the modulated and any unmodulated light, and to conform +to SNIRF standard types. These raw data files can be saved as parsed or unparsed .txt files, which affects how the data in the file is organised. MNE will read either file type and extract the raw DC, AC, and Phase data. 
If triggers are sent using the 'digaux' port of the recording hardware, MNE From fbd9e797d2c97bf9fde154bc96cc7e0297a023ff Mon Sep 17 00:00:00 2001 From: Kyle Mathewson Date: Thu, 19 Nov 2020 14:38:15 -0700 Subject: [PATCH 164/167] typo --- tutorials/io/plot_30_reading_fnirs_data.py | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/tutorials/io/plot_30_reading_fnirs_data.py b/tutorials/io/plot_30_reading_fnirs_data.py index 7fbdfce23fb..700f93cafb1 100644 --- a/tutorials/io/plot_30_reading_fnirs_data.py +++ b/tutorials/io/plot_30_reading_fnirs_data.py @@ -48,7 +48,7 @@ DC `fnirs_cw_amplitude` (all light collected by the detector), AC `fnirs_fd_ac_amplitude` (modulated light intensity), and Phase `fnirs_fd_phase` information for each source and detector -combination. DC data is stored as the type 'fnirs_cw_amplitude to since +combination. DC data is stored as the type 'fnirs_cw_amplitude since it collects both the modulated and any unmodulated light, and to conform to SNIRF standard types. These raw data files can be saved as parsed or unparsed .txt files, which affects how the data in the file is organised. From 983200d41d7b5358cf048700119220b0db42a179 Mon Sep 17 00:00:00 2001 From: Eric Larson Date: Thu, 19 Nov 2020 18:53:42 -0500 Subject: [PATCH 165/167] FIX: SI units and backticks --- mne/io/boxy/boxy.py | 4 +++ mne/io/boxy/tests/test_boxy.py | 8 ++++-- mne/io/pick.py | 2 ++ tutorials/io/plot_30_reading_fnirs_data.py | 31 ++++++++++++++-------- 4 files changed, 32 insertions(+), 13 deletions(-) diff --git a/mne/io/boxy/boxy.py b/mne/io/boxy/boxy.py index cdcb3faf88d..a48efd67bf9 100644 --- a/mne/io/boxy/boxy.py +++ b/mne/io/boxy/boxy.py @@ -141,6 +141,7 @@ def __init__(self, fname, preload=False, verbose=None): # 'source_num' rows correspond to the second detector, and so on. 
ch_names = list() ch_types = list() + cals = list() for det_num in range(raw_extras['detect_num']): for src_num in range(raw_extras['source_num']): for i_type, ch_type in [ @@ -150,9 +151,12 @@ def __init__(self, fname, preload=False, verbose=None): ch_names.append( f'S{src_num + 1}_D{det_num + 1} {i_type}') ch_types.append(ch_type) + cals.append(np.pi / 180. if i_type == 'Ph' else 1.) # Create info structure. info = create_info(ch_names, sfreq, ch_types) + for ch, cal in zip(info['chs'], cals): + ch['cal'] = cal # Determine how long our data is. delta = end_line - start_line diff --git a/mne/io/boxy/tests/test_boxy.py b/mne/io/boxy/tests/test_boxy.py index 011ded4a706..f4d6ef2656f 100644 --- a/mne/io/boxy/tests/test_boxy.py +++ b/mne/io/boxy/tests/test_boxy.py @@ -6,7 +6,8 @@ import pytest import numpy as np -from numpy.testing import assert_allclose, assert_array_equal +from numpy.testing import (assert_allclose, assert_array_equal, + assert_array_less) import scipy.io as spio from mne import pick_types @@ -33,7 +34,6 @@ def _assert_ppod(raw, p_pod_file): - __tracebackhide__ = True have_types = raw.get_channel_types(unique=True) assert 'fnirs_fd_phase' in raw, have_types assert 'fnirs_cw_amplitude' in raw, have_types @@ -49,6 +49,10 @@ def _assert_ppod(raw, p_pod_file): assert 1e-1 < m < 1e5, key # our atol is meaningful atol = m * 1e-10 py = raw.get_data(value) + if key == 'ph': # radians + assert_array_less(-np.pi, py) + assert_array_less(py, 3 * np.pi) + py = np.rad2deg(py) assert_allclose(py, ppod, atol=atol, err_msg=key) diff --git a/mne/io/pick.py b/mne/io/pick.py index 47caf0fdb18..4a7ae650994 100644 --- a/mne/io/pick.py +++ b/mne/io/pick.py @@ -75,9 +75,11 @@ def get_channel_type_constants(include_defaults=False): coil_type=FIFF.FIFFV_COIL_FNIRS_CW_AMPLITUDE), fnirs_fd_ac_amplitude=dict( kind=FIFF.FIFFV_FNIRS_CH, + unit=FIFF.FIFF_UNIT_V, coil_type=FIFF.FIFFV_COIL_FNIRS_FD_AC_AMPLITUDE), fnirs_fd_phase=dict( kind=FIFF.FIFFV_FNIRS_CH, + 
unit=FIFF.FIFF_UNIT_RAD, coil_type=FIFF.FIFFV_COIL_FNIRS_FD_PHASE), fnirs_od=dict(kind=FIFF.FIFFV_FNIRS_CH, coil_type=FIFF.FIFFV_COIL_FNIRS_OD), diff --git a/tutorials/io/plot_30_reading_fnirs_data.py b/tutorials/io/plot_30_reading_fnirs_data.py index 700f93cafb1..6059fa69dbb 100644 --- a/tutorials/io/plot_30_reading_fnirs_data.py +++ b/tutorials/io/plot_30_reading_fnirs_data.py @@ -40,21 +40,30 @@ .. _import-boxy: BOXY (.txt) -================================ +=========== BOXY recordings can be read in using :func:`mne.io.read_raw_boxy`. The BOXY software and ISS Imagent I and II devices are frequency domain -systems that store data in a single .txt file containing -DC `fnirs_cw_amplitude` (all light collected by the detector), -AC `fnirs_fd_ac_amplitude` (modulated light intensity), and -Phase `fnirs_fd_phase` information for each source and detector -combination. DC data is stored as the type 'fnirs_cw_amplitude since -it collects both the modulated and any unmodulated light, and to conform -to SNIRF standard types. These raw data files can be saved as parsed or unparsed -.txt files, which affects how the data in the file is organised. +systems that store data in a single ``.txt`` file containing what they call +(with MNE's name for that type of data in parens): + +- DC + All light collected by the detector (``fnirs_cw_amplitude``) +- AC + High-frequency modulated light intensity (``fnirs_fd_ac_amplitude``) +- Phase + Information for each source and detector (``fnirs_fd_phase``) + +DC data is stored as the type ``fnirs_cw_amplitude`` because it +collects both the modulated and any unmodulated light, and hence is analogous +to what is collected by NIRx and other systems. This helps with conformance +to SNIRF standard types. + +These raw data files can be saved by the acquisition devices as parsed or +unparsed ``.txt`` files, which affects how the data in the file is organised. MNE will read either file type and extract the raw DC, AC, and Phase data. 
-If triggers are sent using the 'digaux' port of the recording hardware, MNE -will also read the 'digaux' data and create annotations for any triggers. +If triggers are sent using the ``digaux`` port of the recording hardware, MNE +will also read the ``digaux`` data and create annotations for any triggers. Storing of optode locations From 4398d126cdb0ce3b6231280081140a5be409e269 Mon Sep 17 00:00:00 2001 From: Eric Larson Date: Thu, 19 Nov 2020 19:12:54 -0500 Subject: [PATCH 166/167] FIX: Unit --- mne/defaults.py | 8 ++++---- mne/tests/test_defaults.py | 4 +++- 2 files changed, 7 insertions(+), 5 deletions(-) diff --git a/mne/defaults.py b/mne/defaults.py index 22f206d4b08..5b85be312c2 100644 --- a/mne/defaults.py +++ b/mne/defaults.py @@ -17,20 +17,20 @@ misc='AU', seeg='mV', dipole='nAm', gof='GOF', bio='µV', ecog='µV', hbo='µM', hbr='µM', ref_meg='fT', fnirs_cw_amplitude='V', fnirs_fd_ac_amplitude='V', - fnirs_fd_phase='V', fnirs_od='V', csd='mV/m²'), + fnirs_fd_phase='rad', fnirs_od='V', csd='mV/m²'), # scalings for the units scalings=dict(mag=1e15, grad=1e13, eeg=1e6, eog=1e6, emg=1e6, ecg=1e6, misc=1.0, seeg=1e3, dipole=1e9, gof=1.0, bio=1e6, ecog=1e6, hbo=1e6, hbr=1e6, ref_meg=1e15, fnirs_cw_amplitude=1.0, - fnirs_fd_ac_amplitude=1.0, fnirs_fd_phase=1.0, fnirs_od=1.0, - csd=1e3), + fnirs_fd_ac_amplitude=1.0, fnirs_fd_phase=1., + fnirs_od=1.0, csd=1e3), # rough guess for a good plot scalings_plot_raw=dict(mag=1e-12, grad=4e-11, eeg=20e-6, eog=150e-6, ecg=5e-4, emg=1e-3, ref_meg=1e-12, misc='auto', stim=1, resp=1, chpi=1e-4, exci=1, ias=1, syst=1, seeg=1e-4, bio=1e-6, ecog=1e-4, hbo=10e-6, hbr=10e-6, whitened=10., fnirs_cw_amplitude=2e-2, - fnirs_fd_ac_amplitude=2e-2, fnirs_fd_phase=2e-2, + fnirs_fd_ac_amplitude=2e-2, fnirs_fd_phase=2e-1, fnirs_od=2e-2, csd=200e-4), scalings_cov_rank=dict(mag=1e12, grad=1e11, eeg=1e5, # ~100x scalings seeg=1e1, ecog=1e4, hbo=1e4, hbr=1e4), diff --git a/mne/tests/test_defaults.py b/mne/tests/test_defaults.py index 
aa3eee4af7c..4254a03c40b 100644 --- a/mne/tests/test_defaults.py +++ b/mne/tests/test_defaults.py @@ -39,7 +39,7 @@ def test_si_units(): 'n': 1e-9, 'f': 1e-15, } - known_SI = {'V', 'T', 'Am', 'm', 'M', + known_SI = {'V', 'T', 'Am', 'm', 'M', 'rad' 'AU', 'GOF'} # not really SI but we tolerate them powers = '²' @@ -50,6 +50,8 @@ def _split_si(x): prefix, si = '', 'GOF' elif x == 'AU': prefix, si = '', 'AU' + elif x == 'rad': + prefix, si = '', 'rad' elif len(x) == 2: if x[1] in powers: prefix, si = '', x From 9f6beb51161ccb871690c6cc9f49195afb38e84a Mon Sep 17 00:00:00 2001 From: Eric Larson Date: Fri, 20 Nov 2020 10:53:34 -0500 Subject: [PATCH 167/167] FIX: Wording, comma --- mne/tests/test_defaults.py | 2 +- tutorials/io/plot_30_reading_fnirs_data.py | 6 +++--- 2 files changed, 4 insertions(+), 4 deletions(-) diff --git a/mne/tests/test_defaults.py b/mne/tests/test_defaults.py index 4254a03c40b..55bc1883926 100644 --- a/mne/tests/test_defaults.py +++ b/mne/tests/test_defaults.py @@ -39,7 +39,7 @@ def test_si_units(): 'n': 1e-9, 'f': 1e-15, } - known_SI = {'V', 'T', 'Am', 'm', 'M', 'rad' + known_SI = {'V', 'T', 'Am', 'm', 'M', 'rad', 'AU', 'GOF'} # not really SI but we tolerate them powers = '²' diff --git a/tutorials/io/plot_30_reading_fnirs_data.py b/tutorials/io/plot_30_reading_fnirs_data.py index 6059fa69dbb..6968d32d685 100644 --- a/tutorials/io/plot_30_reading_fnirs_data.py +++ b/tutorials/io/plot_30_reading_fnirs_data.py @@ -52,12 +52,12 @@ - AC High-frequency modulated light intensity (``fnirs_fd_ac_amplitude``) - Phase - Information for each source and detector (``fnirs_fd_phase``) + Phase of the modulated light (``fnirs_fd_phase``) DC data is stored as the type ``fnirs_cw_amplitude`` because it collects both the modulated and any unmodulated light, and hence is analogous -to what is collected by NIRx and other systems. This helps with conformance -to SNIRF standard types. +to what is collected by continuous wave systems such as NIRx. 
This helps with +conformance to SNIRF standard types. These raw data files can be saved by the acquisition devices as parsed or unparsed ``.txt`` files, which affects how the data in the file is organised.