From 5fff7376852528240849114a8542a15ab3a9b5e7 Mon Sep 17 00:00:00 2001
From: "Helen E. Feibes"
Date: Thu, 11 Dec 2025 10:42:16 -0500
Subject: [PATCH 1/2] update passive task functions

---
 bin/passive_task_functions.py | 26 ++++++++++++++++++++------
 1 file changed, 20 insertions(+), 6 deletions(-)

diff --git a/bin/passive_task_functions.py b/bin/passive_task_functions.py
index bf99009..a7bd978 100755
--- a/bin/passive_task_functions.py
+++ b/bin/passive_task_functions.py
@@ -131,11 +131,25 @@ def load_betas(beta_coeffs_key, conditions_to_quant, content_root, subj_root):
     # For each condition you are interested in
     for cond in conditions_to_quant:
         # Load the condition betas image
-        cond_path = beta_coeffs_key[(beta_coeffs_key['session_ima']==run)&(beta_coeffs_key['condition']==cond)]['beta_path'].item()
-        try:
-            cond_data = nib.load(content_root + cond_path).get_fdata()
-        except:
-            cond_data = nib.load(cond_path).get_fdata()
+        #cond_path = beta_coeffs_key[(beta_coeffs_key['session_ima']==run)&(beta_coeffs_key['condition']==cond)]['beta_path'].item()
+        run_split_redundant = str(run).split('_')
+        if len(run_split_redundant)==3:
+            cond_path = os.path.join(subj_root, 'sessions', run_split_redundant[0]+'_'+run_split_redundant[1], run_split_redundant[2], cond + '_beta_coeffs.nii.gz')
+        elif len(run_split_redundant)==4:
+            cond_path = os.path.join(subj_root, 'sessions', run_split_redundant[0]+'_'+run_split_redundant[1]+'_'+run_split_redundant[2],
+                                     run_split_redundant[3], cond + '_beta_coeffs.nii.gz')
+        else:
+            raise ValueError('cannot determine cond path for run ' + str(run))
+# =============================================================================
+#         try:
+#             if cond_path[0:2]=='/s': # handle an os path join problem
+#                 cond_path = cond_path[1:]
+#             cond_data = nib.load(os.path.join(content_root, cond_path)).get_fdata()
+#         except:
+#             cond_data = nib.load(cond_path).get_fdata()
+# =============================================================================
+        cond_data = nib.load(cond_path).get_fdata()
+        print(cond_path)
         betas_3d.append([run, cond, cond_data])
     betas_df = pd.DataFrame(betas_3d, columns = ['run', 'condition', 'betas'])
     return betas_df
@@ -245,4 +259,4 @@ def get_top_vox(mod_dir, condition, prop, roi_def):
     masked_c_data = np.max(cond_masks, axis=0)
     top_prop = np.argpartition(masked_c_data, -n_top)[-n_top:] # get indices of top voxels
     top_roi_coords = tuple([[l[i] for i in range(len(l)) if i in top_prop] for l in roi_coords])
-    return top_roi_coords
+    return top_roi_coords, masked_c_data

From 847b1c67edadd7a5a5bd27a0da2f1c48a0be84f6 Mon Sep 17 00:00:00 2001
From: "Helen E. Feibes"
Feibes" Date: Thu, 11 Dec 2025 10:42:37 -0500 Subject: [PATCH 2/2] update for revisions --- analysis/bin_compute_confidence.py | 223 ++++++++++++++++++-------- analysis/color_biased_regions.py | 17 +- analysis/compute_selectivity.py | 7 +- analysis/learning_performance.py | 131 ++++++++++----- visualize/plot_color_biased_region.py | 132 ++++++++++++--- visualize/plot_selectivity.py | 18 ++- 6 files changed, 378 insertions(+), 150 deletions(-) diff --git a/analysis/bin_compute_confidence.py b/analysis/bin_compute_confidence.py index 735be45..c4191c5 100755 --- a/analysis/bin_compute_confidence.py +++ b/analysis/bin_compute_confidence.py @@ -13,85 +13,172 @@ import numpy as np import os from datetime import datetime +import matplotlib.pyplot as plt -# Choose subject -subject = 'w' # one of 'w', 'je', 'jo' +# make sure text is saved in svgs as text, not path +plt.rcParams['svg.fonttype'] = 'none' +plt.rcParams['font.family'] = 'sans-serif' +plt.rcParams['font.sans-serif'] = ['Helvetica'] +plt.rcParams['font.serif'] = ['Times'] +plt.rcParams['lines.linewidth'] = .75 * .4 -# Choose task -tasks = ['Probe_4AFC', 'Train_2AFC_idtrials', 'Train_4AFC', 'Train_2AFC'] # options -task = tasks[1] # which task to bin data for +# Choose task +probe_task = False +nafc = [4] # Set directories data_dir = 'data/learning_data' out_dir = 'results/learning' -# Load Data -subject_data_path = os.path.join(data_dir, subject + '_' + task + '.csv') -subject_data = pd.read_csv(subject_data_path) - -# Get year of each trial for plotting later -try: - subject_data['year'] = [datetime.fromtimestamp(x/1000).strftime("%Y") for x in subject_data['timestamp']] -except: - subject_data['year'] = '2016' # timestamp not currently in Train 2AFC id trial csvs, but all trials were in 2016 -# Split into color and shape trials on the basis of the choice -# E.g, Probe_4AFC choose_shape means cued color, chose shape, but for Train_4AFC it means cued colored shape, chose shape -choose_shape_trials = subject_data[subject_data['is_choice_color']==0].reset_index(drop=True) -choose_color_trials = subject_data[subject_data['is_choice_color']==1].reset_index(drop=True) - -# Bin trials -# Define how many bins to use -if task == 'Train_2AFC_idtrials': - n_in_bin = 50 # smaller than probe because many fewer trials -elif task == 'Train_2AFC': - n_in_bin = 75 # smaller than probe because many fewer trials +if probe_task: + bin_size = [1000] + task_names = ['_Probe_4AFC'] + plot_size = (3.5, 2) + name = 'probe' else: - n_in_bin = 500 + if len(nafc)==2: + bin_size = [200,1000] + task_names = ['_Train_2AFC', '_Train_4AFC'] + plot_size = (2.5,2) #(3.5, 2) + name = 'train' + elif len(nafc) == 1 and nafc[0] == 4: + bin_size = [1000] + task_names = ['_Train_4AFC'] + plot_size = (2.4,2)#(2.7 , 2) + name = 'train4afc' + else: + bin_size = [200] + task_names = ['_Train_2AFC'] + plot_size = (.7,2)#(.8, 2) + name = 'train2afc' + +x = [] +y = [] +y_ci = [] +year_ticks = [] +for subject in ['w', 'je']: + subj_x = [] + subj_y = [] + subj_y_ci = [] + subj_year_ticks = [] + subj_years = [] + for j, t in enumerate(task_names): + n_in_bin = bin_size[j] + subject_data = pd.read_csv(os.path.join(data_dir, subject + t + '.csv')) + # Get year of each trial for plotting later + + + # Bin data and get nested list containing the outcome values (0 or 1) of all trials in that bin + binned_trials = [subject_data['chose_correct'][i:i+n_in_bin] for i in range(0, len(subject_data), n_in_bin)] + ### + # counting sewssions in bins + sessions_ints = [int(x) for x in 
+        sessions_binned = [sessions_ints[i:i+n_in_bin] for i in range(0, len(subject_data), n_in_bin)]
+        id_bin = []
+        bin_how_many = []
+        for bb, b in enumerate(sessions_binned):
+            which_sessions = np.unique(b)
+            bin_how_many.append(which_sessions.shape[0])
+            id_bin.append(bb)
+
+        to_bev = pd.DataFrame({'bin num':id_bin, 'num sessions':bin_how_many})
+        # to_bev.to_csv('/home/ssbeast/Projects/HFTemp/ColorShapeContingency1/analysis/'+subject+'_sessions_bins.csv', index=False)
+        ###
+        # For each bin, bootstrap the accuracy 1000 times
+        n_boots = 1000
+        n_bins = len(binned_trials)
+        shape_color = np.zeros((3, n_bins)) # row 0: mean, row 1: lower CB, row 2: upper CB
+
+        for l, b in enumerate(binned_trials):
+            boot_accs = []
+            for i in range(n_boots):
+                sample = np.random.choice(b, size=n_in_bin) # array of 0s and 1s, resample to bin size
+                boot_accs.append(sample.mean()) # calculate accuracy for that sample of trials
+            boot_mean_acc = np.array(boot_accs).mean() # accuracy at trial l
+            boot_lcb = np.quantile(boot_accs, q=.025) # lower confidence bound of accuracy at trial l
+            boot_ucb = np.quantile(boot_accs, q=.975) # upper confidence bound of accuracy at trial l
+            shape_color[0,l] = boot_mean_acc
+            shape_color[1,l] = boot_lcb
+            shape_color[2,l] = boot_ucb
+
+        # To preserve trial number as x axis, get trial number each bin would be centered on
+        x_vals = len(subject_data)
+        use_x = list(range(int(n_in_bin/2),x_vals+int(n_in_bin/2), n_in_bin))
+        use_x = use_x[:n_bins]
+
+        try:
+            subject_data['year'] = [datetime.fromtimestamp(x/1000).strftime("%Y") for x in subject_data['timestamp']]
+        except:
+            subject_data['year'] = '2016' # timestamp not currently in Train 2AFC id trial csvs, but all trials were in 2016
+        # For each bin, approximate which year most trials in that bin were completed in, for plotting later
+
+        if t == '_Train_2AFC':
+            year_bins, include_years = [0], ['2016']
+        else:
+            bin_year = [subject_data['year'][i:i+n_in_bin] for i in range(0, len(subject_data), n_in_bin)]
+            year_labels = [x.mode()[0] for x in bin_year]
+            if len(year_labels) > n_bins: # if more years than bins, drop last, years are aligned to the first bin
+                year_labels = year_labels[:n_bins]
+            # Only want to plot a year mark at the start of each year
+            year_changes = [i for i in range(len(year_labels)) if year_labels[i] != year_labels[i-1]] # which bins are year transitions
+            include_years = [year_labels[x] for x in year_changes] # keep those years
+            year_bins = [use_x[i] for i in year_changes] # get corresponding trial number (bin) values
+        if j == 1:
+            use_x = [x + subj_x[0][-1] for x in use_x]
+            year_bins = [x + subj_x[0][-1] for x in year_bins]
+            if include_years[0] == '2016':
+                include_years = include_years[1:]
+                year_bins = year_bins[1:]
+        subj_x.append(use_x)
+        subj_y.append(shape_color[0])
+        subj_y_ci.append(np.array([shape_color[1],shape_color[2]]).T)
+        subj_year_ticks.extend(year_bins)
+        subj_years.extend(include_years)
+    x.append(subj_x)
+    y.append(subj_y)
+    y_ci.append(subj_y_ci)
+    year_ticks.append([subj_year_ticks, subj_years])
+
+bin_data = [x,y]
+ci_data = [x, y_ci]

-# Bin data and get nested list containing the outcome values (0 or 1) of all trials in that bin
-binned_choose_shape = [choose_shape_trials['chose_correct'][i:i+n_in_bin] for i in range(0, len(choose_shape_trials), n_in_bin)]
-binned_choose_color = [choose_color_trials['chose_correct'][i:i+n_in_bin] for i in range(0, len(choose_color_trials), n_in_bin)]
+colors = ["black", "tab:gray"]#["#D95319", "tab:gray"]
+open_c = False

-# For each bin, approximate which year most trials in that bin were completed in, for plotting later
-bin_year = [choose_shape_trials['year'][i:i+n_in_bin] for i in range(0, len(choose_shape_trials), n_in_bin)]
-bin_year_mode = [x.mode()[0] for x in bin_year]
+fig, axs = plt.subplots(figsize = plot_size)
+for i in range(len(bin_data)):
+    for j in range(len(bin_data[0][i])):
+        if open_c:
+            axs.scatter(bin_data[0][i][j], bin_data[1][i][j], facecolor='none', edgecolor=colors[i], s=6, linewidth=.2, rasterized=False)
+        else:
+            axs.scatter(bin_data[0][i][j], bin_data[1][i][j], facecolor=colors[i], edgecolor=colors[i], s=6, linewidth=.5, rasterized=False)
+        if ci_data is not None:
+            axs.fill_between(ci_data[0][i][j], np.array(ci_data[1][i][j]).T[0], np.array(ci_data[1][i][j]).T[1], alpha=.2, color=colors[i], rasterized=False) # rasterize CIs else get svg rendering issues
+    yr_ax = axs.secondary_xaxis(location=0)
+    yr_ax.set_xticks(year_ticks[i][0],year_ticks[i][1],fontsize=7, color=colors[i], rotation=90)
+if np.max(bin_data[0][0][-1]) > 20000:
+    xtick = list(range(0, int(np.max(bin_data[0][0][-1])), 20000))
+elif np.max(bin_data[0][0][-1]) < 15000:
+    xtick = [0, 10000]
+else:
+    xtick = list(range(0, int(np.max(bin_data[0][0][-1])), 1000))
+axs.margins(.05)

-# Deal with last bins - may have few trials and one trial type may have one more bin than another
-n_bins = np.min([len(binned_choose_shape), len(binned_choose_color)]) # min n bins shared by both trial types
-if binned_choose_shape[n_bins-1].shape[0] < 10 or binned_choose_color[n_bins-1].shape[0] < 10:
-    n_bins = n_bins-1 # if either final bin has very few trials, don't include in plot
+if len(nafc)==2:
+    stop2 = np.max([np.max(bin_data[0][0][0]), np.max(bin_data[0][1][0])])
+    start4 = np.min([np.min(bin_data[0][0][1]), np.min(bin_data[0][1][1])])
+    stop4 = np.max([np.max(bin_data[0][0][1]), np.max(bin_data[0][1][1])])

-# For each bin, bootstrap the accuracy 1000 times
-n_boots = 1000
-shape_color = np.zeros((2, 3, n_bins)) #
-# For each trial type
-for t, trial_type in enumerate([binned_choose_shape, binned_choose_color]):
-    trial_type_binned = trial_type[:n_bins]
-    # For each bin
-    for l, b in enumerate(trial_type_binned):
-        boot_accs = []
-        for i in range(n_boots):
-            sample = np.random.choice(b, size=n_in_bin) # array of 0s and 1s, resample to bin size
-            boot_accs.append(sample.mean()) # calculate accuracy for that sample of trials
-        boot_mean_acc = np.array(boot_accs).mean() # accuracy at trial l
-        boot_lcb = np.quantile(boot_accs, q=.025) # lower confidence bound of acccuracy at trial l
-        boot_ucb = np.quantile(boot_accs, q=.975) # upper confidence bound of accuracy at trial l
-        shape_color[t,0,l] = boot_mean_acc
-        shape_color[t,1,l] = boot_lcb
-        shape_color[t,2,l] = boot_ucb
+axs.tick_params(axis="both", length=2., pad=1)
+axs.tick_params(axis='x', pad=6)

-# To preserve trial number as x axis, get trial number each bin would be centered on
-x_vals = np.min([choose_shape_trials.shape[0],choose_color_trials.shape[0]])
-use_x = list(range(int(n_in_bin/2),x_vals+int(n_in_bin/2), n_in_bin))
-use_x = use_x[:n_bins]
+ytick = [.25,.5,1.]
+axs.set_yticks(ytick, labels=[str(yt) for yt in ytick], fontsize=7)
+axs.set_xticks(xtick)
+axs.set_xticklabels([int(xt/10000) for xt in xtick],fontsize=7)

-# Save out all accuracies, confidence intervals, x ticks, and years
-shape_accs = shape_color[0, 0, :]
-color_accs = shape_color[1, 0, :]
-shape_ci = shape_color[0, 1:,:].T
-color_ci = shape_color[1, 1:,:].T
-array_out_name = subject + '_'+task+'_learning_curve_data_'+str(n_boots)+'_all_binned.npz'
-out_path = os.path.join(out_dir, array_out_name)
-np.savez(out_path, color_x=use_x,color_accs=color_accs,
-         shape_x=use_x,shape_accs=shape_accs,color_i=use_x,
-         color_ci=color_ci,shape_i=use_x,shape_ci=shape_ci, bin_year = bin_year_mode)
+fig.tight_layout()
+#fig.savefig(os.path.join(out_dir, name + '_learning.svg'))
+plt.show()
+plt.close()
+
diff --git a/analysis/color_biased_regions.py b/analysis/color_biased_regions.py
index f5f9fde..4a1cace 100755
--- a/analysis/color_biased_regions.py
+++ b/analysis/color_biased_regions.py
@@ -10,12 +10,12 @@
 from bin import passive_task_functions as pf

 # Choose subject
-subject = 'jeeves' # one of 'wooster', 'jeeves'
-content_root = 'data' # where are the data stored
+subject = 'wooster' # one of 'wooster', 'jeeves'
+content_root = 'data' # where are the data stored; change this as needed
 subj_root = os.path.join(content_root, 'subjects', subject) # where is that subject's data

 # Set out directory
-outdir = 'results/passive'
+outdir = 'results/passive' # change this as needed

 # LOAD DATA KEYS pointing to nifti beta weight images for each condition on each run
 # for passive task 1 and for eccentricity
@@ -103,7 +103,14 @@
 scp_betas = pf.load_betas(scp_beta_coeffs_key, conditions_to_quant=['uncolored_shape', 'achromatic_shape', 'constant'],
                           content_root=content_root, subj_root=subj_root)
-
+scp_arrays = {}
+for i in range(len(scp_betas)):
+    namekey = scp_betas['run'][i] + '_' + scp_betas['condition'][i]
+    a=scp_betas['betas'][i]
+    scp_arrays[namekey] = a
+scpout = os.path.join('/mnt/isilon/PROJECTS/ColorShapeContingency1/data_files/univariate_data/save_out_betas', subject+'_scp_betas.npz')
+np.savez(scpout, **scp_arrays)
+
 # Path to subject's masked funcitonal target; used for getting brain mask
 ft_path = os.path.join(subj_root, 'mri', 'functional_target.nii.gz')

@@ -143,4 +150,4 @@
 # Save file out
 color_minus_noncolor_out = os.path.join(outdir, subject+'_color_assoc_bias_colorbiased_minus_noncolorbiased.csv')
-color_minus_noncolor.to_csv(color_minus_noncolor_out, index=False)
\ No newline at end of file
+#color_minus_noncolor.to_csv(color_minus_noncolor_out, index=False)
\ No newline at end of file
diff --git a/analysis/compute_selectivity.py b/analysis/compute_selectivity.py
index 9063c58..27e8e50 100755
--- a/analysis/compute_selectivity.py
+++ b/analysis/compute_selectivity.py
@@ -12,9 +12,9 @@
 import pandas as pd
 import nibabel as nib
 from bin import passive_task_functions as pf
-
+ 
 # Choose subject
-subject = 'wooster' # one of 'wooster', 'jeeves'
+subject = 'jeeves' # one of 'wooster', 'jeeves'
 content_root = 'data' # where are the data stored
 subj_root = os.path.join(content_root, 'subjects', subject) # where is that subject's data

@@ -36,8 +36,6 @@
 ecc_mod_dir = os.path.join(subj_root, 'analysis', 'ecc')
 ecc_beta_coeffs_key = pd.read_csv(os.path.join(subj_root, 'analysis', 'eccentricity_mapper_beta_coeffs_key.csv'))
-
-
 # GENERATE ROI DEFINITIONS
 # Load subject's atlas parcels
 atlas_path = os.path.join(subj_root, 'rois', 'major_divisions', 'final_atlas.nii.gz')
@@ -122,6 +120,7 @@
 congruency_betas = pf.load_betas(congruency_beta_coeffs_key, conditions_to_quant=['incongruent', 'congruent'],
                                  content_root=content_root, subj_root=subj_root)
+
 incongruency_selectivity = pf.selectivity(congruency_betas,'incongruent', 'congruent', subdiv_rois_congruency)
 incongruency_selectivity = pd.DataFrame(incongruency_selectivity, columns = ['run', 'roi', 'effect'])
 incongruency_selectivity['comparison'] = 'incongruent_vs_congruent'
diff --git a/analysis/learning_performance.py b/analysis/learning_performance.py
index beb250a..e4f999d 100755
--- a/analysis/learning_performance.py
+++ b/analysis/learning_performance.py
@@ -5,9 +5,10 @@
 import pandas as pd
 import numpy as np
 import os
+import math

 # Set directories
-data_dir = 'data/learning_data' # trial learning data csvs
+data_dir = 'data/learning_data'
 out_dir = 'results/learning'

 #######################
@@ -41,77 +42,119 @@
     # Grab last n trials
     shape = choose_shape_trials['chose_correct'].to_numpy()[len(choose_shape_trials)-last_n_trials:]
     color = choose_color_trials['chose_correct'].to_numpy()[len(choose_color_trials)-last_n_trials:]
+    all_trials = subject_data['chose_correct'].to_numpy()[len(subject_data)-last_n_trials:]

     # Bootstrap accuracy in last 1000 trials
+    all_samples = []
     shape_samples = []
     color_samples = []
     for t in range(1000):
+        all_samp = np.random.choice(all_trials, last_n_trials, replace=True)
         shape_samp = np.random.choice(shape, last_n_trials, replace=True)
         color_samp = np.random.choice(color, last_n_trials, replace=True)
+        all_samples.append(all_samp.mean())
         shape_samples.append(shape_samp.mean()) # get accuracy within sample
         color_samples.append(color_samp.mean())
+    all_trials_mean = np.mean(all_samples)
+    all_trials_lcb, all_trials_ucb = np.quantile(all_samples, .025), np.quantile(all_samples, .975) # confidence bounds
     color_mean = np.mean(color_samples) # chose color accuracy
     color_lcb, color_ucb = np.quantile(color_samples, .025), np.quantile(color_samples, .975) # confidence bounds
     shape_mean = np.mean(shape_samples) # chose shape accuracy
     shape_lcb, shape_ucb = np.quantile(shape_samples, .025), np.quantile(shape_samples, .975) # confidence bounds
-    plateau_perform.append([subject, color_mean, color_lcb, color_ucb, shape_mean, shape_lcb, shape_ucb])
+    plateau_perform.append([subject, color_mean, color_lcb, color_ucb, shape_mean, shape_lcb, shape_ucb, all_trials_mean,all_trials_lcb, all_trials_ucb])

 if task == 'Probe_4AFC':
-    title_cols = ['subject','shape_to_color_mean', 'shape_to_color_lower_cb', 'shape_to_color_upper_cb', 'color_to_shape_mean', 'color_to_shape_lower_cb', 'color_to_shape_upper_cb']
+    title_cols = ['subject','shape_to_color_mean', 'shape_to_color_lower_cb', 'shape_to_color_upper_cb', 'color_to_shape_mean', 'color_to_shape_lower_cb', 'color_to_shape_upper_cb','all_trials_mean', 'all_trials_lower_cb',
+                  'all_trials_upper_cb']
 elif task == 'Train_4AFC':
-    title_cols = ['subject','coloredshape_to_color_mean', 'coloredshape_to_color_lower_cb', 'coloredshape_to_color_upper_cb', 'coloredshape_to_shape_mean', 'coloredshape_to_shape_lower_cb', 'coloredshape_to_shape_upper_cb']
+    title_cols = ['subject','coloredshape_to_color_mean', 'coloredshape_to_color_lower_cb', 'coloredshape_to_color_upper_cb', 'coloredshape_to_shape_mean', 'coloredshape_to_shape_lower_cb', 'coloredshape_to_shape_upper_cb','all_trials_mean', 'all_trials_lower_cb',
+                  'all_trials_upper_cb']
 else:
-    title_cols = ['subject','color_to_color_mean', 'color_to_color_lower_cb', 'color_to_color_upper_cb', 'shape_to_shape_mean', 'shape_to_shape_lower_cb', 'shape_to_shape_upper_cb']
+    title_cols = ['subject','color_to_color_mean', 'color_to_color_lower_cb', 'color_to_color_upper_cb', 'shape_to_shape_mean', 'shape_to_shape_lower_cb', 'shape_to_shape_upper_cb','all_trials_mean', 'all_trials_lower_cb',
+                  'all_trials_upper_cb']

 plateau_df = pd.DataFrame(plateau_perform, columns=title_cols)
 df_out = os.path.join(out_dir, task + 'plateau_performance_last_'+str(last_n_trials)+'_trials.csv')
-plateau_df.to_csv(df_out, index=False)
+#plateau_df.to_csv(df_out, index=False)

 #######################
 # INITIAL PERFORMANCE #
 #######################
+def binomial_zscore(p_correct, n_trials, p_chance):
+    n_correct = p_correct*n_trials
+    mu = p_chance*n_trials
+    sigma = math.sqrt(mu*(1-p_chance))
+    z = (n_correct-mu)/sigma
+    return z

 # Check if performance on first long-term memory 2AFC trials was above chance
 # Load Data
-task = 'Probe_2AFC'
+tasks = ['Probe_2AFC', 'Probe_4AFC']
 n_boots = 1000 # number of bootstrap iterations
-initial_perform = []
-for subject in ['w', 'je']:
-    subject_data_path = os.path.join(data_dir, subject + '_' + task + '.csv')
-    subject_data = pd.read_csv(subject_data_path)
-
-    # Keep only sessions before short-term memory, matching trials began
-    subject_data = subject_data[subject_data['days_from_20160101']<27].reset_index(drop=True)
-
-    # Split into color and shape trials
-    choose_shape_trials = subject_data[subject_data['is_choice_color']==0].reset_index(drop=True) # chose shape
-    choose_color_trials = subject_data[subject_data['is_choice_color']==1].reset_index(drop=True) # chose color
-
-    # Get array of trial outcomes
-    shape = choose_shape_trials['chose_correct'].to_numpy()
-    color = choose_color_trials['chose_correct'].to_numpy()
-    # Get number of trials
-    shape_n = shape.shape[0]
-    color_n = color.shape[0]
-
-    # Bootstrap accuracy and estimate 95% confidence interval
-    shape_samples = []
-    color_samples = []
-    for t in range(n_boots):
-        shape_samp = np.random.choice(shape, shape_n, replace=True)
-        color_samp = np.random.choice(color, color_n, replace=True)
-        shape_samples.append(shape_samp.mean()) # get accuracy within sample
-        color_samples.append(color_samp.mean())
-    color_mean = np.mean(color_samples) # shape-to-color accuracy
-    color_lcb, color_ucb = np.quantile(color_samples, .025), np.quantile(color_samples, .975) # confidence bounds
-    shape_mean = np.mean(shape_samples) # color-to-shape trial accuracy
-    shape_lcb, shape_ucb = np.quantile(shape_samples, .025), np.quantile(shape_samples, .975) # confidence bounds
-    initial_perform.append([subject, color_mean, color_lcb, color_ucb, color_n, shape_mean, shape_lcb, shape_ucb, shape_n])
+for task in tasks:
+    initial_perform = []
+    if task == 'Probe_2AFC':
+        cutoff = 27
+        p_chance = .5
+    else:
+        cutoff = 355
+        p_chance = .25
+    for subject in ['w', 'je']:
+        subject_data_path = os.path.join(data_dir, subject + '_' + task + '.csv')
+        subject_data = pd.read_csv(subject_data_path)
+
+        # Keep only sessions before short-term memory, matching trials began
+        subject_data = subject_data[subject_data['days_from_20160101']