# Composite action: install Miniconda and build an EEG-ExPy conda env.
# Shared by the Test and Typecheck jobs so their env setup cannot drift.
name: Set up conda env
description: >
  Install Miniconda and create/activate an EEG-ExPy conda environment from
  the given env yml. Shared by the Test and Typecheck jobs so the two
  don't drift apart. Environment name is not set in the yml files so local
  installs can use any name they like.

inputs:
  environment-file:
    required: true
    description: Path to the conda environment yml file to install from.
  activate-environment:
    required: true
    description: Name to give the created environment.
  python-version:
    required: false
    description: >
      Python version to pin (e.g. '3.8'). Overrides the version conda would
      otherwise resolve from the environment file's constraints. When omitted,
      conda resolves freely within the environment file's range.

runs:
  using: composite
  steps:
    - uses: conda-incubator/setup-miniconda@v3
      with:
        environment-file: ${{ inputs.environment-file }}
        activate-environment: ${{ inputs.activate-environment }}
        # When python-version is omitted this expands to an empty string,
        # which setup-miniconda treats as "not pinned" (resolve from the
        # environment file). NOTE(review): confirmed against the action's
        # README default of '' — verify on action major-version upgrades.
        python-version: ${{ inputs.python-version }}
        auto-activate-base: false
        channels: conda-forge
        miniconda-version: "latest"
else - CHANGED_EXAMPLES=$(grep '^examples/.*\.py$' changed_files.txt | paste -sd '|' -) + # || true prevents grep's exit code 1 (no matches) from aborting the step + CHANGED_EXAMPLES=$(grep '^examples/.*\.py$' changed_files.txt | paste -sd '|' - || true) echo "FULL_BUILD=false" >> $GITHUB_ENV echo "CHANGED_EXAMPLES=$CHANGED_EXAMPLES" >> $GITHUB_ENV echo "Changed examples: $CHANGED_EXAMPLES" fi - - name: Cache built documentation id: cache-docs uses: actions/cache@v4 @@ -65,15 +55,12 @@ jobs: restore-keys: | ${{ runner.os }}-sphinx- - - name: Build docs - run: | - make docs + run: make docs - - name: Deploy Docs uses: peaceiris/actions-gh-pages@v3 - if: github.ref == 'refs/heads/master' # TODO: Deploy seperate develop-version of docs? + if: github.ref == 'refs/heads/master' || startsWith(github.ref, 'refs/heads/dev/') with: github_token: ${{ secrets.GITHUB_TOKEN }} publish_dir: doc/_build/html diff --git a/.github/workflows/test.yml b/.github/workflows/test.yml index 5b4f3c6aa..570a8bc54 100644 --- a/.github/workflows/test.yml +++ b/.github/workflows/test.yml @@ -29,15 +29,12 @@ jobs: if: "startsWith(runner.os, 'Linux')" run: | make install-deps-apt - - name: Install conda - uses: conda-incubator/setup-miniconda@v3 + - name: Set up conda env + uses: ./.github/actions/setup-conda-env with: environment-file: environments/eeg-expy-full.yml - auto-activate-base: false - python-version: ${{ matrix.python_version }} activate-environment: eeg-expy-full - channels: conda-forge - miniconda-version: "latest" + python-version: ${{ matrix.python_version }} - name: Fix PsychXR numpy dependency DLL issues (Windows only) if: matrix.os == 'windows-latest' @@ -75,15 +72,12 @@ jobs: steps: - uses: actions/checkout@v2 - - name: Install conda - uses: conda-incubator/setup-miniconda@v3 + - name: Set up conda env + uses: ./.github/actions/setup-conda-env with: environment-file: environments/eeg-expy-full.yml - auto-activate-base: false - python-version: ${{ matrix.python_version }} 
activate-environment: eeg-expy-full - channels: conda-forge - miniconda-version: "latest" + python-version: ${{ matrix.python_version }} - name: Typecheck run: | make typecheck diff --git a/.gitignore b/.gitignore index 8ce78c6c7..2b5152507 100644 --- a/.gitignore +++ b/.gitignore @@ -6,6 +6,8 @@ __pycache__ # Built as part of docs doc/auto_examples doc/_build +doc/generated/ +doc/sg_execution_times.rst # Built by auto_examples examples/visual_cueing/*.csv @@ -18,4 +20,4 @@ htmlcov # PyCharm .idea/ -**/.DS_Store \ No newline at end of file +**/.DS_Store diff --git a/doc/conf.py b/doc/conf.py index c449ea6fc..4982fc9ff 100644 --- a/doc/conf.py +++ b/doc/conf.py @@ -255,9 +255,9 @@ def setup(app): # Configurations for sphinx gallery -sphinx_gallery_conf = {'filename_pattern': '(?=.*r__)(?=.*.py)', - 'examples_dirs': ['../examples','../examples/visual_n170', '../examples/visual_p300','../examples/visual_ssvep', '../examples/visual_cueing', '../examples/visual_gonogo'], - 'gallery_dirs': ['auto_examples','auto_examples/visual_n170', 'auto_examples/visual_p300','auto_examples/visual_ssvep', 'auto_examples/visual_cueing', 'auto_examples/visual_gonogo'], +sphinx_gallery_conf = {'filename_pattern': '(?=.*r__)(?=.*.py)', + 'examples_dirs': ['../examples/visual_n170', '../examples/visual_p300','../examples/visual_ssvep', '../examples/visual_cueing', '../examples/visual_gonogo'], + 'gallery_dirs': ['auto_examples/visual_n170', 'auto_examples/visual_p300','auto_examples/visual_ssvep', 'auto_examples/visual_cueing', 'auto_examples/visual_gonogo'], 'within_subsection_order': FileNameSortKey, 'default_thumb_file': 'img/eeg-notebooks_logo.png', 'backreferences_dir': 'generated', # Where to drop linking files between examples & API diff --git a/doc/experiments/vprvep.rst b/doc/experiments/vprvep.rst new file mode 100644 index 000000000..36ac6b1ac --- /dev/null +++ b/doc/experiments/vprvep.rst @@ -0,0 +1,206 @@ +******************************** +_ +********************************* 
+ +Visual Pattern Reversal VEP +=========================== + +The Pattern Reversal VEP (PR-VEP) is the most widely studied visual +evoked potential paradigm. A checkerboard pattern swaps its black and +white squares at a regular rate (typically 2 reversals per second) while +the participant fixates a central dot. Each reversal elicits a +stereotyped waveform whose most prominent feature is the **P100**, a +positive deflection occurring ~100ms after the reversal at midline +occipital electrodes. The other components are a small N75 before it +and an N145 after it. + +In this notebook, we will attempt to detect the P100 with the OpenBCI +Cyton, with the most critical electrode at Oz, followed by O1 and O2, +then POz. Fp1 and Fp2 are optional channels for detecting eye movement +artefacts. We use monocular pattern reversal blocks and run the analysis +pipeline to pull out the per-eye P100 latency and the interocular +latency difference. + + +**PR-VEP Experiment Notebook Examples:** + +.. include:: ../auto_examples/visual_vep/index.rst + + +Running the Experiment +---------------------- + +.. code-block:: python + + from eegnb.devices.eeg import EEG + from eegnb.experiments.visual_vep import VisualPatternReversalVEP + + eeg = EEG(device='cyton') + experiment = VisualPatternReversalVEP( + display_refresh_rate=120, # must match display and be divisible by 2; higher rates give better latency precision + eeg=eeg, + save_fn='my_vep_recording.csv', + use_vr=True, # False for monitor mode + ) + experiment.run() + + +Participant Preparation +----------------------- + +The PR-VEP is sensitive to the optical quality of the retinal image. +Participants who normally wear glasses or contact lenses **must** wear +their corrective lenses during the test. Uncorrected refractive error +blurs the checkerboard's high spatial frequency edges, which attenuates +the P100 amplitude and can increase its latency — mimicking a genuine +neural conduction delay. 
This is especially important when comparing +latencies between eyes or across sessions. + +ISCEV guidelines require that visual acuity be documented for each +recording session. If a participant's corrected acuity is worse than +6/9 (20/30), note it alongside the data so that downstream analysis can +account for it. + + +Stimulus Parameters +------------------- + +Parameters follow the ISCEV "large check" option [Odom2016]_: + +- **Check size**: 1° of visual angle (0.5 cpd) +- **Reversal rate**: 2 reversals per second (one reversal per two display frames) +- **Field size**: 16° (monitor) / 20° (VR) +- **Contrast**: High contrast black/white, mean luminance held constant +- **Fixation**: Central red dot +- **Recording**: Monocular, alternating left and right eye per block + +Eight blocks of 50 seconds by default, giving ~100 reversals per eye per +block (400 per eye total). + +The experiment requires a display refresh rate that is divisible by two, +since each reversal occupies exactly two frames. Any such refresh rate is +supported — 60 Hz, 90 Hz, 120 Hz, 144 Hz, etc. A higher refresh rate +reduces the temporal jitter between the true reversal onset and the +nearest frame boundary, which directly translates to more precise P100 +latency estimates. For example, at 60 Hz each frame is ~16.7 ms wide, +whereas at 120 Hz it is ~8.3 ms — halving the worst-case timing error. +VR headsets running at 90 Hz or above are therefore preferred over a +standard 60 Hz monitor when absolute latency precision matters. + + +Monitor vs VR +------------- + +The experiment supports both standard monitor presentation and Meta +Quest (VR) presentation via ``use_vr=True``. + +**VR mode is preferred** for two reasons: + +- Each eye sees the checkerboard independently, so there is no manual + eye closure and no light leakage. 
+- The OpenXR compositor supplies a per-frame predicted photon time + (``tracking_state.headPose.time``), which is attached to the EEG + marker in place of ``time.time()``. This cancels most of the + output-side display latency — render queue, compositor buffering, + scan-out, HMD persistence — on a per-frame basis, which matters for + P100 latency where even small shifts are clinically meaningful. + +In monitor mode the software marker is the only timing source, so any +fixed display-pipeline latency has to be handled separately (see below). +A proof-of-concept photodiode sync patch is drawn in the bottom-left +corner of the window in monitor mode — a 50px square whose polarity +flips with each reversal. Taping a photodiode over that square and +routing its TTL into a spare channel would give hardware timing ground +truth; the code is in place but the hardware path is a work in progress — +instructions for wiring a photodiode to a Cyton digital input pin will +be added in a future update. + + +Electrode Placement +------------------- + +The P100 is generated in occipital cortex. Priority electrode placement +for the OpenBCI Cyton is: + +1. **Oz** — the primary electrode; highest amplitude P100 +2. **O1, O2** — lateral occipital; provide left/right asymmetry information +3. **POz** — parieto-occipital midline; useful fallback or supplement +4. **Fp1, Fp2** — optional; placed on the forehead to record eye movement + artefacts (EOG) for rejection during analysis + + +Latency Resolution +------------------ + +The precision of a P100 latency estimate depends on three factors: + +1. **Display refresh rate** — determines the worst-case stimulus timing + jitter (see *Stimulus Parameters* above). At 120 Hz this is ~4.2 ms + per frame. + +2. **EEG sampling rate** — the Cyton samples at 250 Hz, giving 4 ms + between samples. Without interpolation, the peak latency is locked to + the nearest sample and cannot resolve shifts smaller than 4 ms. + +3. 
**Number of trials** — averaging more reversals reduces noise in the + ERP waveform, tightening the confidence interval around the peak + estimate. The default is 8 blocks of 100 reversals (400 per eye). + +To achieve sub-sample precision the analysis pipeline uses **parabolic +interpolation**: a parabola is fitted through the peak sample and its +two neighbours, and the vertex of the fit is taken as the true peak +location. At 250 Hz this brings effective resolution to ~0.5 ms — well +below the sample interval. The interpolated peak finder is used by +default in ``vep_utils.plot_vep()``. + +For studies that require detecting latency shifts of 1–2 ms (e.g. +within-subject longitudinal comparisons), the combination of 120 Hz +display, parabolic interpolation, and the default 8-block design is +recommended. + + +Longitudinal Tracking +--------------------- + +To monitor P100 latency over time — for example during nerve recovery or +neuroplasticity studies — record multiple sessions using the same subject +and session numbering scheme and compare the per-eye P100 across them. + +Before attributing a latency change to an intervention, establish a +**baseline**: record at least 3–5 sessions over 1–2 weeks under the same +conditions. This gives you the natural session-to-session variability for +your setup and participant, so you can distinguish a real shift from +measurement noise. + +The ``02r__pattern_reversal_longitudinal.py`` example notebook +demonstrates the full workflow: discovering sessions, extracting per-eye +P100 latencies with parabolic interpolation, printing a summary table, +and plotting latency trends and interocular differences over time. + + +Timing Notes +------------ + +Measured P100 latency is the true P100 latency plus the display-pipeline +delay, plus the EEG device's input delay, plus any clock-alignment +error. 
For the Cyton the USB-serial latency is typically ~30–40ms, so +if you need *absolute* latencies you need to characterise and subtract +it; for *relative* comparisons (between-eye, within-subject across +sessions) it cancels out and you can ignore it. + +Two sidecar files are written alongside each recording to let you check +timing after the fact: + +- ``{save_fn}_timing.csv`` — per-trial software and compositor + timestamps and their delta +- ``{save_fn}_frame_stats.json`` — per-frame intervals and dropped-frame + count (150%-of-refresh threshold) + + +References +---------- + +.. [Odom2016] Odom JV, Bach M, Brigell M, Holder GE, McCulloch DL, Mizota A, + Tormene AP; International Society for Clinical Electrophysiology of Vision. + **ISCEV standard for clinical visual evoked potentials: (2016 update).** + *Documenta Ophthalmologica* 133(1):1-9. doi:10.1007/s10633-016-9553-y diff --git a/doc/index.rst b/doc/index.rst index 278093db3..ccdd6f85c 100644 --- a/doc/index.rst +++ b/doc/index.rst @@ -19,6 +19,7 @@ experiments/vn170 experiments/vp300 experiments/vssvep + experiments/vprvep experiments/cueing experiments/gonogo experiments/all_examples diff --git a/eegnb/analysis/__init__.py b/eegnb/analysis/__init__.py index e69de29bb..2fde929d0 100644 --- a/eegnb/analysis/__init__.py +++ b/eegnb/analysis/__init__.py @@ -0,0 +1 @@ +from eegnb.analysis import vep_utils # noqa: F401 diff --git a/eegnb/analysis/utils.py b/eegnb/analysis/utils.py index d9450981d..12690222f 100644 --- a/eegnb/analysis/utils.py +++ b/eegnb/analysis/utils.py @@ -1,3 +1,5 @@ +from __future__ import annotations + import copy from copy import deepcopy import math @@ -5,7 +7,10 @@ import sys from collections import OrderedDict from glob import glob -from typing import Union, List +from typing import TYPE_CHECKING, Union, List + +if TYPE_CHECKING: + from eegnb.devices.eeg import EEG from time import sleep, time import os @@ -23,9 +28,11 @@ from scipy.signal import lfilter, lfilter_zi from eegnb 
"""Analysis helpers for Visual Evoked Potential (VEP) recordings.

Provides sub-sample ERP peak detection via parabolic interpolation and a
convenience plotter that locates and marks the canonical pattern-reversal
components (N75, P100, N145) on an occipital Evoked response.
"""
import numpy as np
from mne import Evoked


def print_latency(peak_name, peak_latency, peak_channel, uv):
    """Print a one-line summary of a detected ERP peak.

    Args:
        peak_name: Component label, e.g. ``'P100'``.
        peak_latency: Peak latency in seconds; displayed in milliseconds.
        peak_channel: Name of the channel where the peak was found.
        uv: Peak amplitude in volts; displayed in microvolts.
    """
    peak_latency = round(peak_latency * 1e3, 2)  # convert to milliseconds
    uv = round(uv * 1e6, 2)  # convert to µV
    print('{} Peak of {} µV at {} ms in peak_channel {}'.format(peak_name, uv, peak_latency, peak_channel))


def get_peak(erp_name, evoked_potential, peak_time_min, peak_time_max, mode):
    """Find peak latency with sub-sample precision using parabolic interpolation.

    MNE's get_peak returns the sample with the largest value, limiting
    resolution to the sample interval (4 ms at 250 Hz). A parabolic fit
    through the peak sample and its two neighbours recovers the true peak
    location between samples, giving ~0.5 ms precision at 250 Hz.

    Args:
        erp_name: Component label passed through to :func:`print_latency`.
        evoked_potential: ``mne.Evoked`` to search.
        peak_time_min: Start of the search window, in seconds.
        peak_time_max: End of the search window, in seconds.
        mode: Peak polarity passed to ``Evoked.get_peak`` (``'pos'``/``'neg'``).

    Returns:
        Interpolated peak latency in seconds (falls back to the sample-level
        latency when interpolation is not possible).
    """
    # Step 1: find the sample-level peak via MNE
    peak_channel, sample_latency, _ = evoked_potential.get_peak(
        tmin=peak_time_min, tmax=peak_time_max,
        mode=mode, return_amplitude=True)

    # Step 2: parabolic interpolation around the peak sample
    ch_idx = evoked_potential.ch_names.index(peak_channel)
    times = evoked_potential.times
    data = evoked_potential.data[ch_idx]

    # Map the latency MNE reported back to its sample index.
    peak_sample = np.argmin(np.abs(times - sample_latency))

    # Need at least one sample on each side for the fit
    if 0 < peak_sample < len(times) - 1:
        y_prev = data[peak_sample - 1]
        y_peak = data[peak_sample]
        y_next = data[peak_sample + 1]

        # Parabolic interpolation: offset from centre sample
        # (offset is in units of one sample interval, in [-0.5, 0.5] for a
        # true local extremum).
        denom = y_prev - 2 * y_peak + y_next
        if abs(denom) > 1e-30:
            # denom ~ 0 means the three points are collinear (flat peak);
            # the parabola is degenerate, so keep the sample-level values.
            offset = 0.5 * (y_prev - y_next) / denom
            dt = times[peak_sample] - times[peak_sample - 1]
            interp_latency = times[peak_sample] + offset * dt
            # Vertex amplitude of the fitted parabola (still in volts).
            interp_uv = y_peak - 0.25 * (y_prev - y_next) * offset
        else:
            interp_latency = sample_latency
            interp_uv = y_peak
    else:
        # Peak sits on the window edge — cannot fit, report sample values.
        interp_latency = sample_latency
        interp_uv = data[peak_sample]

    print_latency(erp_name, interp_latency, peak_channel, interp_uv)
    return interp_latency


def plot_vep(evoked_occipital: Evoked):
    """Locate N75/P100/N145 and plot the Evoked response with markers.

    The N75 search window is anchored at fixed post-stimulus times; the
    P100 and N145 windows are anchored on the previously found peak, so
    each component constrains the next. Latencies are printed by
    :func:`get_peak`; only the P100 (plus stimulus onset and a 100 ms
    reference) is drawn on the figure.

    Args:
        evoked_occipital: Averaged occipital ``mne.Evoked`` response.

    Returns:
        The matplotlib Figure returned by ``Evoked.plot``.
    """
    n75_peak_width = 0.05
    n75_latency = get_peak(erp_name='N75',
                           evoked_potential=evoked_occipital,
                           peak_time_min=0.06,
                           peak_time_max=0.075 + n75_peak_width,
                           mode='neg')
    p100_peak_width = 0.1
    p100_latency = get_peak(erp_name='P100',
                            evoked_potential=evoked_occipital,
                            peak_time_min=n75_latency,
                            peak_time_max=n75_latency + p100_peak_width,
                            mode='pos')
    # NOTE(review): n145_latency is only used for its printed output; the
    # marker line for it below is intentionally commented out.
    n145_peak_width = 0.12
    n145_latency = get_peak(erp_name='N145',
                            evoked_potential=evoked_occipital,
                            peak_time_min=p100_latency,
                            peak_time_max=p100_latency + n145_peak_width,
                            mode='neg')

    # 'plt' here is a matplotlib Figure, not the pyplot module.
    plt = evoked_occipital.plot(spatial_colors=True, show=False)

    # Get the axes from the figure
    axes = plt.get_axes()  # This gets all Axes objects

    # Add vertical lines as markers to each subplot
    ax = axes[0]
    ax.axvline(x=0, color='r', linestyle='--', label='stim')
    ax.axvline(x=0.100, color='r', linestyle='--', label='100 ms')
    #ax.axvline(x=n75_latency, color='g', linestyle='-', label='n75')
    ax.axvline(x=p100_latency, color='g', linestyle='-', label='p100')
    #ax.axvline(x=n145_latency, color='g', linestyle='-', label='n145')

    # Add a legend to each subplot
    # ax.legend()

    # plt.show()

    # Add a legend
    plt.legend(loc="lower right")

    return plt
If not, create it if os.path.exists(data_dir) is not True: os.makedirs(data_dir) diff --git a/eegnb/devices/__init__.py b/eegnb/devices/__init__.py index e69de29bb..3b0919c63 100644 --- a/eegnb/devices/__init__.py +++ b/eegnb/devices/__init__.py @@ -0,0 +1,8 @@ +from eegnb.devices.utils import ( # noqa: F401 + CYTON_CONFIG_GAIN_1X, + CYTON_CONFIG_GAIN_2X, + CYTON_CONFIG_GAIN_4X, + CYTON_CONFIG_GAIN_6X, + CYTON_CONFIG_GAIN_12X, + CYTON_CONFIG_GAIN_24X, +) diff --git a/eegnb/devices/utils.py b/eegnb/devices/utils.py index c804befd0..d2075be13 100644 --- a/eegnb/devices/utils.py +++ b/eegnb/devices/utils.py @@ -90,6 +90,43 @@ } + +# --------------------------------------------------------------------------- +# Cyton board channel configuration presets +# --------------------------------------------------------------------------- +# Each channel command has the format: x N P G I B S1 S2 X +# N = channel number (1-8) +# P = power (0=ON, 1=OFF) +# G = gain (0=1×, 1=2×, 2=4×, 3=6×, 4=12×, 5=24×) +# I = input type (0=normal EEG, 1=shorted, ...) +# B = include in BIAS derivation (1=yes) +# S2 = SRB2 connection (1=connected) +# S1 = SRB1 connection (0=disconnected) +# +# Build a config string by joining per-channel strings — applied with +# EEG(device='cyton', config=CYTON_CONFIG_GAIN_12X). + +def _cyton_ch_config(gain_code: int, n_channels: int = 8) -> str: + """Build a Cyton channel-settings string for all channels. + + Args: + gain_code: BrainFlow gain code (0=1×, 1=2×, 2=4×, 3=6×, 4=12×, 5=24×). + n_channels: Number of channels to configure (default 8 for standard Cyton). + + Returns: + Config string ready to pass to ``EEG(config=...)``. + """ + return "".join(f"x{ch}{gain_code}0110X" for ch in range(1, n_channels + 1)) + +# Standard gain presets — normal EEG input, bias enabled, SRB2 on, SRB1 off. 
"""
BlockExperiment Class - Extends BaseExperiment with block-based functionality

This class provides block-based experiment capabilities by inheriting from BaseExperiment
and overriding the run method to handle multiple blocks. It loads stimulus only once
and reuses it across blocks, while allowing block-specific instructions.

Experiments that need block-based execution should inherit from this class instead of BaseExperiment.
"""
import gc
from abc import ABC
from time import time

from .Experiment import BaseExperiment


class BlockExperiment(BaseExperiment, ABC):
    """
    Inherits from BaseExperiment to provide block-based functionality.

    This class is designed for experiments that need to run as multiple blocks.
    Each block has its own instructions and duration. It loads all stimuli at once, then re/uses it across blocks.
    """

    def __init__(self, exp_name, block_duration, eeg, save_fn, block_trial_size, n_blocks, iti: float, soa: float, jitter: float,
                 use_vr=False, use_fullscr=True, stereoscopic=False):
        """ Initializer for the BlockExperiment Class

        Args:
            exp_name (str): Name of the experiment
            block_duration (float): Duration of each block in seconds
            eeg: EEG device object for recording
            save_fn (str): Save filename for data
            block_trial_size (int): Number of trials per block
            n_blocks (int): Number of blocks to run
            iti (float): Inter-trial interval, in seconds
            soa (float): Stimulus onset asynchrony — time between successive
                stimulus onsets, in seconds
            jitter (float): Maximum random jitter added between stimuli, in seconds
            use_vr (bool): Use VR for displaying stimulus
            use_fullscr (bool): Use fullscreen mode
            stereoscopic (bool): Render separately per eye (VR); passed through
                to BaseExperiment
        """
        # Calculate total trials for the base class
        total_trials = block_trial_size * n_blocks

        # Initialize BaseExperiment with total trials
        # Pass None for duration if block_duration is None to ignore time spent in instructions
        super().__init__(exp_name, block_duration, eeg, save_fn, total_trials, iti, soa, jitter, use_vr, use_fullscr, stereoscopic)

        # Block-specific parameters
        self.block_duration = block_duration
        self.block_trial_size = block_trial_size
        self.n_blocks = n_blocks

        # Current block index (0-based); updated by run() as blocks advance
        self.current_block_index = 0

    def present_block_instructions(self, current_block):
        """
        Display instructions for the current block to the user.

        This method is meant to be overridden by child classes to provide
        experiment-specific instructions before each block. The base implementation
        simply flips the window without adding any text.

        This method is called by _show_block_instructions in a loop until the user
        provides input to continue or cancel the experiment.

        Args:
            current_block (int): The current block number (0-indexed), used to customize
                instructions for specific blocks if needed.
        """
        self.window.flip()

    def _show_block_instructions(self, block_number):
        """
        Show instructions for a specific block and wait for user input.

        Blocks until the user either confirms ('start' input) or cancels
        ('cancel' input); the instruction screen is redrawn every loop
        iteration via present_block_instructions().

        Args:
            block_number (int): Current block number (0-indexed)

        Returns:
            bool: True to continue the experiment, False if the user cancelled.
        """

        # Clear any previous input
        self._clear_user_input()

        # Wait for user input to continue
        while True:
            # Display the instruction text
            super()._draw(lambda: self.present_block_instructions(block_number))

            if self._user_input('start'):
                return True
            elif self._user_input('cancel'):
                return False

    def run(self, instructions=True):
        """
        Run the experiment as a series of blocks

        This method overrides BaseExperiment.run() to handle multiple blocks.
        The EEG stream is started once before the first block and stopped
        after the last; the window is closed on exit.

        Args:
            instructions (bool): Whether to show the initial experiment instructions

        Returns:
            False if setup() fails; otherwise None when all blocks complete
            (or the user cancels / a trial loop ends early).
        """
        # Setup the experiment (creates window, loads stimulus once)
        if not self.setup(instructions):
            return False

        # Start EEG Stream once for all blocks
        if self.eeg:
            print("Wait for the EEG-stream to start...")
            self.eeg.start(self.save_fn)
            print("EEG Stream started")

        self._enable_frame_tracking()

        # Run each block
        for block_index in range(self.n_blocks):
            self.current_block_index = block_index
            print(f"Starting block {block_index + 1} of {self.n_blocks}")

            # Show block-specific instructions; a False return means the
            # user cancelled, so stop running further blocks.
            if not self._show_block_instructions(block_index):
                break

            # Elevate process priority and disable GC during the trial loop
            # to prevent OS scheduling jitter and ~1-10ms GC pauses that
            # cause the Quest Link compositor to drop frames (hourglass).
            # Both are restored between blocks so instruction screens run normally.
            from psychopy import core
            core.rush(True)
            gc.disable()
            try:
                if not self._run_trial_loop(start_time=time(), duration=self.block_duration):
                    break
            finally:
                gc.enable()
                core.rush(False)

        self._report_frame_stats()

        # Stop EEG Stream after all blocks
        if self.eeg:
            self.eeg.stop()

        # Close window at the end of all blocks
        self.window.close()
+ self.predicted_display_time = tracking_state.headPose.time present_stimulus() def _clear_user_input(self): @@ -310,6 +318,43 @@ def iti_with_jitter(): return True + def _enable_frame_tracking(self): + """Enable per-frame interval recording for dropped frame diagnostics.""" + self.window.recordFrameIntervals = True + # Threshold for counting a frame as "dropped" — 50% over expected duration + expected_frame_dur = 1.0 / (self.window.displayRefreshRate if self.use_vr + else (self.window.getActualFrameRate() or 60)) + self.window.refreshThreshold = expected_frame_dur * 1.5 + + def _report_frame_stats(self): + """Print frame timing summary and save intervals alongside recording.""" + intervals = self.window.frameIntervals + if not intervals: + return + + intervals_ms = [i * 1000 for i in intervals] + dropped = self.window.nDroppedFrames + total = len(intervals) + mean_ms = np.mean(intervals_ms) + std_ms = np.std(intervals_ms) + max_ms = max(intervals_ms) + + print(f"\nFrame timing: {total} frames, {dropped} dropped ({dropped/total*100:.1f}%)") + print(f" Mean: {mean_ms:.2f}ms Std: {std_ms:.2f}ms Max: {max_ms:.2f}ms") + + if self.save_fn: + stats_path = self.save_fn.with_name(self.save_fn.stem + '_frame_stats.json') + with open(stats_path, 'w') as f: + json.dump({ + 'total_frames': total, + 'dropped_frames': dropped, + 'mean_ms': round(mean_ms, 3), + 'std_ms': round(std_ms, 3), + 'max_ms': round(max_ms, 3), + 'intervals_ms': [round(i, 3) for i in intervals_ms] + }, f, indent=2) + print(f" Saved to {stats_path}") + def run(self, instructions=True): """ Run the experiment """ @@ -323,11 +368,23 @@ def run(self, instructions=True): self.eeg.start(self.save_fn, duration=self.duration + 5) print("EEG Stream started") + self._enable_frame_tracking() + # Record experiment until a key is pressed or duration has expired. 
record_start_time = time() - # Run the trial loop - self._run_trial_loop(record_start_time, self.duration) + # Elevate process priority and disable GC during the trial loop to + # prevent OS scheduling jitter and ~1-10ms GC pauses that cause + # dropped frames (visible as Quest Link hourglass). + core.rush(True) + gc.disable() + try: + self._run_trial_loop(record_start_time, self.duration) + finally: + gc.enable() + core.rush(False) + + self._report_frame_stats() # Clearing the screen for the next trial event.clearEvents() diff --git a/eegnb/experiments/visual_vep/__init__.py b/eegnb/experiments/visual_vep/__init__.py index e69de29bb..514a3492b 100644 --- a/eegnb/experiments/visual_vep/__init__.py +++ b/eegnb/experiments/visual_vep/__init__.py @@ -0,0 +1,10 @@ +"""Visual Evoked Potential (VEP) experiments module. + +This module contains experiments for measuring visual evoked potentials, +including pattern reversal VEP for assessing the P100 component. +""" + +from .grating_vep import VisualGratingVEP +from .pattern_reversal_vep import VisualPatternReversalVEP + +__all__ = ['VisualGratingVEP', 'VisualPatternReversalVEP'] diff --git a/eegnb/experiments/visual_vep/vep.py b/eegnb/experiments/visual_vep/grating_vep.py similarity index 97% rename from eegnb/experiments/visual_vep/vep.py rename to eegnb/experiments/visual_vep/grating_vep.py index 11d076cc4..c661b94c0 100644 --- a/eegnb/experiments/visual_vep/vep.py +++ b/eegnb/experiments/visual_vep/grating_vep.py @@ -5,7 +5,7 @@ from eegnb.devices.eeg import EEG -class VisualVEP(Experiment.BaseExperiment): +class VisualGratingVEP(Experiment.BaseExperiment): def __init__(self, duration=120, eeg: Optional[EEG]=None, save_fn=None, diff --git a/eegnb/experiments/visual_vep/pattern_reversal_vep.py b/eegnb/experiments/visual_vep/pattern_reversal_vep.py new file mode 100644 index 000000000..ecf8464bc --- /dev/null +++ b/eegnb/experiments/visual_vep/pattern_reversal_vep.py @@ -0,0 +1,271 @@ +from time import time +import csv 
import os
import numpy as np

from psychopy import visual
from typing import Optional, Dict, Any
from eegnb.devices.eeg import EEG
from eegnb.experiments.BlockExperiment import BlockExperiment
from stimupy.stimuli.checkerboards import contrast_contrast

# Approximate pixels-per-degree of the Meta Quest 2 display over PC Link.
QUEST_PPD = 20

class VisualPatternReversalVEP(BlockExperiment):
    """Pattern reversal VEP experiment.

    Presents a checkerboard whose black/white squares swap once per SOA
    (soa=0.5s -> 2 reversals/second), alternating monocular stimulation
    between left and right eye across blocks. Each reversal elicits a P100
    at occipital electrodes.
    """

    def __init__(self, display_refresh_rate: int, eeg: Optional[EEG] = None, save_fn=None,
                 block_duration_seconds=50, block_trial_size: int = 100, n_blocks: int = 8, use_vr=False, use_fullscr=True):
        # Configured refresh rate in Hz; validated against the measured rate
        # in load_stimulus().
        self.display_refresh_rate = display_refresh_rate
        soa = 0.5    # seconds per reversal (2 reversals/second)
        iti = 0
        jitter = 0

        super().__init__("Visual Pattern Reversal VEP", block_duration_seconds, eeg, save_fn, block_trial_size, n_blocks, iti, soa, jitter, use_vr, use_fullscr, stereoscopic=True)

        # Per-trial timing sidecar: records software time, compositor predicted
        # display time, and delta for each trial. Written alongside the EEG CSV.
        if save_fn:
            timing_path = save_fn.with_name(save_fn.stem + '_timing.csv')
        else:
            timing_path = 'vep_timing.csv'
        self._timing_file = open(timing_path, 'w', newline='')
        self._timing_writer = csv.writer(self._timing_file)
        self._timing_writer.writerow(
            ['trial_idx', 'software_time', 'predicted_display_time', 'delta_ms', 'use_vr']
        )

        self.instruction_text = f"""Welcome to the Visual Pattern Reversal VEP experiment!

        This experiment will run for {n_blocks} blocks of {block_duration_seconds} seconds each.

        Press spacebar or controller to continue.

        """

        # Setting up the trial and parameter list
        left_eye = 0
        right_eye = 1
        # Alternate between left and right eye blocks; one entry per trial
        # (0 = left eye, 1 = right eye).
        block_eyes = []
        for block_num in range(n_blocks):
            eye = left_eye if block_num % 2 == 0 else right_eye
            block_eyes.extend([eye] * block_trial_size)
        self.parameter = np.array(block_eyes)

    @staticmethod
    def create_monitor_checkerboard(intensity_checks):
        """Build the monitor checkerboard image (standard 1-degree checks)."""
        # Standard parameters for monitor-based pattern reversal VEP
        # Using standard 1 degree check size at 30 pixels per degree
        return contrast_contrast(
            visual_size=(16, 16),     # aspect ratio in degrees
            ppd=72,                   # pixels per degree
            frequency=(0.5, 0.5),     # spatial frequency (0.5 cpd = 1 degree check size)
            intensity_checks=intensity_checks,
            target_shape=(0, 0),
            alpha=0,
            tau=0
        )

    @staticmethod
    def create_vr_checkerboard(intensity_checks):
        """Build the VR checkerboard image tuned for Meta Quest 2 over PC Link."""
        # Quest 2 has approximately 20 pixels per degree and a ~90 degree FOV.
        # Using standard 1 degree check size (0.5 cpd).
        return contrast_contrast(
            visual_size=(20, 20),     # size in degrees - covers a good portion of the FOV
            ppd=QUEST_PPD,            # pixels per degree for Quest 2
            frequency=(0.5, 0.5),     # spatial frequency (0.5 cpd = 1 degree check size)
            intensity_checks=intensity_checks,
            target_shape=(0, 0),
            alpha=0,
            tau=0
        )

    def load_stimulus(self) -> Dict[str, Any]:
        """Build all visual stimuli.

        Returns a dict keyed by 'left'/'right' (VR) or 'monoscopic' (monitor),
        each holding the two reversal-phase checkerboards, a fixation dot, and
        the VR block-instruction text.
        """
        # Frame rate, in Hz
        # TODO: Fix - Rift.GetActualFrameRate() crashes in psychxr due to 'EndFrame called before BeginFrame'
        actual_frame_rate = np.round(self.window.displayRefreshRate if self.use_vr else self.window.getActualFrameRate())

        # Each reversal (soa seconds) must span a whole number of frames.
        # FIX: the previous check `actual_frame_rate % self.soa == 0` was
        # vacuous — the rate is rounded to a whole number, and every whole
        # number is a multiple of 0.5 — so it could never fire.
        assert (actual_frame_rate * self.soa) % 1 == 0, f"Expected frame rate divisible by stimulus rate: {self.soa}, but got {actual_frame_rate} Hz"
        # Measured rate must be within 5% of the configured rate.
        assert abs(self.display_refresh_rate - actual_frame_rate) <= self.display_refresh_rate * 0.05, f"Expected frame rate {self.display_refresh_rate} Hz, but got {actual_frame_rate} Hz"

        if self.use_vr:
            # Create the VR checkerboard
            create_checkerboard = self.create_vr_checkerboard
            # the window is large over the eye, checkerboard should only cover the central vision
            size = self.window.size / 1.5
        else:
            # Create the Monitor checkerboard
            create_checkerboard = self.create_monitor_checkerboard
            # NOTE(review): `self.window_size` here vs `self.window.size` above —
            # confirm BlockExperiment defines `window_size`, or this raises
            # AttributeError on the monitor path.
            size = (self.window_size[1], self.window_size[1])

        # Optode sync patch: small white/black square in the bottom-left corner.
        # Alternates polarity with each checkerboard reversal so a photodiode
        # taped to this corner produces a TTL pulse on every stimulus onset.
        # Monitor path only — VR uses compositor timestamps instead.
        if not self.use_vr:
            patch_size = 50  # pixels
            x = -self.window.size[0] / 2 + patch_size / 2
            y = -self.window.size[1] / 2 + patch_size / 2
            self.optode_patch = visual.Rect(
                self.window, width=patch_size, height=patch_size,
                pos=(x, y), units='pix', fillColor='white'
            )
        else:
            self.optode_patch = None

        # The surrounding / periphery needs to be dark when not using vr.
        # Also used for covering eye which is not being stimulated.
        self.black_background = visual.Rect(self.window,
                                            width=self.window.size[0],
                                            height=self.window.size[1],
                                            fillColor='black')

        # A grey background behind the checkerboard must be used in vr to maintain luminance.
        self.grey_background = visual.Rect(self.window,
                                           width=self.window.size[0],
                                           height=self.window.size[1],
                                           fillColor=[-0.22, -0.22, -0.22])

        # Create checkerboard stimuli
        def create_checkerboard_stim(intensity_checks, pos):
            return visual.ImageStim(self.window,
                                    image=create_checkerboard(intensity_checks)['img'],
                                    units='pix', size=size, color='white', pos=pos)

        # Create fixation stimuli
        def create_fixation_stim(pos):
            fixation = visual.GratingStim(
                win=self.window,
                pos=pos,
                sf=400 if self.use_vr else 0.2,
                color=[1, 0, 0]
            )
            fixation.size = 0.02 if self.use_vr else 0.4
            return fixation

        # Create VR block instruction stimuli
        def create_vr_block_instruction(pos):
            return visual.TextStim(win=self.window, text="Focus on the red dot, and try not to blink whilst the squares are flashing, press the spacebar or pull the controller trigger when ready to commence.", color=[-1, -1, -1],
                                   pos=pos, height=0.1)

        # Create and position stimulus
        def create_eye_stimuli(eye_x_pos, pix_x_pos):
            return {
                'checkerboards': [
                    create_checkerboard_stim((1, -1), pos=(pix_x_pos, 0)),
                    create_checkerboard_stim((-1, 1), pos=(pix_x_pos, 0))
                ],
                'fixation': create_fixation_stim([eye_x_pos, 0]),
                'vr_block_instructions': create_vr_block_instruction((eye_x_pos, 0))
            }

        # Structure all stimuli in organized dictionary
        if self.use_vr:
            # Calculate pixel positions for stereoscopic presentation
            window_width = self.window.size[0]
            left_pix_x_pos = self.left_eye_x_pos * (window_width / 2)
            right_pix_x_pos = self.right_eye_x_pos * (window_width / 2)

            return {
                'left': create_eye_stimuli(self.left_eye_x_pos, left_pix_x_pos),
                'right': create_eye_stimuli(self.right_eye_x_pos, right_pix_x_pos)
            }
        else:
            return {
                'monoscopic': create_eye_stimuli(0, 0)
            }

    def _present_vr_block_instructions(self, open_eye, closed_eye):
        # Draw instructions + fixation to the stimulated eye's buffer and
        # black out the other eye's buffer. Caller is responsible for flip().
        self.window.setBuffer(open_eye)
        self.stim[open_eye]['vr_block_instructions'].draw()
        self.stim[open_eye]['fixation'].draw()
        self.window.setBuffer(closed_eye)
        self.black_background.draw()

    def present_block_instructions(self, current_block: int) -> None:
        """Show the per-block instruction screen.

        Even blocks stimulate the left eye, odd blocks the right eye,
        mirroring the trial labels built in __init__.
        """
        if self.use_vr:
            if current_block % 2 == 0:
                self._present_vr_block_instructions(open_eye="left", closed_eye="right")
            else:
                self._present_vr_block_instructions(open_eye="right", closed_eye="left")
        else:
            if current_block % 2 == 0:
                instruction_text = (
                    "Close your right eye, then focus on the red dot with your left eye. "
                    "Press spacebar or controller when ready."
                )
            else:
                instruction_text = (
                    "Close your left eye, then focus on the red dot with your right eye. "
                    "Press spacebar or controller when ready."
                )
            text = visual.TextStim(win=self.window, text=instruction_text, color=[-1, -1, -1])
            text.draw()
            self.stim['monoscopic']['fixation'].draw()
        # NOTE(review): flip applied on both VR and monitor paths so the
        # instructions become visible — confirm against base class behaviour.
        self.window.flip()

    def present_stimulus(self, idx: int):
        """Draw one checkerboard reversal and push a timestamped EEG marker.

        idx: trial index within the current block; its parity selects the
        reversal phase.
        """
        # Get the label of the trial (0 = left eye, 1 = right eye)
        trial_idx = self.current_block_index * self.block_trial_size + idx
        label = self.parameter[trial_idx]

        open_eye = 'left' if label == 0 else 'right'
        closed_eye = 'left' if label == 1 else 'right'

        # draw checkerboard and fixation
        if self.use_vr:
            self.window.setBuffer(open_eye)
            self.grey_background.draw()
            display = self.stim['left' if label == 0 else 'right']
        else:
            self.black_background.draw()
            display = self.stim['monoscopic']

        checkerboard_frame = idx % 2
        display['checkerboards'][checkerboard_frame].draw()
        display['fixation'].draw()

        if self.use_vr:
            self.window.setBuffer(closed_eye)
            self.black_background.draw()

        # Alternate sync patch polarity with each reversal so the photodiode
        # fires on every checkerboard flip, not just odd or even frames.
        if self.optode_patch is not None:
            self.optode_patch.fillColor = 'white' if checkerboard_frame == 0 else 'black'
            self.optode_patch.draw()

        self.window.flip()

        # Use compositor-reported predicted display time when available (VR path).
        # Falls back to time() for monitor path — apply hardware lag offset in analysis.
        software_time = time()
        predicted_display_time = getattr(self, 'predicted_display_time', None)
        if predicted_display_time is not None:
            eeg_timestamp = predicted_display_time
        else:
            eeg_timestamp = software_time

        # Pushing the sample to the EEG
        marker = self.markernames[label]
        self.eeg.push_sample(marker=marker, timestamp=eeg_timestamp)

        # Log per-trial timing metadata for post-hoc validation.
        # FIX: use `is not None` rather than truthiness — a predicted display
        # time of exactly 0.0 is a valid timestamp and must not be dropped.
        if predicted_display_time is not None:
            delta_ms = (predicted_display_time - software_time) * 1000
        else:
            delta_ms = None
        self._timing_writer.writerow(
            [trial_idx, software_time, predicted_display_time, delta_ms, self.use_vr]
        )

    def __del__(self):
        # Best-effort close of the timing sidecar. NOTE(review): __del__ is not
        # guaranteed to run at interpreter shutdown; consider an explicit close
        # on the experiment teardown path as well.
        if hasattr(self, '_timing_file') and not self._timing_file.closed:
            self._timing_file.close()

    def present_iti(self):
        """Blank both eye buffers during the inter-trial interval (VR only)."""
        if self.use_vr:
            for eye in ['left', 'right']:
                self.window.setBuffer(eye)
                self.black_background.draw()
            self.window.flip()
- rust # used by docsbuild - pip - pip: # Install package with only Analysis requirements - - -e ..[docsbuild] \ No newline at end of file + - -e ..[docsbuild] diff --git a/environments/eeg-expy-full.yml b/environments/eeg-expy-full.yml index d3c160f01..224eb9908 100644 --- a/environments/eeg-expy-full.yml +++ b/environments/eeg-expy-full.yml @@ -4,6 +4,7 @@ channels: dependencies: # System-level dependencies - python>=3.8,<=3.10 # psychopy <= 3.10 + - setuptools - dukpy==0.2.3 # psychopy dependency, avoid failing due to building wheel on win 3.9. - numpy # fix PsychXR numpy dependency DLL issues on Windows - pytables # install pytables for macOS arm64, so do not need to build from source. diff --git a/environments/eeg-expy-stimpres.yml b/environments/eeg-expy-stimpres.yml index de7ed1178..069d1020b 100644 --- a/environments/eeg-expy-stimpres.yml +++ b/environments/eeg-expy-stimpres.yml @@ -4,6 +4,7 @@ channels: dependencies: # System-level dependencies - python>=3.8,<=3.10 # psychopy <= 3.10 + - setuptools - dukpy==0.2.3 # psychopy dependency, avoid failing due to building wheel on win 3.9. - wxpython>=4.0 # install wxpython to prevent error on macOS arm64: "site-packages/wx/_core.cpython-38-darwin.so, 0x0002): symbol not found in flat namespace '__ZN10wxBoxSizer20InformFirstDirectionEiii'" - cffi # Fix sound ffi.callback() issue with sounddevice on macOS: https://github.com/spatialaudio/python-sounddevice/issues/397 diff --git a/environments/eeg-expy-streaming.yml b/environments/eeg-expy-streaming.yml index 2b7f87af7..554dc2d3d 100644 --- a/environments/eeg-expy-streaming.yml +++ b/environments/eeg-expy-streaming.yml @@ -4,6 +4,7 @@ channels: dependencies: # System-level dependencies - python>=3.8,<=3.13 + - setuptools - liblsl # install liblsl to prevent error on macOS and Ubuntu: "RuntimeError: LSL binary library file was not found." 
- pip - pip: diff --git a/environments/eeg-expy-streamstim.yml b/environments/eeg-expy-streamstim.yml index 8ed52571f..e6d8b1455 100644 --- a/environments/eeg-expy-streamstim.yml +++ b/environments/eeg-expy-streamstim.yml @@ -4,6 +4,7 @@ channels: dependencies: # System-level dependencies - python>=3.8,<=3.10 # psychopy <= 3.10 + - setuptools - dukpy==0.2.3 # psychopy dependency, avoid failing due to building wheel on win 3.9. - liblsl # install liblsl to prevent error on macOS and Ubuntu: "RuntimeError: LSL binary library file was not found." - wxpython>=4.0 # install wxpython to prevent error on macOS arm64: "site-packages/wx/_core.cpython-38-darwin.so, 0x0002): symbol not found in flat namespace '__ZN10wxBoxSizer20InformFirstDirectionEiii'" diff --git a/examples/visual_vep/00x__pattern_reversal_run_experiment.py b/examples/visual_vep/00x__pattern_reversal_run_experiment.py new file mode 100644 index 000000000..7e4c30e19 --- /dev/null +++ b/examples/visual_vep/00x__pattern_reversal_run_experiment.py @@ -0,0 +1,105 @@ +""" +PRVEP Run Experiment +=============================== + +This example demonstrates the initiation of an EEG stream with eeg-expy, +and how to run the Pattern Reversal VEP (PRVEP) experiment. + +The experiment presents a checkerboard that reverses its black and white squares +at 2 reversals per second, while the participant fixates a central dot. +Each reversal elicits a P100 response at occipital electrodes. + +The experiment supports both standard monitor presentation and Meta Quest VR +presentation via ``use_vr=True``. VR mode is preferred as it provides monocular +stimulation per eye without manual eye closure, and uses compositor-predicted +photon timestamps for improved timing accuracy. 
+ +""" + +################################################################################################### +# Setup +# --------------------- +# +# Imports + +from eegnb.devices import CYTON_CONFIG_GAIN_4X +import platform +from os import path, getenv +from dotenv import load_dotenv +load_dotenv() + +from eegnb import generate_save_fn +from eegnb.devices import CYTON_CONFIG_GAIN_12X +from eegnb.devices.eeg import EEG +from eegnb.experiments.visual_vep import VisualPatternReversalVEP + +################################################################################################### +# Configuration +# --------------------- +# +# Set your experiment parameters here before running. +# + +# Display: set use_vr=True for Meta Quest, False for monitor +use_vr = False + +# Device: "cyton", "unicorn", "muse2", etc. +device = "cyton" + +# Serial port: "COM3" for Windows, "/dev/ttyUSB0" for Linux +serial_port = "COM3" + +# Config: Only needed for Thinkpulse active electrodes, otherwise leave as None. +config = CYTON_CONFIG_GAIN_4X + +# Electrode montage type: "cap" or "mark-iv" +montage_type = "mark-iv" +ch_names = ["Fp1", "Fp2", "C1", "C2", "O1", "O2", "POz", "Oz"] + +# Subject and session identifiers +subject_id = 1 +session_nb = 0 + +################################################################################################### +# Initiate EEG device +# --------------------- +# +# Start EEG device based on configuration above. 
+eeg_device = EEG(device, serial_port, ch_names, config) + +################################################################################################### +# Display and save path setup +# --------------------- + +if use_vr: + refresh_rate = 120 + display = "quest-2_{}Hz".format(refresh_rate) +else: + refresh_rate = 100 + display = "acer-34-predator_{}Hz".format(refresh_rate) + +site="{}_{}".format(display, montage_type) +data_dir = path.join(path.expanduser("~/"), getenv("DATA_DIR", ".eegnb")) +save_fn = generate_save_fn(eeg_device.device_name, + experiment="block_pattern-reversal", + site=site, + subject_id=subject_id, + session_nb=session_nb, + data_dir=data_dir) +print(save_fn) + +################################################################################################### +# Run experiment +# --------------------- +# +# Run the Pattern Reversal VEP. The experiment will present alternating checkerboard +# blocks for each eye (or for both eyes on monitor). Press spacebar/controller trigger +# at each block instruction prompt to begin that block. + +pattern_reversal_vep = VisualPatternReversalVEP( + display_refresh_rate=refresh_rate, + eeg=eeg_device, + save_fn=save_fn, + use_vr=use_vr +) +pattern_reversal_vep.run() diff --git a/examples/visual_vep/01r__pattern_reversal_viz.py b/examples/visual_vep/01r__pattern_reversal_viz.py new file mode 100644 index 000000000..940308d37 --- /dev/null +++ b/examples/visual_vep/01r__pattern_reversal_viz.py @@ -0,0 +1,176 @@ +""" +Pattern Reversal VEP Visualization +================================== + +This example demonstrates loading, organizing, and visualizing EP response data +from the visual Pattern Reversal VEP (PR-VEP) experiment. + +An animation of a checkerboard reversal is shown (the checkerboard squares' +colours are toggled once each half a second). 
+ +The data used is the first subject and first session of the eeg-expy PR-VEP +example dataset, recorded using a g.tec Unicorn EEG headset with electrodes +placed at occipital locations (O1, Iz, O2, PO1, PO2) fitted around a Meta +Quest 3S headset. The session used the Meta Quest 3S linked with a PC to +display the checkerboard reversal animation in VR, alternating monocular +stimulation between left and right eye across blocks. + +We first use ``fetch_dataset`` to obtain the data files. If the files are not +already present in the local data directory they will be downloaded from the +cloud. + +After loading the data from the occipital channels, we place it in an MNE +``Epochs`` object, and then an ``Evoked`` object to obtain the trial-averaged +response. The final figures show the P100 response ERP waveform, a comparison +between eyes, and the interocular difference wave. + +""" + +################################################################################################### +# Setup +# ----- + +import os +import numpy as np +import warnings +warnings.filterwarnings('ignore') +import matplotlib.pyplot as plt + +from mne import Epochs, find_events + +from eegnb.analysis.utils import load_data +from eegnb.analysis import vep_utils +from eegnb.analysis.vep_utils import plot_vep +from eegnb.datasets import fetch_dataset + +# sphinx_gallery_thumbnail_number = 3 + +################################################################################################### +# Hardware lag definitions +# ------------------------ +# +# Known display-pipeline offsets for different setups, subtracted from software +# timestamps so that t=0 corresponds to actual photon delivery. +# + +def windows_quest3s_usb_unicorn_lag(): + return 0.0368 + +################################################################################################### +# Load Data +# --------- +# +# Download the PR-VEP example dataset if it is not already present locally. 
+# + +eegnb_data_path = os.path.join(os.path.expanduser('~/'), '.eegnb', 'data') +prvep_data_path = os.path.join(eegnb_data_path, 'visual-PRVEP', 'eegnb_examples') + +if not os.path.isdir(prvep_data_path): + fetch_dataset(data_dir=eegnb_data_path, experiment='visual-PRVEP', site='eegnb_examples') + +raw = load_data(subject=1, session=0, + experiment='visual-PRVEP', site='eegnb_examples', device_name='unicorn', + data_dir=eegnb_data_path) + +################################################################################################### +# Visualize the power spectrum +# ---------------------------- + +raw.plot_psd() + +################################################################################################### +# Filtering +# --------- +# +# Use FIR rather than IIR to keep linear phase. +# + +raw.filter(1, 30, method='fir') +raw.plot_psd(fmin=1, fmax=30) + +################################################################################################### +# Epoching +# -------- +# +# Epoch around stimulus onsets, separating left- and right-eye trials. 
+# + +events = find_events(raw) +event_id = {'left_eye': 1, 'right_eye': 2} + +epochs = Epochs(raw, events=events, event_id=event_id, + tmin=-0.1, tmax=0.4, baseline=None, + reject={'eeg': 65e-6}, preload=True, + verbose=False, picks=[7]) + +epochs.shift_time(-windows_quest3s_usb_unicorn_lag()) +print('sample drop %: ', (1 - len(epochs.events)/len(events)) * 100) + +################################################################################################### +# Epoch average +# ------------- + +evoked = epochs.average() +evoked.plot(spatial_colors=True, show=False) + +evoked_potentials_left = epochs['left_eye'].average(picks=['Oz']) +plot_vep(evoked_potentials_left) + +evoked_potentials_right = epochs['right_eye'].average(picks=['Oz']) +plot_vep(evoked_potentials_right) + +################################################################################################### +# Compare evoked potentials by eye +# --------------------------------- + +evoked_left = epochs['left_eye'].average(picks=['Oz']) +evoked_right = epochs['right_eye'].average(picks=['Oz']) + +fig, ax = plt.subplots(figsize=(10, 6)) +times = evoked_left.times * 1000 +left_data = evoked_left.data[0] * 1e6 +right_data = evoked_right.data[0] * 1e6 + +ax.plot(times, left_data, label='Left Eye', color='blue', linewidth=2) +ax.plot(times, right_data, label='Right Eye', color='red', linewidth=2) +ax.set_xlabel('Time (ms)') +ax.set_ylabel('Amplitude (μV)') +ax.set_title('Comparison of Evoked Potentials: Left Eye vs Right Eye') +ax.legend() +ax.grid(True, alpha=0.3) +ax.axhline(y=0, color='black', linestyle='-', alpha=0.3) +ax.axvline(x=0, color='black', linestyle='--', alpha=0.5, label='Stimulus Onset') +plt.tight_layout() +plt.show() + +print(f"Left eye - Number of epochs: {len(epochs['left_eye'])}") +print(f"Right eye - Number of epochs: {len(epochs['right_eye'])}") + +p100_window = (80, 120) +time_mask = (times >= p100_window[0]) & (times <= p100_window[1]) + +left_p100_idx = 
np.argmax(left_data[time_mask]) +right_p100_idx = np.argmax(right_data[time_mask]) + +print(f"\nP100 Peak Analysis:") +print(f"Left eye - Peak at {times[time_mask][left_p100_idx]:.1f}ms, amplitude: {left_data[time_mask][left_p100_idx]:.2f}μV") +print(f"Right eye - Peak at {times[time_mask][right_p100_idx]:.1f}ms, amplitude: {right_data[time_mask][right_p100_idx]:.2f}μV") + +################################################################################################### +# Interocular difference wave +# --------------------------- + +difference_data = left_data - right_data + +fig, ax = plt.subplots(figsize=(10, 6)) +ax.plot(times, difference_data, label='Left - Right', color='green', linewidth=2) +ax.set_xlabel('Time (ms)') +ax.set_ylabel('Amplitude Difference (μV)') +ax.set_title('Difference Wave: Left Eye - Right Eye') +ax.grid(True, alpha=0.3) +ax.axhline(y=0, color='black', linestyle='-', alpha=0.3) +ax.axvline(x=0, color='black', linestyle='--', alpha=0.5, label='Stimulus Onset') +ax.legend() +plt.tight_layout() +plt.show() diff --git a/examples/visual_vep/02r__pattern_reversal_longitudinal.py b/examples/visual_vep/02r__pattern_reversal_longitudinal.py new file mode 100644 index 000000000..92d2cd7af --- /dev/null +++ b/examples/visual_vep/02r__pattern_reversal_longitudinal.py @@ -0,0 +1,199 @@ +""" +Longitudinal P100 Tracking +=========================== + +This example demonstrates how to load multiple PR-VEP recording sessions +for the same subject and track P100 latency over time. This is useful for +monitoring changes in visual pathway conduction — for instance during nerve +recovery, remyelination, or neuroplasticity studies — where latency shifts +of a few milliseconds between sessions are meaningful. + +The workflow is: + +1. Discover all sessions for a given subject. +2. For each session, epoch around stimulus onsets, extract the per-eye P100 + latency using parabolic interpolation (sub-sample precision), and store + the results. +3. 
Plot per-eye P100 latency and interocular difference over sessions. + +Before attributing a latency change to an intervention, record several +baseline sessions (at least 3–5, ideally over 1–2 weeks) to establish your +individual test-retest range. + +""" + +################################################################################################### +# Setup +# ----- + +import os +import glob +import numpy as np +import warnings +warnings.filterwarnings('ignore') +import matplotlib.pyplot as plt +import matplotlib.dates as mdates +from datetime import datetime + +from mne import Epochs, find_events + +from eegnb.analysis.utils import load_csv_as_raw +from eegnb.analysis.vep_utils import get_peak +from eegnb.devices.utils import EEG_INDICES, SAMPLE_FREQS + +################################################################################################### +# Configuration +# ------------- +# +# Point ``data_root`` at the directory tree that contains your recordings. +# The expected layout follows the eeg-expy convention:: +# +# data_root/subject{XXXX}/session{XXX}/recording_*.csv +# +# Adjust ``device_name``, ``subject_id``, and ``hardware_lag`` for your setup. +# + +data_root = os.path.join(os.path.expanduser('~/'), 'eeg-data', 'visual-PRVEP') +device_name = 'cyton' +subject_id = 1 +hardware_lag = 0.0 # seconds — set to your measured display-pipeline offset + +################################################################################################### +# Discover sessions +# ----------------- +# +# Scan the subject directory for session folders and sort them by the +# recording timestamp embedded in the CSV filename. 
+# + +subject_dir = os.path.join(data_root, f'subject{subject_id:04d}') +session_dirs = sorted(glob.glob(os.path.join(subject_dir, 'session*'))) + +print(f'Found {len(session_dirs)} sessions for subject {subject_id}') + +################################################################################################### +# Extract P100 per session +# ------------------------ +# +# For each session we: +# +# - Load the CSV into an MNE Raw object +# - Band-pass filter 1–30 Hz (FIR) +# - Epoch around stimulus markers, separating left and right eye +# - Subtract the hardware lag so t=0 is true photon delivery +# - Extract N75, P100, and N145 latencies with parabolic interpolation +# + +sfreq = SAMPLE_FREQS[device_name] +ch_ind = EEG_INDICES[device_name] + +results = [] + +for session_dir in session_dirs: + csv_files = sorted(glob.glob(os.path.join(session_dir, 'recording_*.csv'))) + if not csv_files: + continue + + # Parse recording date from filename + fname = os.path.basename(csv_files[0]) + date_str = fname.replace('recording_', '').replace('.csv', '') + try: + session_date = datetime.strptime(date_str, '%Y-%m-%d-%H.%M.%S') + except ValueError: + session_date = None + + session_label = os.path.basename(session_dir) + + raw = load_csv_as_raw(csv_files, sfreq=sfreq, ch_ind=ch_ind, + replace_ch_names=None, verbose=0) + raw.filter(1, 30, method='fir', verbose=False) + + events = find_events(raw, verbose=False) + event_id = {'left_eye': 1, 'right_eye': 2} + + epochs = Epochs(raw, events=events, event_id=event_id, + tmin=-0.1, tmax=0.4, baseline=None, + reject={'eeg': 65e-6}, preload=True, + verbose=False) + + if hardware_lag: + epochs.shift_time(-hardware_lag) + + drop_pct = (1 - len(epochs.events) / len(events)) * 100 + + session_result = { + 'session': session_label, + 'date': session_date, + 'n_epochs_left': len(epochs['left_eye']), + 'n_epochs_right': len(epochs['right_eye']), + 'drop_pct': drop_pct, + } + + for eye in ['left_eye', 'right_eye']: + if len(epochs[eye]) 
< 10: + session_result[f'{eye}_p100'] = np.nan + continue + + evoked = epochs[eye].average(picks=['Oz']) + + n75_latency = get_peak('N75', evoked, 0.06, 0.125, 'neg') + p100_latency = get_peak('P100', evoked, n75_latency, n75_latency + 0.1, 'pos') + + session_result[f'{eye}_p100'] = p100_latency * 1e3 # convert to ms + + results.append(session_result) + +print(f'\nExtracted P100 from {len(results)} sessions') + +################################################################################################### +# Summary table +# ------------- + +print(f'\n{"Session":<14} {"Date":<12} {"L-eye P100":>11} {"R-eye P100":>11} ' + f'{"IOD":>8} {"Drop%":>6} {"L-epochs":>9} {"R-epochs":>9}') +print('-' * 82) +for r in results: + date_str = r['date'].strftime('%Y-%m-%d') if r['date'] else '—' + left = r['left_eye_p100'] + right = r['right_eye_p100'] + iod = left - right if not (np.isnan(left) or np.isnan(right)) else np.nan + print(f'{r["session"]:<14} {date_str:<12} {left:>9.2f}ms {right:>9.2f}ms ' + f'{iod:>6.2f}ms {r["drop_pct"]:>5.1f}% {r["n_epochs_left"]:>9} {r["n_epochs_right"]:>9}') + +################################################################################################### +# Plot P100 latency over sessions +# -------------------------------- + +dates = [r['date'] for r in results] +left_p100 = [r['left_eye_p100'] for r in results] +right_p100 = [r['right_eye_p100'] for r in results] +iod = [l - r if not (np.isnan(l) or np.isnan(r)) else np.nan + for l, r in zip(left_p100, right_p100)] + +use_dates = all(d is not None for d in dates) +x = dates if use_dates else range(len(results)) + +fig, (ax1, ax2) = plt.subplots(2, 1, figsize=(10, 8), sharex=True) + +# Per-eye P100 latency +ax1.plot(x, left_p100, 'o-', label='Left eye', color='blue', linewidth=2) +ax1.plot(x, right_p100, 's-', label='Right eye', color='red', linewidth=2) +ax1.set_ylabel('P100 latency (ms)') +ax1.set_title('P100 Latency Across Sessions') +ax1.legend() +ax1.grid(True, alpha=0.3) + +# 
Interocular difference +ax2.plot(x, iod, 'D-', color='green', linewidth=2) +ax2.axhline(y=0, color='black', linestyle='--', alpha=0.3) +ax2.set_ylabel('Interocular difference (ms)') +ax2.set_xlabel('Session date' if use_dates else 'Session index') +ax2.set_title('Interocular P100 Latency Difference (Left − Right)') +ax2.grid(True, alpha=0.3) + +if use_dates: + ax2.xaxis.set_major_formatter(mdates.DateFormatter('%Y-%m-%d')) + fig.autofmt_xdate() + +plt.tight_layout() +plt.show() diff --git a/examples/visual_vep/README.txt b/examples/visual_vep/README.txt new file mode 100644 index 000000000..e201880e7 --- /dev/null +++ b/examples/visual_vep/README.txt @@ -0,0 +1 @@ +Visual VEP diff --git a/requirements.txt b/requirements.txt index 001b69229..c75a6e5ea 100644 --- a/requirements.txt +++ b/requirements.txt @@ -15,7 +15,6 @@ pysocks>=1.7.1 pyserial>=3.5 h5py>=3.1.0 pytest-shutil -pyo>=1.0.3; platform_system == "Linux" #pynput requires pyobjc, psychopy requires a version less than 8, setting pyobjc to # a specific version prevents an endless dependency resolution loop. pyobjc==7.3; sys_platform == 'darwin' @@ -26,6 +25,7 @@ attrdict3 ## ~~ Streaming Requirements ~~ +pyxid2 muselsl>=2.0.2 # Upgrade from 1.10.5 to 1.16.2 so the arm64 lib is available to macOS Apple Silicon for preventing error: # pylsl/liblsl64.dylib' (mach-o file, but is an incompatible architecture (have 'x86_64', need 'arm64e' or 'arm64')) @@ -35,7 +35,6 @@ pysocks>=1.7.1 pyserial>=3.5 h5py>=3.1.0 pytest-shutil -pyo>=1.0.3; platform_system == "Linux" #pynput requires pyobjc, psychopy requires a version less than 8, setting pyobjc to # a specific version prevents an endless dependency resolution loop. 
pyobjc==7.3; sys_platform == 'darwin' @@ -68,7 +67,6 @@ pysocks>=1.7.1 pyserial>=3.5 h5py>=3.1.0 pytest-shutil -pyo>=1.0.3; platform_system == "Linux" airium>=0.1.0 attrdict>=2.0.1 attrdict3 @@ -87,9 +85,12 @@ pyglet==1.4.11 ; platform_system == "Windows" psychxr>=0.2.4rc2; platform_system == "Windows" and python_version <= "3.9" +# Used for generating checkerboard in pattern reversal experiment +stimupy - -## ~~ Docsbuild Requirements ~~ +## ~~ Docsbuild Requirements ~~ +setuptools # brainflow imports pkg_resources at runtime; not included by default in Python 3.10+ envs +python-dotenv recommonmark brainflow numpydoc