This notebook outlines the stimulus presentations table and the trials table and shows how you can use them to align behavioral data like running, licking and pupil info to task events. Please note that the VBN project used the same detection of change task as the Visual Behavior 2-Photon dataset. Users are encouraged to explore the documentation and example notebooks for that project for additional context.
import os
import numpy as np
import pandas as pd
import matplotlib.pyplot as plt
from allensdk.brain_observatory.behavior.behavior_project_cache.\
behavior_neuropixels_project_cache \
import VisualBehaviorNeuropixelsProjectCache
C:\Users\svc_ccg\AppData\Local\Continuum\anaconda3\lib\site-packages\requests\__init__.py:91: RequestsDependencyWarning: urllib3 (1.26.9) or chardet (3.0.4) doesn't match a supported version! RequestsDependencyWarning)
# Local directory where downloaded NWB files and metadata tables are cached
cache_dir = r"C:\Users\svc_ccg\Desktop\Data\vbn_cache"
# Instantiate the project cache backed by the public S3 bucket
cache = VisualBehaviorNeuropixelsProjectCache.from_s3_cache(
cache_dir=cache_dir)
# NOTE(review): get_ecephys_session_table() appears to return a tuple here and
# element 0 is the sessions dataframe -- confirm against the installed SDK version
ecephys_sessions_table = cache.get_ecephys_session_table()[0]
# Pick one session id and download/load its NWB file (several GB; slow first time)
session_id = ecephys_sessions_table.index.values[50]
session = cache.get_ecephys_session(
ecephys_session_id=session_id)
ecephys_session_1069193611.nwb: 100%|████████████████████████████████████████████| 2.83G/2.83G [02:10<00:00, 21.6MMB/s] C:\Users\svc_ccg\AppData\Local\Continuum\anaconda3\lib\site-packages\hdmf\spec\namespace.py:533: UserWarning: Ignoring cached namespace 'hdmf-common' version 1.5.1 because version 1.5.0 is already loaded. % (ns['name'], ns['version'], self.__namespaces.get(ns['name'])['version'])) C:\Users\svc_ccg\AppData\Local\Continuum\anaconda3\lib\site-packages\hdmf\spec\namespace.py:533: UserWarning: Ignoring cached namespace 'core' version 2.4.0 because version 2.3.0 is already loaded. % (ns['name'], ns['version'], self.__namespaces.get(ns['name'])['version'])) C:\Users\svc_ccg\AppData\Local\Continuum\anaconda3\lib\site-packages\hdmf\spec\namespace.py:533: UserWarning: Ignoring cached namespace 'hdmf-experimental' version 0.2.0 because version 0.1.0 is already loaded. % (ns['name'], ns['version'], self.__namespaces.get(ns['name'])['version']))
Let's grab a random session and look at the stimulus presentations dataframe.
# Load a different session to use for the rest of the tutorial
session_id = ecephys_sessions_table.index.values[20]
session = cache.get_ecephys_session(
ecephys_session_id=session_id)
ecephys_session_1063010385.nwb: 100%|████████████████████████████████████████████| 2.39G/2.39G [02:04<00:00, 19.1MMB/s] C:\Users\svc_ccg\AppData\Local\Continuum\anaconda3\lib\site-packages\hdmf\spec\namespace.py:533: UserWarning: Ignoring cached namespace 'hdmf-common' version 1.5.1 because version 1.5.0 is already loaded. % (ns['name'], ns['version'], self.__namespaces.get(ns['name'])['version'])) C:\Users\svc_ccg\AppData\Local\Continuum\anaconda3\lib\site-packages\hdmf\spec\namespace.py:533: UserWarning: Ignoring cached namespace 'core' version 2.4.0 because version 2.3.0 is already loaded. % (ns['name'], ns['version'], self.__namespaces.get(ns['name'])['version'])) C:\Users\svc_ccg\AppData\Local\Continuum\anaconda3\lib\site-packages\hdmf\spec\namespace.py:533: UserWarning: Ignoring cached namespace 'hdmf-experimental' version 0.2.0 because version 0.1.0 is already loaded. % (ns['name'], ns['version'], self.__namespaces.get(ns['name'])['version']))
# One row per individual stimulus presentation over the whole session
stimulus_presentations = session.stimulus_presentations
stimulus_presentations.head()
active | color | contrast | duration | end_frame | flashes_since_change | image_name | is_change | omitted | orientation | ... | position_y | rewarded | spatial_frequency | start_frame | start_time | stimulus_block | stimulus_index | stimulus_name | stop_time | temporal_frequency | |
---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|
stimulus_presentations_id | |||||||||||||||||||||
0 | True | NaN | NaN | 0.250214 | 75 | 0.0 | im104_r | False | False | NaN | ... | NaN | False | NaN | 60 | 25.710908 | 0 | NaN | Natural_Images_Lum_Matched_set_ophys_H_2019 | 25.961122 | NaN |
1 | True | NaN | NaN | 0.250214 | 120 | 1.0 | im104_r | False | False | NaN | ... | NaN | False | NaN | 105 | 26.461549 | 0 | NaN | Natural_Images_Lum_Matched_set_ophys_H_2019 | 26.711763 | NaN |
2 | True | NaN | NaN | NaN | 165 | 1.0 | omitted | False | True | NaN | ... | NaN | False | NaN | 150 | 27.212170 | 0 | NaN | Natural_Images_Lum_Matched_set_ophys_H_2019 | 27.462374 | NaN |
3 | True | NaN | NaN | 0.250219 | 210 | 2.0 | im104_r | False | False | NaN | ... | NaN | False | NaN | 195 | 27.962797 | 0 | NaN | Natural_Images_Lum_Matched_set_ophys_H_2019 | 28.213015 | NaN |
4 | True | NaN | NaN | 0.250199 | 255 | 3.0 | im104_r | False | False | NaN | ... | NaN | False | NaN | 240 | 28.713453 | 0 | NaN | Natural_Images_Lum_Matched_set_ophys_H_2019 | 28.963652 | NaN |
5 rows × 21 columns
This table is a record of every stimulus we presented to the mouse over the course of this experiment. The different stimuli are indexed by the 'stimulus_block' column. Let's group this dataframe by stimulus block and see what stimulus was shown for each block.
stimulus_presentations.groupby('stimulus_block')[['stimulus_block', 'stimulus_name', 'active', 'duration']].head()
stimulus_block | stimulus_name | active | duration | |
---|---|---|---|---|
stimulus_presentations_id | ||||
0 | 0 | Natural_Images_Lum_Matched_set_ophys_H_2019 | True | 0.250214 |
1 | 0 | Natural_Images_Lum_Matched_set_ophys_H_2019 | True | 0.250214 |
2 | 0 | Natural_Images_Lum_Matched_set_ophys_H_2019 | True | NaN |
3 | 0 | Natural_Images_Lum_Matched_set_ophys_H_2019 | True | 0.250219 |
4 | 0 | Natural_Images_Lum_Matched_set_ophys_H_2019 | True | 0.250199 |
4797 | 1 | spontaneous | False | 10.008370 |
4798 | 2 | gabor_20_deg_250ms | False | 0.250208 |
4799 | 2 | gabor_20_deg_250ms | False | 0.250208 |
4800 | 2 | gabor_20_deg_250ms | False | 0.250208 |
4801 | 2 | gabor_20_deg_250ms | False | 0.250208 |
4802 | 2 | gabor_20_deg_250ms | False | 0.250207 |
8443 | 3 | spontaneous | False | 288.991592 |
8444 | 4 | flash_250ms | False | 0.250203 |
8445 | 4 | flash_250ms | False | 0.250208 |
8446 | 4 | flash_250ms | False | 0.250216 |
8447 | 4 | flash_250ms | False | 0.250208 |
8448 | 4 | flash_250ms | False | 0.250211 |
8594 | 5 | Natural_Images_Lum_Matched_set_ophys_H_2019 | False | 0.250208 |
8595 | 5 | Natural_Images_Lum_Matched_set_ophys_H_2019 | False | 0.250208 |
8596 | 5 | Natural_Images_Lum_Matched_set_ophys_H_2019 | False | 0.250212 |
8597 | 5 | Natural_Images_Lum_Matched_set_ophys_H_2019 | False | 0.250203 |
8598 | 5 | Natural_Images_Lum_Matched_set_ophys_H_2019 | False | 0.250212 |
This shows us the structure of this experiment (and every experiment in this dataset). There are six stimulus blocks (0-5) as follows:
block 0: Change detection task. Natural images are flashed repeatedly and the mouse is rewarded for licking when the identity of the image changes. You can find more info about this task here.
block 1: Brief gray screen
block 2: Receptive field mapping. Gabor stimuli used for receptive field mapping. For more details on this stimulus consult this notebook.
block 3: Longer gray screen
block 4: Full-field flashes, shown at 80% contrast. Flashes can be black (color = -1) or white (color = 1).
block 5: Passive replay. Frame-for-frame replay of the stimulus shown during the change detection task (block 0), but now with the lick spout retracted so the animal can no longer engage in the task.
Here's a quick explanation for each of the columns in this table:
active: Boolean indicating when the change detection task (with the lick spout available to the mouse) was run. This should only be TRUE for block 0.
stimulus_block: Index of stimulus as described in cells above.
stimulus_name: Indicates the stimulus category for this stimulus presentation.
contrast: Stimulus contrast
duration: Duration of stimulus in seconds
start_time: Experiment time when this stimulus started. This value is corrected for display lag and therefore indicates when the stimulus actually appeared on the screen.
stop_time: Experiment time when this stimulus ended, also corrected for display lag.
start_frame: Stimulus frame index when this stimulus started. This can be used to sync this table to the behavior trials table, for which behavioral data is collected every frame.
end_frame: Stimulus frame index when this stimulus ended.
flashes_since_change: Relevant for the detection of change task (block 0) and the passive replay (block 5), this column indicates how many flashes of the same image have occurred since the last stimulus change.
image_name: Indicates which natural image was flashed for this stimulus presentation. To see how to visualize this image, check out the data access tutorial notebook for this dataset.
is_change: Indicates whether the image identity changed for this stimulus presentation. When both this value and 'active' are TRUE, the mouse was rewarded for licking within the response window.
omitted: Indicates whether the image presentation was omitted for this flash. Most image flashes had a 5% probability of being omitted (producing a gray screen). Flashes immediately preceding a change or immediately following an omission could not be omitted.
rewarded: Indicates whether a reward was given after this image presentation. During the passive replay block (5), this value indicates that a reward was issued for the corresponding image presentation during the active behavior block (0). No rewards were given during passive replay.
orientation: Orientation of gabor.
position_x: Position of the gabor along azimuth. The units are in degrees relative to the center of the screen (negative values are nasal).
position_y: Position of the gabor along elevation. Negative values are lower elevation.
spatial_frequency: Spatial frequency of gabor in cycles per degree.
temporal_frequency: Temporal frequency of gabor in Hz.
color: Color of the full-field flash stimuli. "1" is white and "-1" is black.
Let's confirm that the active behavior block (0) and the passive replay block (5) match frame for frame:
# Compare the image sequence shown during the active task (block 0)
# against the passive replay (block 5); they should match flash-for-flash.
is_active_block = stimulus_presentations['stimulus_block'] == 0
is_replay_block = stimulus_presentations['stimulus_block'] == 5
active_images = stimulus_presentations.loc[is_active_block, 'image_name'].values
replay_images = stimulus_presentations.loc[is_replay_block, 'image_name'].values
np.all(active_images == replay_images)
True
Now let's explore the behavior trials table. This table contains lots of useful information about every trial in the change detection task.
# One row per behavioral trial of the change detection task
trials = session.trials
trials.head()
start_time | stop_time | initial_image_name | change_image_name | stimulus_change | change_time | go | catch | lick_times | response_time | ... | reward_volume | hit | false_alarm | miss | correct_reject | aborted | auto_rewarded | change_frame | trial_length | change_time_with_display_delay | |
---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|
trials_id | |||||||||||||||||||||
0 | 25.66618 | 26.98348 | im104_r | im104_r | False | NaN | False | False | [26.36584, 26.51607, 26.61606, 26.98348] | NaN | ... | 0.0 | False | False | False | False | True | False | NaN | 1.31730 | NaN |
1 | 27.16692 | 28.81836 | im104_r | im104_r | False | NaN | False | False | [28.46755] | NaN | ... | 0.0 | False | False | False | False | True | False | NaN | 1.65144 | NaN |
2 | 29.41882 | 31.25379 | im104_r | im104_r | False | NaN | False | False | [30.63653, 30.76949, 30.90293] | NaN | ... | 0.0 | False | False | False | False | True | False | NaN | 1.83497 | NaN |
3 | 31.67111 | 33.23862 | im104_r | im104_r | False | NaN | False | False | [32.88816, 33.22146, 33.33829, 33.42177, 33.52... | NaN | ... | 0.0 | False | False | False | False | True | False | NaN | 1.56751 | NaN |
4 | 33.92263 | 36.45841 | im104_r | im104_r | False | NaN | False | False | [35.87392, 36.02384, 36.10722] | NaN | ... | 0.0 | False | False | False | False | True | False | NaN | 2.53578 | NaN |
5 rows × 22 columns
Here you can see that unlike the stimulus presentations table in which every row corresponded to a visual stimulus presentation, for the behavior trials table every row corresponds to one trial of the change detection task. Here is a quick summary of the columns:
start_time: Experiment time when this trial began in seconds.
stop_time: Experiment time when this trial ended.
initial_image_name: Indicates which image was shown before the change (or sham change) for this trial
change_image_name: Indicates which image was scheduled to be the change image for this trial. Note that if the trial is aborted, a new trial will begin before this change occurs.
stimulus_change: Indicates whether an image change occurred for this trial.
change_time: Experiment time when the task-control computer commanded an image change (named change_time_no_display_delay in some AllenSDK versions). This change time is used to determine the response window during which a lick will trigger a reward. Note that due to display lag, this is not the time when the change image actually appears on the screen. To get this time, you need the stimulus_presentations table (more about this below).
go: Indicates whether this trial was a 'go' trial. To qualify as a go trial, an image change must occur and the trial cannot be autorewarded.
catch: Indicates whether this trial was a 'catch' trial. To qualify as a catch trial, a 'sham' change must occur during which the image identity does not change. These sham changes are drawn to match the timing distribution of real changes and can be used to calculate the false alarm rate.
lick_times: A list indicating when the behavioral control software recognized a lick. Note that this is not identical to the lick times from the licks dataframe, which record when the licks were registered by the lick sensor. The licks dataframe should generally be used for analysis of the licking behavior rather than these times.
response_time: Indicates the time when the first lick was registered by the task control software for trials that were not aborted (go or catch). NaN for aborted trials. For a more accurate measure of response time, the licks dataframe should be used.
reward_time: Indicates when the reward command was triggered for hit trials. NaN for other trial types.
reward_volume: Indicates the volume of water dispensed as reward for this trial.
hit: Indicates whether this trial was a 'hit' trial. To qualify as a hit, the trial must be a go trial during which the stimulus changed and the mouse licked within the reward window (150-750 ms after the change time).
false_alarm: Indicates whether this trial was a 'false alarm' trial. To qualify as a false alarm, the trial must be a catch trial during which a sham change occurred and the mouse licked during the reward window.
miss: To qualify as a miss trial, the trial must be a go trial during which the stimulus changed but the mouse did not lick within the response window.
correct_reject: To qualify as a correct reject trial, the trial must be a catch trial during which a sham change occurred and the mouse withheld licking.
aborted: A trial is aborted when the mouse licks before the scheduled change or sham change.
auto_rewarded: During autorewarded trials, the reward is automatically triggered after the change regardless of whether the mouse licked within the response window. These always come at the beginning of the session to help engage the mouse in behavior.
change_frame: Indicates the stimulus frame index when the change occurred. This column can be used to link the trials table with the stimulus presentations table, as shown below.
trial_length: Duration of the trial in seconds.
Let's combine info from both of these tables to calculate response latency for this session. Note that the change time in the trials table is not corrected for display lag. This is the time that the task control computer uses to determine the response window. However, to calculate response latency, we want to use the display lag corrected change times from the stimulus presentations table. Below, we will grab these corrected times and add them to the trials table under the new column label 'change_time_with_display_delay'.
from functools import partial
def get_change_time_from_stim_table(row, table):
    '''
    Given a particular row in the trials table, find the corresponding
    display-lag-corrected change time in the stimulus presentations table.

    Parameters
    ----------
    row : pd.Series
        One row of the behavior trials table; must contain 'change_frame'.
    table : pd.DataFrame
        The stimulus presentations table with 'start_frame' and 'start_time'
        columns.

    Returns
    -------
    float
        The 'start_time' of the presentation whose 'start_frame' equals this
        trial's 'change_frame'; np.nan when the trial has no change
        ('change_frame' is NaN) or no matching presentation exists.
    '''
    change_frame = row['change_frame']
    # Aborted/no-change trials carry NaN in 'change_frame'
    if np.isnan(change_frame):
        return np.nan
    matches = table.loc[table['start_frame'] == change_frame, 'start_time']
    # Guard against a change_frame with no corresponding presentation
    # (the original indexed [0] unconditionally and raised IndexError here)
    if matches.empty:
        return np.nan
    return matches.iloc[0]
# Look up the corrected change time for every trial and store it
# in a new column on the trials table.
trials['change_time_with_display_delay'] = trials.apply(
    lambda trial_row: get_change_time_from_stim_table(
        trial_row, stimulus_presentations),
    axis=1)
Now we can use this new column to calculate the response latency on 'hit' trials.
# Restrict to hit trials and measure the lick latency relative to the
# display-lag-corrected change time.
hit_trials = trials.loc[trials['hit']]
corrected_change_times = hit_trials['change_time_with_display_delay']
response_latencies = hit_trials['response_time'] - corrected_change_times
fig, ax = plt.subplots()
fig.suptitle('Response Latency Histogram for Hit trials')
# 50 bins spanning -0.1 s to 0.8 s around the change time
ax.hist(response_latencies, bins=np.linspace(-0.1, 0.8, 50))
ax.set_xlabel('Time from change (s)')
ax.set_ylabel('Trial count')
Text(0, 0.5, 'Trial count')
Note that there is one trial with a negative response latency. This happens when a lick immediately precedes the change, and the task control software doesn't have time to abort the trial. To restrict ourselves to only those licks that occur during the response window, we can do the following:
# For each hit trial, find the first lick after the response window opens.
response_window_lick_times = []
for it, trial in hit_trials.iterrows():
    # Coerce to an array in case the table stores lick times as a plain
    # Python list (boolean masking would fail on a list)
    lick_times = np.asarray(trial['lick_times'])
    # The response window opens 150 ms after the (uncorrected) change time
    response_window_start = trial['change_time'] + 0.15
    licks_in_window = lick_times[lick_times > response_window_start]
    # Hit trials should always contain such a lick; guard anyway so a
    # malformed trial yields NaN instead of an IndexError
    if len(licks_in_window) > 0:
        response_window_lick_times.append(licks_in_window[0])
    else:
        response_window_lick_times.append(np.nan)
# Latency of the first in-window lick relative to the corrected change times
# (list - ndarray works via ndarray.__rsub__ and yields an ndarray)
response_latencies = response_window_lick_times - hit_trials['change_time_with_display_delay'].values
fig, ax = plt.subplots()
fig.suptitle('Response Latency Histogram for Hit trials')
ax.hist(response_latencies, bins=np.linspace(-0.1, 0.8, 50))
ax.set_xlabel('Time from change (s)')
ax.set_ylabel('Trial count')
Text(0, 0.5, 'Trial count')
Now let's grab the licking, running and pupil tracking data for this session and align it to the behavior.
# Per-frame ellipse fits for the eye, pupil and corneal reflection
eye_tracking = session.eye_tracking
# Encoder-derived running speed, polled at ~60 Hz
running_speed = session.running_speed
# Detected lick onset times from the lick sensor
licks = session.licks
Eye tracking dataframe: One entry containing ellipse fit parameters for the eye, pupil and corneal reflection for every frame of the eye tracking video stream.
eye_tracking.head()
timestamps | cr_area | eye_area | pupil_area | likely_blink | pupil_area_raw | cr_area_raw | eye_area_raw | cr_center_x | cr_center_y | ... | eye_center_x | eye_center_y | eye_width | eye_height | eye_phi | pupil_center_x | pupil_center_y | pupil_width | pupil_height | pupil_phi | |
---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|
frame | |||||||||||||||||||||
0 | 1.34174 | NaN | NaN | NaN | True | NaN | NaN | NaN | NaN | NaN | ... | NaN | NaN | NaN | NaN | NaN | NaN | NaN | NaN | NaN | NaN |
1 | 1.35840 | NaN | NaN | NaN | True | NaN | NaN | NaN | NaN | NaN | ... | NaN | NaN | NaN | NaN | NaN | NaN | NaN | NaN | NaN | NaN |
2 | 1.37507 | NaN | NaN | NaN | True | NaN | NaN | NaN | NaN | NaN | ... | NaN | NaN | NaN | NaN | NaN | NaN | NaN | NaN | NaN | NaN |
3 | 1.39174 | NaN | NaN | NaN | True | NaN | NaN | NaN | NaN | NaN | ... | NaN | NaN | NaN | NaN | NaN | NaN | NaN | NaN | NaN | NaN |
4 | 1.40840 | NaN | NaN | NaN | True | NaN | NaN | NaN | NaN | NaN | ... | NaN | NaN | NaN | NaN | NaN | NaN | NaN | NaN | NaN | NaN |
5 rows × 23 columns
There seem to be several rows for which there are no valid data. We can use the 'likely_blink' column to filter these out.
# Drop frames flagged as likely blinks, where the ellipse fits are invalid
blink_mask = eye_tracking['likely_blink']
eye_tracking_noblinks = eye_tracking.loc[~blink_mask]
eye_tracking_noblinks.head()
timestamps | cr_area | eye_area | pupil_area | likely_blink | pupil_area_raw | cr_area_raw | eye_area_raw | cr_center_x | cr_center_y | ... | eye_center_x | eye_center_y | eye_width | eye_height | eye_phi | pupil_center_x | pupil_center_y | pupil_width | pupil_height | pupil_phi | |
---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|
frame | |||||||||||||||||||||
17 | 1.65840 | 132.486115 | 70625.134735 | 18292.276485 | False | 18292.276485 | 132.486115 | 70625.134735 | 349.237369 | 283.728001 | ... | 329.009419 | 275.387574 | 163.572471 | 137.435587 | 0.021028 | 349.972230 | 270.666912 | 74.207993 | 76.306045 | -0.371412 |
18 | 1.67508 | 133.196091 | 70636.186432 | 18187.494820 | False | 18187.494820 | 133.196091 | 70636.186432 | 349.287138 | 284.020121 | ... | 329.213380 | 275.846254 | 163.453674 | 137.556997 | 0.027826 | 350.034210 | 270.571270 | 74.254708 | 76.087183 | -0.527341 |
19 | 1.69174 | 142.640850 | 70558.979068 | 18200.499170 | False | 18200.499170 | 142.640850 | 70558.979068 | 349.460331 | 284.599632 | ... | 329.696022 | 277.131676 | 163.398004 | 137.453457 | 0.014976 | 350.428484 | 271.797634 | 74.268068 | 76.114380 | -0.428505 |
20 | 1.70841 | 145.368885 | 70492.200841 | 18144.662960 | False | 18144.662960 | 145.368885 | 70492.200841 | 349.463427 | 284.728674 | ... | 330.251743 | 277.005598 | 163.079554 | 137.591524 | 0.033417 | 350.709697 | 271.863534 | 74.039046 | 75.997537 | -0.416278 |
21 | 1.72507 | 143.348989 | 70690.285097 | 18107.560769 | False | 18107.560769 | 143.348989 | 70690.285097 | 349.244917 | 284.382902 | ... | 329.352378 | 276.858677 | 163.326985 | 137.769130 | 0.032516 | 350.052768 | 271.331949 | 74.116918 | 75.919797 | -0.415238 |
5 rows × 23 columns
Running dataframe: One entry for each read of the analog input line monitoring the encoder voltage, polled at ~60 Hz.
running_speed.head()
timestamps | speed | |
---|---|---|
0 | 24.66661 | -0.081347 |
1 | 24.68133 | 14.160151 |
2 | 24.69771 | 27.779561 |
3 | 24.71438 | 40.197108 |
4 | 24.73108 | 50.903242 |
Licking dataframe: One entry for every detected lick onset time.
licks.head()
timestamps | frame | |
---|---|---|
0 | 26.40224 | 102 |
1 | 26.54844 | 111 |
2 | 26.65058 | 117 |
3 | 27.01904 | 139 |
4 | 28.49396 | 228 |
Now let's take a look at running, licking and pupil area for one reward trial
time_before = 3.0 #how much time to plot before the reward
time_after = 3.0 #how much time to plot after the reward
reward_time = session.rewards.iloc[10]['timestamps'] #get the time of the 10th reward
#Get running data aligned to this reward
trial_running = running_speed.query('timestamps >= {} and timestamps <= {} '.
                format(reward_time-time_before, reward_time+time_after))
#Get pupil data aligned to this reward
trial_pupil_area = eye_tracking_noblinks.query('timestamps >= {} and timestamps <= {} '.
                format(reward_time-time_before, reward_time+time_after))
#Get stimulus presentations around this reward.
#Use .copy() so we modify an independent frame rather than a view of
#stimulus_presentations (avoids SettingWithCopyWarning), and use plain column
#assignment: DataFrame.at only supports scalar row/column labels, so
#.at[:, 'omitted'] is invalid usage.
behavior_presentations = stimulus_presentations[stimulus_presentations['active']].copy()
behavior_presentations['omitted'] = behavior_presentations['omitted'].astype('bool')
trial_stimuli = behavior_presentations.query('stop_time >= {} and start_time <= {} and not omitted'.
                format(reward_time-time_before, reward_time+time_after))
#Get licking aligned to this reward
trial_licking = licks.query('timestamps >= {} and timestamps <= {} '.
                format(reward_time-time_before, reward_time+time_after))
#Plot running, pupil area and licks
fig, axr = plt.subplots()
fig.set_size_inches(14,6)
# Running speed in black on the left axis
axr.plot(trial_running['timestamps'], trial_running['speed'], 'k')
# Pupil area in green on a twinned right axis
axp = axr.twinx()
axp.plot(trial_pupil_area['timestamps'], trial_pupil_area['pupil_area'], 'g')
# Mark the reward with a blue diamond and licks with magenta dots at y=0
rew_handle, = axr.plot(reward_time, 0, 'db', markersize=10)
lick_handle, = axr.plot(trial_licking['timestamps'], np.zeros(len(trial_licking['timestamps'])), 'mo')
axr.legend([rew_handle, lick_handle], ['reward', 'licks'])
axr.set_ylabel('running speed (cm/s)')
axp.set_ylabel('pupil area\n$(pixels^2)$')
axr.set_xlabel('Experiment time (s)')
# Color the right axis green to match the pupil trace
axp.yaxis.label.set_color('g')
axp.spines['right'].set_color('g')
axp.tick_params(axis='y', colors='g')
#Plot the image flashes as grey bars.
#Plot the image flashes as grey bars; the two grey levels distinguish
#the pre-change and post-change images.
colors = ['0.3', '0.8']
stimulus_colors = dict(zip(trial_stimuli['image_name'].unique(), colors))
for _, flash in trial_stimuli.iterrows():
    flash_color = stimulus_colors[flash['image_name']]
    axr.axvspan(flash['start_time'], flash['stop_time'], color=flash_color, alpha=0.5)
Here we can see that just after the stimulus change (a little past 202 seconds), the mouse abruptly stops running and begins licking. The reward is delivered shortly after the first lick. We can also see that before the change the pupil and the running become entrained to the image flashes.