Skip to content

Commit

Permalink
Merge branch 'main' into session_metrics
Browse files Browse the repository at this point in the history
  • Loading branch information
nkeesey authored Dec 2, 2024
2 parents 4945819 + ce36c2d commit ed9b11b
Show file tree
Hide file tree
Showing 8 changed files with 453 additions and 57 deletions.
79 changes: 79 additions & 0 deletions README.md
Original file line number Diff line number Diff line change
Expand Up @@ -27,6 +27,85 @@ To develop the code, run
pip install -e .[dev]
```

## Usage
### Annotate licks
To create a dataframe of licks annotated with licking bout starts/stops, cue-responsive licks, reward-triggered licks, and intertrial choices:
```
import aind_dynamic_foraging_basic_analysis.licks.annotation as annotation
df_licks = annotation.annotate_licks(nwb)
```

You can then plot interlick interval analyses with:
```
import aind_dynamic_foraging_basic_analysis.licks.plot_interlick_interval as pii
#Plot interlick interval of all licks
pii.plot_interlick_interval(df_licks)
#plot interlick interval for left and right licks separately
pii.plot_interlick_interval(df_licks, categories='event')
```

### Create lick analysis report
To create a figure with several licking pattern analyses:

```
import aind_dynamic_foraging_basic_analysis.licks.lick_analysis as lick_analysis
lick_analysis.plot_lick_analysis(nwb)
```

### Compute trial by trial metrics
To annotate the trials dataframe with trial by trial metrics:

```
import aind_dynamic_foraging_basic_analysis.metrics.trial_metrics as tm
df_trials = tm.compute_all_trial_metrics(nwb)
```

### Plot interactive session scroller
```
import aind_dynamic_foraging_basic_analysis.plot.plot_session_scroller as pss
pss.plot_session_scroller(nwb)
```

To disable lick bout and other annotations:
```
pss.plot_session_scroller(nwb,plot_bouts=False)
```

This function will automatically plot FIP data if available. To change which processing method is plotted, use:
```
pss.plot_session_scroller(nwb, processing="bright")
```

To change which trial-by-trial metrics are plotted:
```
pss.plot_session_scroller(nwb, metrics=['response_rate'])
```

### Plot FIP PSTH
You can use the `plot_fip` module to compute and plot PSTHs for the FIP data.

To compare one channel to multiple event types
```
from aind_dynamic_foraging_basic_analysis.plot import plot_fip as pf
channel = 'G_1_dff-poly'
rewarded_go_cues = nwb.df_trials.query('earned_reward == 1')['goCue_start_time_in_session'].values
unrewarded_go_cues = nwb.df_trials.query('earned_reward == 0')['goCue_start_time_in_session'].values
pf.plot_fip_psth_compare_alignments(
nwb,
{'rewarded goCue':rewarded_go_cues,'unrewarded goCue':unrewarded_go_cues},
channel,
censor=True
)
```

To compare multiple channels to the same event type:
```
pf.plot_fip_psth(nwb, 'goCue_start_time')
```


## Contributing

### Linters and testing
Expand Down
2 changes: 1 addition & 1 deletion src/aind_dynamic_foraging_basic_analysis/__init__.py
Original file line number Diff line number Diff line change
@@ -1,6 +1,6 @@
"""Init package"""

__version__ = "0.3.5"
__version__ = "0.3.7"

from .foraging_efficiency import compute_foraging_efficiency # noqa: F401
from .plot.plot_foraging_session import plot_foraging_session # noqa: F401
122 changes: 119 additions & 3 deletions src/aind_dynamic_foraging_basic_analysis/metrics/trial_metrics.py
Original file line number Diff line number Diff line change
@@ -1,21 +1,24 @@
"""
Tools for computing trial by trial metrics
df_trials = compute_all_trial_metrics(nwb)
df_trials = compute_trial_metrics(nwb)
df_trials = compute_bias(nwb)
"""

import pandas as pd
import numpy as np

import aind_dynamic_foraging_models.logistic_regression.model as model

# TODO, we might want to make these parameters metric specific
WIN_DUR = 15
MIN_EVENTS = 2

LEFT, RIGHT, IGNORE = 0, 1, 2


def compute_trial_metrics(nwb):
"""
Computes all trial by trial metrics
Computes trial by trial metrics
response_rate, fraction of trials with a response
gocue_reward_rate, fraction of trials with a reward
Expand All @@ -38,6 +41,7 @@ def compute_trial_metrics(nwb):

df_trials = nwb.df_trials.copy()


# --- Add reward-related columns ---
df_trials["reward"] = False
df_trials.loc[
Expand Down Expand Up @@ -143,6 +147,7 @@ def compute_trial_metrics(nwb):
df_trials.loc[i, "n_valid_licks_right"] = 0
df_trials.loc[i, "n_valid_licks_all"] = 0


df_trials["RESPONDED"] = [x in [0, 1] for x in df_trials["animal_response"].values]
# Rolling fraction of goCues with a response
df_trials["response_rate"] = (
Expand All @@ -168,6 +173,16 @@ def compute_trial_metrics(nwb):
df_trials["WENT_RIGHT"].rolling(WIN_DUR, min_periods=MIN_EVENTS, center=True).mean()
)

# TODO, add from process_nwb
# trial duration (stop-time - start-time) (start/stop time, or gocue to gocue?)
# n_licks_left (# of left licks in response window)
# n_licks_left_total (# of left licks from goCue to next go cue)
# Same for Right, same for all
# intertrial choices (boolean)
# number of intertrial choices
# number of intertrial switches
# response switch or repeat

# Clean up temp columns
drop_cols = [
"RESPONDED",
Expand All @@ -176,4 +191,105 @@ def compute_trial_metrics(nwb):
]
df_trials = df_trials.drop(columns=drop_cols)


return df_trials


def compute_bias(nwb):
    """
    Computes side bias by fitting a logistic regression model.

    Bias is evaluated every `compute_every` trials on a trailing window of at
    most `max_window` trials, then merged back onto the trials table and
    filled forward/backward so every trial has a value.

    Parameters
    ----------
    nwb : object
        Must expose a `df_trials` pandas DataFrame with columns
        `trial`, `animal_response`, `earned_reward`, and `extra_reward`.

    Returns
    -------
    pandas.DataFrame or None
        A copy of nwb.df_trials with the following columns added:
        bias, the side bias (negative = leftward, positive = rightward)
        bias_ci_lower, the lower confidence interval on the bias
        bias_ci_upper, the upper confidence interval on the bias
        Returns None (after printing a hint) if `df_trials` is missing.
    """

    # Parameters for computing bias
    n_trials_back = 5  # number of past trials used as regressors in the model
    max_window = 200  # maximum number of trials in each fitting window
    cv = 1  # cross-validation folds passed to the logistic regression
    compute_every = 10  # recompute bias every N trials
    BIAS_LIMIT = 10  # stand-in for +/- infinity on the confidence interval

    # Make sure trials table has been computed
    if not hasattr(nwb, "df_trials"):
        print("You need to compute df_trials: nwb_utils.create_trials_df(nwb)")
        return

    # extract choice and reward; animal_response == 2 means "no response"
    df_trials = nwb.df_trials.copy()
    df_trials["choice"] = [np.nan if x == 2 else x for x in df_trials["animal_response"]]
    df_trials["reward"] = [
        any(x) for x in zip(df_trials["earned_reward"], df_trials["extra_reward"])
    ]

    # Set up lists to store results
    bias = []
    ci_lower = []
    ci_upper = []
    C = []

    # Iterate over trials and compute
    compute_on = np.arange(compute_every, len(df_trials), compute_every)
    for i in compute_on:
        # Determine interval to compute on
        start = np.max([0, i - max_window])
        end = i

        # extract choice and reward
        # NOTE: .loc slicing is label-based and inclusive of `end`
        choice = df_trials.loc[start:end]["choice"].values
        reward = df_trials.loc[start:end]["reward"].values

        # Determine if we have valid data to fit model
        unique = np.unique(choice[~np.isnan(choice)])
        if len(unique) == 0:
            # no choices, report bias confidence as (-inf, +inf)
            bias.append(np.nan)
            ci_lower.append(-BIAS_LIMIT)
            ci_upper.append(BIAS_LIMIT)
            C.append(np.nan)
        elif len(unique) == 2:
            # Both sides were chosen; fit the logistic regression model
            out = model.fit_logistic_regression(
                choice, reward, n_trial_back=n_trials_back, cv=cv, fit_exponential=False
            )
            bias.append(out["df_beta"].loc["bias"]["bootstrap_mean"].values[0])
            ci_lower.append(out["df_beta"].loc["bias"]["bootstrap_CI_lower"].values[0])
            ci_upper.append(out["df_beta"].loc["bias"]["bootstrap_CI_upper"].values[0])
            C.append(out["C"])
        elif unique[0] == 0:
            # only left choices, report bias confidence as (-inf, 0)
            bias.append(-1)
            ci_lower.append(-BIAS_LIMIT)
            ci_upper.append(0)
            C.append(np.nan)
        elif unique[0] == 1:
            # only right choices, report bias confidence as (0, +inf)
            bias.append(+1)
            ci_lower.append(0)
            ci_upper.append(BIAS_LIMIT)
            C.append(np.nan)

    # Pack results into a dataframe
    # NOTE(review): bias_C is collected here but intentionally not merged
    # onto the returned trials table below — confirm whether it is needed.
    df = pd.DataFrame()
    df["trial"] = compute_on
    df["bias"] = bias
    df["bias_ci_lower"] = ci_lower
    df["bias_ci_upper"] = ci_upper
    df["bias_C"] = C

    # merge onto trials dataframe, dropping any stale bias columns first
    df_trials = pd.merge(
        nwb.df_trials.drop(columns=["bias", "bias_ci_lower", "bias_ci_upper"], errors="ignore"),
        df[["trial", "bias", "bias_ci_lower", "bias_ci_upper"]],
        how="left",
        on=["trial"],
    )

    # fill in bias on non-computed trials (backward then forward fill)
    df_trials["bias"] = df_trials["bias"].bfill().ffill()
    df_trials["bias_ci_lower"] = df_trials["bias_ci_lower"].bfill().ffill()
    df_trials["bias_ci_upper"] = df_trials["bias_ci_upper"].bfill().ffill()

    return df_trials
Loading

0 comments on commit ed9b11b

Please sign in to comment.