Merge pull request #1 from dpmlab/dev
Adding analyses from revisions 1 and 2
Tal-Golan authored Jul 14, 2023
2 parents 84efafc + a31f4a7 commit 6a6fe5e
Showing 42 changed files with 44,729 additions and 368 deletions.
6 changes: 6 additions & 0 deletions .gitmodules
@@ -0,0 +1,6 @@
[submodule "third_party/acceptability-prediction-in-context"]
path = third_party/acceptability-prediction-in-context
url = https://github.com/jhlau/acceptability-prediction-in-context.git
[submodule "third_party/acceptability_prediction_in_context"]
path = third_party/acceptability_prediction_in_context
url = https://github.com/jhlau/acceptability-prediction-in-context.git
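
Note: both submodule entries point to the same upstream repository, checked out under two different paths. After cloning, they can be fetched with the standard command, e.g.:

git submodule update --init --recursive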
187 changes: 187 additions & 0 deletions analyze_experiment2.py
@@ -0,0 +1,187 @@
import os
import json

import numpy as np
import pandas as pd

import behav_exp_analysis


# prepare behavioral data from its raw format


def preprocess_experiment2():
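    """Build the anonymized experiment-2 results CSV from the raw data files.

    Concatenates the raw task CSVs, keeps only response rows, drops excluded
    participants, replaces private participant IDs with sequential subject
    indices, restricts the table to the analysis-relevant fields, and adds
    model sentence probabilities before saving the anonymized file, which is
    then preprocessed into its analysis-ready, LOSO-aligned form.
    """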
fields_to_keep = [
"Zone Type",
"Trial Number",
"Zone Name",
"Response",
"Reaction Time",
"counterbalance-o1ql",
"sentence1",
"sentence2",
"sentence1_type",
"sentence2_type",
"Participant Private ID",
]

    # if the anonymized CSV is missing, build it from the raw data files
    try:
        assert os.path.exists(
            "behavioral_results/contstim_Nov2022_30_participants_anon.csv"
        )
    except AssertionError:
csvs = [
os.path.join(
"behavioral_results",
"contstim_Nov2022_data_exp_48362-v23_task-kwi1.csv",
),
os.path.join(
"behavioral_results",
"contstim_Nov2022_data_exp_48362-v24_task-kwi1.csv",
),
]

df = pd.concat([pd.read_csv(csv) for csv in csvs])
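        # keep only the response rows (Zone Name values starting with "resp")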
df = df.loc[[str(s).startswith("resp") for s in df["Zone Name"]]]

if "sentence1_type" in df.columns:
df.drop("sentence1_type", axis=1, inplace=True)
if "sentence2_type" in df.columns:
df.drop("sentence2_type", axis=1, inplace=True)
if "sentence1" in df.columns:
df.drop("sentence1", axis=1, inplace=True)
if "sentence2" in df.columns:
df.drop("sentence2", axis=1, inplace=True)

df = df.rename(
columns={
"sentence1_type_set 1": "sentence1_type",
"sentence2_type_set 1": "sentence2_type",
"sentence1_set 1": "sentence1",
"sentence2_set 1": "sentence2",
"sentence1_model_set 1": "sentence1_model",
"sentence2_model_set 1": "sentence2_model",
}
)

if "Participant Private ID" in df.columns:

# we excluded three participants from the analysis for behavior indicating low effort.
# remove excluded participants
# the IDs are not included in the repo for privacy reasons
excluded_participants = json.load("behavioral_results/contstim_Nov2022_excluded_participants.json")
df = df[~df["Participant Private ID"].isin(excluded_participants)]

            # anonymize: replace private participant IDs with sequential subject indices
            IDs, df["subject"] = np.unique(
                df["Participant Private ID"], return_inverse=True
            )
            df = df.drop(columns=["Participant Private ID"])
            # save the mapping from subject index to private participant ID
            pd.DataFrame(IDs).to_csv(
                "behavioral_results/contstim_Nov2022_30_participants_subject_ID_list.csv"
            )
else:
assert "subject" in df.columns, "subject column not found"

n_subjects = len(df["subject"].unique())

print("found {} subjects".format(n_subjects))

# leave only the selection fields:
df = df.drop(columns=[col for col in df.columns if col not in fields_to_keep])

model_list = [
"bert",
"bert_has_a_mouth",
"electra",
"electra_has_a_mouth",
"roberta",
"roberta_has_a_mouth",
]

        # add model sentence probabilities to the dataframe before saving
        df = behav_exp_analysis.add_model_sentence_probabilities(
            df, model_list, remove_existing=False
        )
df.to_csv(
f"behavioral_results/contstim_Nov2022_{n_subjects}_participants_anon.csv"
)

finally:
df = behav_exp_analysis.data_preprocessing(
results_csv="behavioral_results/contstim_Nov2022_30_participants_anon.csv",
experiment=2,
)

        # report subject performance on catch trials (presumably attention checks)
        behav_exp_analysis.catch_trial_report(df, subject_id_column="subject")

        df.to_csv(
            "behavioral_results/contstim_Nov2022_30_participants_anon_aligned_with_loso.csv"
        )

if __name__ == "__main__":
try:
df = pd.read_csv(
"behavioral_results/contstim_Nov2022_30_participants_anon_aligned_with_loso.csv"
)
    except FileNotFoundError:
preprocess_experiment2()
df = pd.read_csv(
"behavioral_results/contstim_Nov2022_30_participants_anon_aligned_with_loso.csv"
)

    # contrast each base model with its "_has_a_mouth" variant
    # (apparently the bidirectional-scoring version; cf. batch_bidirectional_prob_calc_exp.py)
    model_combinations_to_contrast = [
        ("bert", "bert_has_a_mouth"),
        ("electra", "electra_has_a_mouth"),
        ("roberta", "roberta_has_a_mouth"),
    ]

behav_exp_analysis.plot_main_results_figures(
df,
save_folder="figures/exp2/binarized_acc_by_subject",
measure="RAE_signed_rank_cosine_similarity",
figure_set="exp2_synthetic",
exp="exp2",
statistical_testing_level="subject",
model_combinations_to_contrast=model_combinations_to_contrast,
initial_panel_letter_index=1,
)

behav_exp_analysis.generate_worst_sentence_pairs_table(
df,
trial_type="synthetic_vs_synthetic",
n_sentences_per_model=2,
target_folder="has_a_mouth_exp_tables",
        models=["bert_has_a_mouth", "electra_has_a_mouth", "roberta_has_a_mouth"],
)

    # as a complement to the main results, we also plot the experiment 1 results for the
    # randomly sampled natural-sentence condition, using the same models, plotting code,
    # and statistical inference as in experiment 2.

try:
df = pd.read_csv(
"behavioral_results/contstim_Aug2021_n100_results_anon_with_PLL_models_aligned_with_loso.csv"
)
    except FileNotFoundError:
df = behav_exp_analysis.data_preprocessing()
df = behav_exp_analysis.add_model_sentence_probabilities(
df,
["bert_has_a_mouth", "electra_has_a_mouth", "roberta_has_a_mouth"],
)
# drop irrelevant models
models_to_drop = ["gpt2", "bigram", "trigram", "xlm", "rnn", "lstm"]
df = df[
[col for col in df.columns if not any([m in col for m in models_to_drop])]
]
df.to_csv(
"behavioral_results/contstim_Aug2021_n100_results_anon_with_PLL_models_aligned_with_loso.csv"
)

behav_exp_analysis.plot_main_results_figures(
df,
save_folder="figures/exp1/binarized_acc_by_subject",
measure="RAE_signed_rank_cosine_similarity",
figure_set="exp1_natural_sents_reanalyzed_for_exp",
exp="exp2",
statistical_testing_level="subject",
model_combinations_to_contrast=model_combinations_to_contrast,
initial_panel_letter_index=0,
)
20 changes: 20 additions & 0 deletions axon_scripts/axon_bidirectional_prob_calc_exp_big_gpu.sh
@@ -0,0 +1,20 @@
#!/bin/sh
#
#
#SBATCH --account=nklab
#SBATCH --job-name=exp_a40 # The job name.
#SBATCH --gres=gpu:a40:1
#SBATCH --mem=16G
#SBATCH --error="slurm-%A_%a.err"
#SBATCH --time=24:00:00
#SBATCH --exclude=ax11,ax13
module load anaconda3-2019.03

cd /scratch/nklab/projects/contstimlang/contstimlang

conda activate contstimlang

python -u batch_bidirectional_prob_calc_exp2.py
echo "python script terminated"

# End of script
20 changes: 20 additions & 0 deletions axon_scripts/axon_bidirectional_prob_calc_exp_small_gpu.sh
@@ -0,0 +1,20 @@
#!/bin/sh
#
#
#SBATCH --account=nklab
#SBATCH --job-name=exp_2gpu # The job name.
#SBATCH --gres=gpu:2
#SBATCH --mem=16G
#SBATCH --error="slurm-%A_%a.err"
#SBATCH --time=24:00:00
#SBATCH --exclude=ax11,ax12,ax13,ax14,ax15,ax16
module load anaconda3-2019.03

cd /scratch/nklab/projects/contstimlang/contstimlang

conda activate contstimlang

python -u batch_bidirectional_prob_calc_exp2.py
echo "python script terminated"

# End of script
22 changes: 22 additions & 0 deletions axon_scripts/axon_make_controversial_sentences_absolute_ratings.sh
@@ -0,0 +1,22 @@
#!/bin/sh
#
#
#SBATCH --account=nklab
#SBATCH --job-name=make_model_sent # The job name.
#SBATCH --gres=gpu:2
#SBATCH --mem=16G
#SBATCH --error="slurm-%A_%a.err"
#SBATCH --time=24:00:00
#SBATCH --output="slurm-%A_%a.out"
#SBATCH --exclude=ax01

module load anaconda3-2019.03

cd /scratch/nklab/projects/contstimlang/contstim

conda activate contstimlang

python -u batch_synthesize_absolute_rating_controversial_sentences.py
echo "python script terminated"

# End of script
48 changes: 48 additions & 0 deletions batch_bidirectional_prob_calc_exp.py
@@ -0,0 +1,48 @@
import os
from batch_synthesize_controversial_pairs import (
NaturalSentenceAssigner,
synthesize_controversial_sentence_pair_set,
)

if __name__ == "__main__":
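    # each model pair appears in both orders, so that each model presumably takes
    # each role in the controversial sentence-pair synthesis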
    model_pairs = [
        ("bert", "bert_has_a_mouth"),
        ("bert_has_a_mouth", "bert"),
        ("electra", "electra_has_a_mouth"),
        ("electra_has_a_mouth", "electra"),
        ("roberta", "roberta_has_a_mouth"),
        ("roberta_has_a_mouth", "roberta"),
    ]

    # assign a natural-sentence initialization to each model pair
    initial_sentence_assigner = NaturalSentenceAssigner(model_pairs)
    sent_len = 8

results_csv_folder = os.path.join(
"synthesized_sentences",
"bidirectional_prob_calc_exp",
"controverisal_sentence_pairs_natural_initialization",
"{}_word".format(sent_len),
)

synthesize_controversial_sentence_pair_set(
model_pairs,
initial_sentence_assigner,
results_csv_folder=results_csv_folder,
        sent_len=sent_len,  # in the preprint, we used 8-word sentences
        allow_only_prepositions_to_repeat=True,  # in the preprint, this was True
        natural_initialization=True,  # sample a random natural sentence for initialization
        max_sentence_pairs_per_run=1,  # set to a small number (e.g., 5) if HPC job time is limited, or to None to keep running until done
max_non_decreasing_loss_attempts_per_word=50,
max_replacement_attempts_per_word=50,
max_opt_hours=12,
verbose=3,
)