From 2777071d8eb8800a4ec905cd1a59446049e6043a Mon Sep 17 00:00:00 2001 From: vinicvaz Date: Wed, 15 May 2024 08:14:49 -0300 Subject: [PATCH 1/3] add basic structure for reference and commit on workflows --- .github/workflows/deploy-docs.yaml | 14 ++++++++++++++ docs/vame-docs-app/docs/reference/_category_.json | 8 ++++++++ 2 files changed, 22 insertions(+) create mode 100644 docs/vame-docs-app/docs/reference/_category_.json diff --git a/.github/workflows/deploy-docs.yaml b/.github/workflows/deploy-docs.yaml index d6d0d6be..4c53a54e 100644 --- a/.github/workflows/deploy-docs.yaml +++ b/.github/workflows/deploy-docs.yaml @@ -4,6 +4,7 @@ on: push: branches: - docs + - fix/docs-workflows jobs: deploy: @@ -39,6 +40,19 @@ jobs: - name: Build website run: cd docs/vame-docs-app && yarn build + - name: Commit pydoc-markdown files + run: | + git config --local user.email "github-actions[bot]@users.noreply.github.com" + git config --local user.name "github-actions[bot]" + GIT_STATUS=$(git status -s) + [[ ! -z "$GIT_STATUS" ]] && git add docs/* && git commit -m "auto-commit-docs" -a || echo "No changes to commit" + + - name: Push changes + uses: ad-m/github-push-action@master + with: + github_token: ${{ secrets.GITHUB_TOKEN }} + branch: ${{ github.ref }} + # Popular action to deploy to GitHub Pages: # Docs: https://github.com/peaceiris/actions-gh-pages#%EF%B8%8F-docusaurus - name: Deploy to GitHub Pages diff --git a/docs/vame-docs-app/docs/reference/_category_.json b/docs/vame-docs-app/docs/reference/_category_.json new file mode 100644 index 00000000..d15c769f --- /dev/null +++ b/docs/vame-docs-app/docs/reference/_category_.json @@ -0,0 +1,8 @@ +{ + "label": "API reference", + "position": 3, + "link": { + "type": "generated-index", + "description": "VAME package API reference" + } +} \ No newline at end of file From 62861a869306c1061de8feaa4d79a6bea589cb24 Mon Sep 17 00:00:00 2001 From: "github-actions[bot]" Date: Wed, 15 May 2024 11:16:00 +0000 Subject: [PATCH 2/3] auto-commit-docs --- .../vame-docs-app/docs/reference/sidebar.json | 48 ++ .../vame/analysis/community_analysis.md | 328 ++++++++++++ .../vame/analysis/generative_functions.md | 132 +++++ .../reference/vame/analysis/gif_creator.md | 73 +++ .../vame/analysis/pose_segmentation.md | 150 ++++++ .../vame/analysis/segment_behavior.md | 136 +++++ .../reference/vame/analysis/tree_hierarchy.md | 168 +++++++ .../vame/analysis/umap_visualization.md | 93 ++++ .../reference/vame/analysis/videowriter.md | 72 +++ .../reference/vame/initialize_project/new.md | 45 ++ .../reference/vame/model/create_training.md | 139 +++++ .../docs/reference/vame/model/dataloader.md | 68 +++ .../docs/reference/vame/model/evaluate.md | 87 ++++ .../docs/reference/vame/model/rnn_model.md | 475 ++++++++++++++++++ .../docs/reference/vame/model/rnn_vae.md | 224 +++++++++ .../reference/vame/util/align_egocentrical.md | 204 ++++++++ .../docs/reference/vame/util/auxiliary.md | 76 +++ .../docs/reference/vame/util/csv_to_npy.md | 61 +++ .../reference/vame/util/gif_pose_helper.md | 143 ++++++ 19 files changed, 2722 insertions(+) create mode 100644 docs/vame-docs-app/docs/reference/sidebar.json create mode 100644 docs/vame-docs-app/docs/reference/vame/analysis/community_analysis.md create mode 100644 docs/vame-docs-app/docs/reference/vame/analysis/generative_functions.md create mode 100644 docs/vame-docs-app/docs/reference/vame/analysis/gif_creator.md create mode 100644 docs/vame-docs-app/docs/reference/vame/analysis/pose_segmentation.md create mode 100644 
docs/vame-docs-app/docs/reference/vame/analysis/segment_behavior.md create mode 100644 docs/vame-docs-app/docs/reference/vame/analysis/tree_hierarchy.md create mode 100644 docs/vame-docs-app/docs/reference/vame/analysis/umap_visualization.md create mode 100644 docs/vame-docs-app/docs/reference/vame/analysis/videowriter.md create mode 100644 docs/vame-docs-app/docs/reference/vame/initialize_project/new.md create mode 100644 docs/vame-docs-app/docs/reference/vame/model/create_training.md create mode 100644 docs/vame-docs-app/docs/reference/vame/model/dataloader.md create mode 100644 docs/vame-docs-app/docs/reference/vame/model/evaluate.md create mode 100644 docs/vame-docs-app/docs/reference/vame/model/rnn_model.md create mode 100644 docs/vame-docs-app/docs/reference/vame/model/rnn_vae.md create mode 100644 docs/vame-docs-app/docs/reference/vame/util/align_egocentrical.md create mode 100644 docs/vame-docs-app/docs/reference/vame/util/auxiliary.md create mode 100644 docs/vame-docs-app/docs/reference/vame/util/csv_to_npy.md create mode 100644 docs/vame-docs-app/docs/reference/vame/util/gif_pose_helper.md diff --git a/docs/vame-docs-app/docs/reference/sidebar.json b/docs/vame-docs-app/docs/reference/sidebar.json new file mode 100644 index 00000000..3f82c3d2 --- /dev/null +++ b/docs/vame-docs-app/docs/reference/sidebar.json @@ -0,0 +1,48 @@ +{ + "items": [ + { + "items": [ + "reference/vame/analysis/community_analysis", + "reference/vame/analysis/generative_functions", + "reference/vame/analysis/gif_creator", + "reference/vame/analysis/pose_segmentation", + "reference/vame/analysis/segment_behavior", + "reference/vame/analysis/tree_hierarchy", + "reference/vame/analysis/umap_visualization", + "reference/vame/analysis/videowriter" + ], + "label": "vame.analysis", + "type": "category" + }, + { + "items": [ + "reference/vame/initialize_project/new" + ], + "label": "vame.initialize_project", + "type": "category" + }, + { + "items": [ + "reference/vame/model/create_training", + "reference/vame/model/dataloader", + "reference/vame/model/evaluate", + "reference/vame/model/rnn_model", + "reference/vame/model/rnn_vae" + ], + "label": "vame.model", + "type": "category" + }, + { + "items": [ + "reference/vame/util/align_egocentrical", + "reference/vame/util/auxiliary", + "reference/vame/util/csv_to_npy", + "reference/vame/util/gif_pose_helper" + ], + "label": "vame.util", + "type": "category" + } + ], + "label": "vame", + "type": "category" +} \ No newline at end of file diff --git a/docs/vame-docs-app/docs/reference/vame/analysis/community_analysis.md b/docs/vame-docs-app/docs/reference/vame/analysis/community_analysis.md new file mode 100644 index 00000000..2d6d722b --- /dev/null +++ b/docs/vame-docs-app/docs/reference/vame/analysis/community_analysis.md @@ -0,0 +1,328 @@ +--- +sidebar_label: community_analysis +title: vame.analysis.community_analysis +--- + +Variational Animal Motion Embedding 1.0-alpha Toolbox +© K. Luxem & P. Bauer, Department of Cellular Neuroscience +Leibniz Institute for Neurobiology, Magdeburg, Germany + +https://github.com/LINCellularNeuroscience/VAME +Licensed under GNU General Public License v3.0 + +Updated 5/11/2022 with PH edits + +#### get\_adjacency\_matrix + +```python +def get_adjacency_matrix( + labels: np.ndarray, + n_cluster: int) -> Tuple[np.ndarray, np.ndarray, np.ndarray] +``` + +Calculate the adjacency matrix, transition matrix, and temporal matrix. + +**Arguments**: + +- `labels` _np.ndarray_ - Array of cluster labels. +- `n_cluster` _int_ - Number of clusters. 
+
+
+**Returns**:
+
+  Tuple[np.ndarray, np.ndarray, np.ndarray]: Tuple containing adjacency matrix, transition matrix, and temporal matrix.
+
+#### get\_transition\_matrix
+
+```python
+def get_transition_matrix(adjacency_matrix: np.ndarray,
+                          threshold: float = 0.0) -> np.ndarray
+```
+
+Compute the transition matrix from the adjacency matrix.
+
+**Arguments**:
+
+- `adjacency_matrix` _np.ndarray_ - Adjacency matrix.
+- `threshold` _float, optional_ - Threshold for considering transitions. Defaults to 0.0.
+
+
+**Returns**:
+
+- `np.ndarray` - Transition matrix.
+
+#### consecutive
+
+```python
+def consecutive(data: np.ndarray, stepsize: int = 1) -> List[np.ndarray]
+```
+
+Identifies the locations of missing motifs by finding consecutive elements in an array and returns the array(s) split at those points.
+
+**Arguments**:
+
+- `data` _np.ndarray_ - Input array.
+- `stepsize` _int, optional_ - Step size. Defaults to 1.
+
+
+**Returns**:
+
+- `List[np.ndarray]` - List of arrays containing consecutive elements.
+
+#### find\_zero\_labels
+
+```python
+def find_zero_labels(motif_usage: Tuple[np.ndarray, np.ndarray],
+                     n_cluster: int) -> np.ndarray
+```
+
+Find zero labels in motif usage and fill them.
+
+**Arguments**:
+
+- `motif_usage` _Tuple[np.ndarray, np.ndarray]_ - 2D list where the first index is a unique list of motifs used and the second index is the motif usage in frames.
+- `n_cluster` _int_ - Number of clusters.
+
+
+**Returns**:
+
+- `np.ndarray` - List of motif usage frames with 0's where motifs weren't used (array with zero labels filled).
+
+#### augment\_motif\_timeseries
+
+```python
+def augment_motif_timeseries(label: np.ndarray,
+                             n_cluster: int) -> Tuple[np.ndarray, np.ndarray]
+```
+
+Augment motif time series by filling zero motifs.
+
+**Arguments**:
+
+- `label` _np.ndarray_ - Original label array.
+- `n_cluster` _int_ - Number of clusters.
+
+
+**Returns**:
+
+  Tuple[np.ndarray, np.ndarray]: Augmented label array and indices of zero motifs.
+
+#### get\_labels
+
+```python
+def get_labels(cfg: dict, files: List[str], model_name: str, n_cluster: int,
+               parametrization: str) -> List[np.ndarray]
+```
+
+Get cluster labels for the given video files.
+
+**Arguments**:
+
+- `cfg` _dict_ - Configuration parameters.
+- `files` _List[str]_ - List of video file paths.
+- `model_name` _str_ - Model name.
+- `n_cluster` _int_ - Number of clusters.
+- `parametrization` _str_ - Parameterization.
+
+
+**Returns**:
+
+- `List[np.ndarray]` - List of cluster labels for each file.
+
+#### get\_community\_label
+
+```python
+def get_community_label(cfg: dict, files: List[str], model_name: str,
+                        n_cluster: int, parametrization: str) -> np.ndarray
+```
+
+Get community labels for the given files.
+
+**Arguments**:
+
+- `cfg` _dict_ - Configuration parameters.
+- `files` _List[str]_ - List of file paths.
+- `model_name` _str_ - Model name.
+- `n_cluster` _int_ - Number of clusters.
+- `parametrization` _str_ - Parameterization.
+
+
+**Returns**:
+
+- `np.ndarray` - Array of community labels.
+
+#### compute\_transition\_matrices
+
+```python
+def compute_transition_matrices(files: List[str], labels: List[np.ndarray],
+                                n_cluster: int) -> List[np.ndarray]
+```
+
+Compute transition matrices for the given files and labels.
+
+**Arguments**:
+
+- `files` _List[str]_ - List of file paths.
+- `labels` _List[np.ndarray]_ - List of label arrays.
+- `n_cluster` _int_ - Number of clusters.
+
+
+**Returns**:
+
+- `List[np.ndarray]` - List of transition matrices.
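+
+For orientation, the mapping from a motif label sequence to these matrices can
+be sketched as follows (an illustrative example, not the package's exact
+implementation; variable names are assumptions):
+
+```python
+import numpy as np
+
+labels = np.array([0, 0, 1, 2, 1, 0])  # toy motif label sequence
+n_cluster = 3
+
+# count observed motif-to-motif steps
+adjacency = np.zeros((n_cluster, n_cluster))
+for a, b in zip(labels[:-1], labels[1:]):
+    adjacency[a, b] += 1
+
+# row-normalize the counts into transition probabilities
+row_sums = adjacency.sum(axis=1, keepdims=True)
+transition = np.divide(adjacency, row_sums,
+                       out=np.zeros_like(adjacency), where=row_sums > 0)
+```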
+
+#### create\_community\_bag
+
+```python
+def create_community_bag(files: List[str], labels: List[np.ndarray],
+                         transition_matrices: List[np.ndarray], cut_tree: int,
+                         n_cluster: int) -> Tuple
+```
+
+Create community bag for given files and labels (Markov chain to tree -> community detection).
+
+**Arguments**:
+
+- `files` _List[str]_ - List of file paths.
+- `labels` _List[np.ndarray]_ - List of label arrays.
+- `transition_matrices` _List[np.ndarray]_ - List of transition matrices.
+- `cut_tree` _int_ - Cut line for tree.
+- `n_cluster` _int_ - Number of clusters.
+
+
+**Returns**:
+
+- `Tuple` - Tuple containing list of community bags and list of trees.
+
+#### create\_cohort\_community\_bag
+
+```python
+def create_cohort_community_bag(files: List[str], labels: List[np.ndarray],
+                                trans_mat_full: np.ndarray, cut_tree: int,
+                                n_cluster: int) -> Tuple
+```
+
+Create cohort community bag for given labels, transition matrix, cut tree, and number of clusters.
+(Markov chain to tree -> community detection)
+
+**Arguments**:
+
+- `files` _List[str]_ - List of file paths (deprecated).
+- `labels` _List[np.ndarray]_ - List of label arrays.
+- `trans_mat_full` _np.ndarray_ - Full transition matrix.
+- `cut_tree` _int_ - Cut line for tree.
+- `n_cluster` _int_ - Number of clusters.
+
+
+**Returns**:
+
+- `Tuple` - Tuple containing list of community bags and list of trees.
+
+#### get\_community\_labels
+
+```python
+def get_community_labels(
+        files: List[str], labels: List[np.ndarray],
+        communities_all: List[List[List[int]]]) -> List[np.ndarray]
+```
+
+Transform a k-means parameterized latent vector into communities. Get community labels for the given files and community bags.
+
+**Arguments**:
+
+- `files` _List[str]_ - List of file paths.
+- `labels` _List[np.ndarray]_ - List of label arrays.
+- `communities_all` _List[List[List[int]]]_ - List of community bags.
+
+
+**Returns**:
+
+- `List[np.ndarray]` - List of community labels for each file.
+
+#### get\_cohort\_community\_labels
+
+```python
+def get_cohort_community_labels(
+        files: List[str], labels: List[np.ndarray],
+        communities_all: List[List[List[int]]]) -> List[np.ndarray]
+```
+
+Transform a k-means parameterized latent vector into communities. Get cohort community labels for the given labels and community bags.
+
+**Arguments**:
+
+- `files` _List[str]_ - List of file paths (deprecated).
+- `labels` _List[np.ndarray]_ - List of label arrays.
+- `communities_all` _List[List[List[int]]]_ - List of community bags.
+
+
+**Returns**:
+
+- `List[np.ndarray]` - List of cohort community labels for each file.
+
+#### umap\_embedding
+
+```python
+def umap_embedding(cfg: dict, file: str, model_name: str, n_cluster: int,
+                   parameterization: str) -> np.ndarray
+```
+
+Perform UMAP embedding for the given file and parameters.
+
+**Arguments**:
+
+- `cfg` _dict_ - Configuration parameters.
+- `file` _str_ - File path.
+- `model_name` _str_ - Model name.
+- `n_cluster` _int_ - Number of clusters.
+- `parameterization` _str_ - Parameterization.
+
+
+**Returns**:
+
+- `np.ndarray` - UMAP embedding.
+
+#### umap\_vis
+
+```python
+def umap_vis(cfg: dict, file: str, embed: np.ndarray,
+             community_labels_all: np.ndarray) -> None
+```
+
+Create a Plotly visualization of the UMAP embedding.
+
+**Arguments**:
+
+- `cfg` _dict_ - Configuration parameters.
+- `file` _str_ - File path.
+- `embed` _np.ndarray_ - UMAP embedding.
+- `community_labels_all` _np.ndarray_ - Community labels.
+
+
+**Returns**:
+
+  None
+
+#### community
+
+```python
+def community(config: str,
+              cohort: bool = True,
+              show_umap: bool = False,
+              cut_tree: int = None) -> None
+```
+
+Perform community analysis.
+
+**Arguments**:
+
+- `config` _str_ - Path to the configuration file.
+- `cohort` _bool, optional_ - Flag indicating cohort analysis. Defaults to True.
+- `show_umap` _bool, optional_ - Flag indicating whether to show UMAP visualization. Defaults to False.
+- `cut_tree` _int, optional_ - Cut line for tree. Defaults to None.
+
+
+**Returns**:
+
+  None
+
diff --git a/docs/vame-docs-app/docs/reference/vame/analysis/generative_functions.md b/docs/vame-docs-app/docs/reference/vame/analysis/generative_functions.md
new file mode 100644
index 00000000..55f36511
--- /dev/null
+++ b/docs/vame-docs-app/docs/reference/vame/analysis/generative_functions.md
@@ -0,0 +1,132 @@
+---
+sidebar_label: generative_functions
+title: vame.analysis.generative_functions
+---
+
+Variational Animal Motion Embedding 1.0-alpha Toolbox
+© K. Luxem & P. Bauer, Department of Cellular Neuroscience
+Leibniz Institute for Neurobiology, Magdeburg, Germany
+
+https://github.com/LINCellularNeuroscience/VAME
+Licensed under GNU General Public License v3.0
+
+#### random\_generative\_samples\_motif
+
+```python
+def random_generative_samples_motif(cfg: dict, model: torch.nn.Module,
+                                    latent_vector: np.ndarray,
+                                    labels: np.ndarray,
+                                    n_cluster: int) -> None
+```
+
+Generate random samples for motifs.
+
+**Arguments**:
+
+- `cfg` _dict_ - Configuration dictionary.
+- `model` _torch.nn.Module_ - PyTorch model.
+- `latent_vector` _np.ndarray_ - Latent vectors.
+- `labels` _np.ndarray_ - Labels.
+- `n_cluster` _int_ - Number of clusters.
+
+
+**Returns**:
+
+- `None` - Plot of generated samples.
+
+#### random\_generative\_samples
+
+```python
+def random_generative_samples(cfg: dict, model: torch.nn.Module,
+                              latent_vector: np.ndarray) -> None
+```
+
+Generate random generative samples.
+
+**Arguments**:
+
+- `cfg` _dict_ - Configuration dictionary.
+- `model` _torch.nn.Module_ - PyTorch model.
+- `latent_vector` _np.ndarray_ - Latent vectors.
+
+
+**Returns**:
+
+  None
+
+#### random\_reconstruction\_samples
+
+```python
+def random_reconstruction_samples(cfg: dict, model: torch.nn.Module,
+                                  latent_vector: np.ndarray) -> None
+```
+
+Generate random reconstruction samples.
+
+**Arguments**:
+
+- `cfg` _dict_ - Configuration dictionary.
+- `model` _torch.nn.Module_ - PyTorch model to use.
+- `latent_vector` _np.ndarray_ - Latent vectors.
+
+
+**Returns**:
+
+  None
+
+#### visualize\_cluster\_center
+
+```python
+def visualize_cluster_center(cfg: dict, model: torch.nn.Module,
+                             cluster_center: np.ndarray) -> None
+```
+
+Visualize cluster centers.
+
+**Arguments**:
+
+- `cfg` _dict_ - Configuration dictionary.
+- `model` _torch.nn.Module_ - PyTorch model.
+- `cluster_center` _np.ndarray_ - Cluster centers.
+
+
+**Returns**:
+
+  None
+
+#### load\_model
+
+```python
+def load_model(cfg: dict, model_name: str) -> torch.nn.Module
+```
+
+Load a PyTorch model.
+
+**Arguments**:
+
+- `cfg` _dict_ - Configuration dictionary.
+- `model_name` _str_ - Name of the model.
+
+
+**Returns**:
+
+- `torch.nn.Module` - Loaded PyTorch model.
+
+#### generative\_model
+
+```python
+def generative_model(config: str, mode: str = "sampling") -> None
+```
+
+Run the generative model to produce samples according to the chosen mode.
+
+**Arguments**:
+
+- `config` _str_ - Path to the configuration file.
+- `mode` _str, optional_ - Mode for generating samples. Defaults to "sampling".
+ + +**Returns**: + + None + diff --git a/docs/vame-docs-app/docs/reference/vame/analysis/gif_creator.md b/docs/vame-docs-app/docs/reference/vame/analysis/gif_creator.md new file mode 100644 index 00000000..11350c13 --- /dev/null +++ b/docs/vame-docs-app/docs/reference/vame/analysis/gif_creator.md @@ -0,0 +1,73 @@ +--- +sidebar_label: gif_creator +title: vame.analysis.gif_creator +--- + +Variational Animal Motion Embedding 1.0-alpha Toolbox +© K. Luxem & P. Bauer, Department of Cellular Neuroscience +Leibniz Institute for Neurobiology, Magdeburg, Germany + +https://github.com/LINCellularNeuroscience/VAME +Licensed under GNU General Public License v3.0 + +#### create\_video + +```python +def create_video(path_to_file: str, file: str, embed: np.ndarray, + clabel: np.ndarray, frames: List[np.ndarray], start: int, + length: int, max_lag: int, num_points: int) -> None +``` + +Create video frames for the given embedding. + +**Arguments**: + +- `path_to_file` _str_ - Path to the file. +- `file` _str_ - File name. +- `embed` _np.ndarray_ - Embedding array. +- `clabel` _np.ndarray_ - Cluster labels. +- `frames` _List[np.ndarray]_ - List of frames. +- `start` _int_ - Starting index. +- `length` _int_ - Length of the video. +- `max_lag` _int_ - Maximum lag. +- `num_points` _int_ - Number of points. + + +**Returns**: + + None + +#### gif + +```python +def gif( + config: str, + pose_ref_index: int, + subtract_background: bool = True, + start: int = None, + length: int = 500, + max_lag: int = 30, + label: str = 'community', + file_format: str = '.mp4', + crop_size: Tuple[int, int] = (300, 300)) -> None +``` + +Create a GIF from the given configuration. + +**Arguments**: + +- `config` _str_ - Path to the configuration file. +- `pose_ref_index` _int_ - Pose reference index. +- `subtract_background` _bool, optional_ - Whether to subtract background. Defaults to True. +- `start` _int, optional_ - Starting index. Defaults to None. +- `length` _int, optional_ - Length of the video. Defaults to 500. +- `max_lag` _int, optional_ - Maximum lag. Defaults to 30. +- `label` _str, optional_ - Label type. Defaults to 'community'. +- `file_format` _str, optional_ - File format. Defaults to '.mp4'. +- `crop_size` _Tuple[int, int], optional_ - Crop size. Defaults to (300,300). + + +**Returns**: + + None + diff --git a/docs/vame-docs-app/docs/reference/vame/analysis/pose_segmentation.md b/docs/vame-docs-app/docs/reference/vame/analysis/pose_segmentation.md new file mode 100644 index 00000000..01a63077 --- /dev/null +++ b/docs/vame-docs-app/docs/reference/vame/analysis/pose_segmentation.md @@ -0,0 +1,150 @@ +--- +sidebar_label: pose_segmentation +title: vame.analysis.pose_segmentation +--- + +Variational Animal Motion Embedding 1.0-alpha Toolbox +© K. Luxem & P. Bauer, Department of Cellular Neuroscience +Leibniz Institute for Neurobiology, Magdeburg, Germany + +https://github.com/LINCellularNeuroscience/VAME +Licensed under GNU General Public License v3.0 + +#### load\_model + +```python +def load_model(cfg: dict, model_name: str, fixed: bool) -> RNN_VAE +``` + +Load the VAME model. + +**Arguments**: + +- `cfg` _dict_ - Configuration dictionary. +- `model_name` _str_ - Name of the model. +- `fixed` _bool_ - Fixed or variable length sequences. + + +**Returns**: + +- `RNN_VAE` - Loaded VAME model. + +#### embedd\_latent\_vectors + +```python +def embedd_latent_vectors(cfg: dict, files: List[str], model: RNN_VAE, + fixed: bool) -> List[np.ndarray] +``` + +Embed latent vectors for the given files using the VAME model. 
+ +**Arguments**: + +- `cfg` _dict_ - Configuration dictionary. +- `files` _List[str]_ - List of files names. +- `model` _RNN_VAE_ - VAME model. +- `fixed` _bool_ - Whether the model is fixed. + + +**Returns**: + +- `List[np.ndarray]` - List of latent vectors for each file. + +#### consecutive + +```python +def consecutive(data: np.ndarray, stepsize: int = 1) -> List[np.ndarray] +``` + +Find consecutive sequences in the data array. + +**Arguments**: + +- `data` _np.ndarray_ - Input array. +- `stepsize` _int, optional_ - Step size. Defaults to 1. + + +**Returns**: + +- `List[np.ndarray]` - List of consecutive sequences. + +#### get\_motif\_usage + +```python +def get_motif_usage(label: np.ndarray) -> np.ndarray +``` + +Compute motif usage from the label array. + +**Arguments**: + +- `label` _np.ndarray_ - Label array. + + +**Returns**: + +- `np.ndarray` - Array of motif usage counts. + +#### same\_parameterization + +```python +def same_parameterization( + cfg: dict, files: List[str], latent_vector_files: List[np.ndarray], + states: int, parameterization: str +) -> Tuple[List[np.ndarray], List[np.ndarray], List[np.ndarray]] +``` + +Apply the same parameterization to all animals. + +**Arguments**: + +- `cfg` _dict_ - Configuration dictionary. +- `files` _List[str]_ - List of file names. +- `latent_vector_files` _List[np.ndarray]_ - List of latent vector arrays. +- `states` _int_ - Number of states. +- `parameterization` _str_ - Parameterization method. + + +**Returns**: + +- `Tuple` - Tuple of labels, cluster centers, and motif usages. + +#### individual\_parameterization + +```python +def individual_parameterization(cfg: dict, files: List[str], + latent_vector_files: List[np.ndarray], + cluster: int) -> Tuple +``` + +Apply individual parameterization to each animal. + +**Arguments**: + +- `cfg` _dict_ - Configuration dictionary. +- `files` _List[str]_ - List of file names. +- `latent_vector_files` _List[np.ndarray]_ - List of latent vector arrays. +- `cluster` _int_ - Number of clusters. + + +**Returns**: + +- `Tuple` - Tuple of labels, cluster centers, and motif usages. + +#### pose\_segmentation + +```python +def pose_segmentation(config: str) -> None +``` + +Perform pose segmentation using the VAME model. + +**Arguments**: + +- `config` _str_ - Path to the configuration file. + + +**Returns**: + + None + diff --git a/docs/vame-docs-app/docs/reference/vame/analysis/segment_behavior.md b/docs/vame-docs-app/docs/reference/vame/analysis/segment_behavior.md new file mode 100644 index 00000000..6ac26482 --- /dev/null +++ b/docs/vame-docs-app/docs/reference/vame/analysis/segment_behavior.md @@ -0,0 +1,136 @@ +--- +sidebar_label: segment_behavior +title: vame.analysis.segment_behavior +--- + +Variational Animal Motion Embedding 0.1 Toolbox +© K. Luxem & P. Bauer, Department of Cellular Neuroscience +Leibniz Institute for Neurobiology, Magdeburg, Germany + +https://github.com/LINCellularNeuroscience/VAME +Licensed under GNU General Public License v3.0 + +#### load\_data + +```python +def load_data(PROJECT_PATH: str, file: str, data: str) -> np.ndarray +``` + +Load data for the given file. + +**Arguments**: + +- `PROJECT_PATH` _str_ - Path to the project directory. +- `file` _str_ - Name of the file. +- `data` _str_ - Data to load. + + +**Returns**: + +- `np.ndarray` - Loaded data. + +#### kmeans\_clustering + +```python +def kmeans_clustering(context: np.ndarray, n_clusters: int) -> np.ndarray +``` + +Perform k-Means clustering. 
+ +**Arguments**: + +- `context` _np.ndarray_ - Input data for clustering. +- `n_clusters` _int_ - Number of clusters. + + +**Returns**: + +- `np.ndarray` - Cluster labels. + +#### gmm\_clustering + +```python +def gmm_clustering(context: np.ndarray, n_components: int) -> np.ndarray +``` + +Perform Gaussian Mixture Model (GMM) clustering. + +**Arguments**: + +- `context` _np.ndarray_ - Input data for clustering. +- `n_components` _int_ - Number of components. + + +**Returns**: + +- `np.ndarray` - Cluster labels. + +#### behavior\_segmentation + +```python +def behavior_segmentation(config: str, + model_name: str = None, + cluster_method: str = 'kmeans', + n_cluster: List[int] = [30]) -> None +``` + +Perform behavior segmentation. + +**Arguments**: + +- `config` _str_ - Path to the configuration file. +- `model_name` _str, optional_ - Name of the model. Defaults to None. +- `cluster_method` _str, optional_ - Clustering method. Defaults to 'kmeans'. +- `n_cluster` _List[int], optional_ - List of number of clusters. Defaults to [30]. + + +**Returns**: + +- `None` - Save data to the results directory. + +#### temporal\_quant + +```python +def temporal_quant(cfg: dict, model_name: str, files: List[str], + use_gpu: bool) -> Tuple +``` + +Quantify the temporal latent space. + +**Arguments**: + +- `cfg` _dict_ - Configuration dictionary. +- `model_name` _str_ - Name of the model. +- `files` _List[str]_ - List of file names. +- `use_gpu` _bool_ - Whether to use GPU. + + +**Returns**: + +- `Tuple` - Tuple of latent space array and logger. + +#### cluster\_latent\_space + +```python +def cluster_latent_space(cfg: dict, files: List[str], z_data: np.ndarray, + z_logger: List[int], cluster_method: str, + n_cluster: List[int], model_name: str) -> None +``` + +Cluster the latent space. + +**Arguments**: + +- `cfg` _dict_ - Configuration dictionary. +- `files` _List[str]_ - List of file names. +- `z_data` _np.ndarray_ - Latent space data. +- `z_logger` _List[int]_ - Logger for the latent space. +- `cluster_method` _str_ - Clustering method. +- `n_cluster` _List[int]_ - List of number of clusters. +- `model_name` _str_ - Name of the model. + + +**Returns**: + + None -> Save data to the results directory. + diff --git a/docs/vame-docs-app/docs/reference/vame/analysis/tree_hierarchy.md b/docs/vame-docs-app/docs/reference/vame/analysis/tree_hierarchy.md new file mode 100644 index 00000000..352fd509 --- /dev/null +++ b/docs/vame-docs-app/docs/reference/vame/analysis/tree_hierarchy.md @@ -0,0 +1,168 @@ +--- +sidebar_label: tree_hierarchy +title: vame.analysis.tree_hierarchy +--- + +Variational Animal Motion Embedding 1.0-alpha Toolbox +© K. Luxem & P. Bauer, Department of Cellular Neuroscience +Leibniz Institute for Neurobiology, Magdeburg, Germany + +https://github.com/LINCellularNeuroscience/VAME +Licensed under GNU General Public License v3.0 + +#### hierarchy\_pos + +```python +def hierarchy_pos(G: nx.Graph, + root: str = None, + width: float = 0.5, + vert_gap: float = 0.2, + vert_loc: float = 0, + xcenter: float = 0.5) -> Dict[str, Tuple[float, float]] +``` + +Positions nodes in a tree-like layout. +Ref: From Joel's answer at https://stackoverflow.com/a/29597209/2966723. + +**Arguments**: + +- `G` _nx.Graph_ - The input graph. Must be a tree. +- `root` _str, optional_ - The root node of the tree. If None, the function selects a root node based on graph type. +- `width` _float, optional_ - The horizontal space assigned to each level. +- `vert_gap` _float, optional_ - The vertical gap between levels. 
+- `vert_loc` _float, optional_ - The vertical location of the root node.
+- `xcenter` _float, optional_ - The horizontal location of the root node.
+
+
+**Returns**:
+
+  Dict[str, Tuple[float, float]]: A dictionary mapping node names to their positions (x, y).
+
+#### merge\_func
+
+```python
+def merge_func(transition_matrix: np.ndarray, n_cluster: int,
+               motif_norm: np.ndarray,
+               merge_sel: int) -> Tuple[np.ndarray, np.ndarray]
+```
+
+Merge nodes in a graph based on a selection criterion.
+
+**Arguments**:
+
+- `transition_matrix` _np.ndarray_ - The transition matrix of the graph.
+- `n_cluster` _int_ - The number of clusters.
+- `motif_norm` _np.ndarray_ - The normalized motif matrix.
+- `merge_sel` _int_ - The merge selection criterion.
+  - 0: Merge nodes with highest transition probability.
+  - 1: Merge nodes with lowest cost.
+
+
+**Raises**:
+
+- `ValueError` - If an invalid merge selection criterion is provided.
+
+
+**Returns**:
+
+  Tuple[np.ndarray, np.ndarray]: A tuple containing the merged nodes.
+
+#### graph\_to\_tree
+
+```python
+def graph_to_tree(motif_usage: np.ndarray,
+                  transition_matrix: np.ndarray,
+                  n_cluster: int,
+                  merge_sel: int = 1) -> nx.Graph
+```
+
+Convert a graph to a tree.
+
+**Arguments**:
+
+- `motif_usage` _np.ndarray_ - The motif usage matrix.
+- `transition_matrix` _np.ndarray_ - The transition matrix of the graph.
+- `n_cluster` _int_ - The number of clusters.
+- `merge_sel` _int, optional_ - The merge selection criterion. Defaults to 1.
+  - 0: Merge nodes with highest transition probability.
+  - 1: Merge nodes with lowest cost.
+
+
+**Returns**:
+
+- `nx.Graph` - The tree.
+
+#### draw\_tree
+
+```python
+def draw_tree(T: nx.Graph) -> None
+```
+
+Draw a tree.
+
+**Arguments**:
+
+- `T` _nx.Graph_ - The tree to be drawn.
+
+
+**Returns**:
+
+  None
+
+#### traverse\_tree
+
+```python
+def traverse_tree(T: nx.Graph, root_node: str = None) -> str
+```
+
+Traverse a tree and return the traversal sequence.
+
+**Arguments**:
+
+- `T` _nx.Graph_ - The tree to be traversed.
+- `root_node` _str, optional_ - The root node of the tree. If None, traversal starts from the root.
+
+
+**Returns**:
+
+- `str` - The traversal sequence.
+
+#### traverse\_tree\_cutline
+
+```python
+def traverse_tree_cutline(T: nx.Graph,
+                          root_node: str = None,
+                          cutline: int = 2) -> List[List[str]]
+```
+
+Traverse a tree with a cutline and return the community bags.
+
+**Arguments**:
+
+- `T` _nx.Graph_ - The tree to be traversed.
+- `root_node` _str, optional_ - The root node of the tree. If None, traversal starts from the root.
+- `cutline` _int, optional_ - The cutline level.
+
+
+**Returns**:
+
+- `List[List[str]]` - List of community bags.
+
diff --git a/docs/vame-docs-app/docs/reference/vame/analysis/umap_visualization.md b/docs/vame-docs-app/docs/reference/vame/analysis/umap_visualization.md
new file mode 100644
index 00000000..d1e2edd4
--- /dev/null
+++ b/docs/vame-docs-app/docs/reference/vame/analysis/umap_visualization.md
@@ -0,0 +1,93 @@
+---
+sidebar_label: umap_visualization
+title: vame.analysis.umap_visualization
+---
+
+Variational Animal Motion Embedding 1.0-alpha Toolbox
+© K. Luxem & P. Bauer, Department of Cellular Neuroscience
+Leibniz Institute for Neurobiology, Magdeburg, Germany
+
+https://github.com/LINCellularNeuroscience/VAME
+Licensed under GNU General Public License v3.0
+
+#### umap\_vis
+
+```python
+def umap_vis(file: str, embed: np.ndarray, num_points: int) -> None
+```
+
+Visualize UMAP embedding without labels.
+
+**Arguments**:
+
+- `file` _str_ - Name of the file (deprecated).
+- `embed` _np.ndarray_ - UMAP embedding.
+- `num_points` _int_ - Number of data points to visualize.
+
+
+**Returns**:
+
+  None - Plots a visualization of the UMAP embedding.
+
+#### umap\_label\_vis
+
+```python
+def umap_label_vis(file: str, embed: np.ndarray, label: np.ndarray,
+                   n_cluster: int, num_points: int) -> None
+```
+
+Visualize UMAP embedding with motif labels.
+
+**Arguments**:
+
+- `file` _str_ - Name of the file (deprecated).
+- `embed` _np.ndarray_ - UMAP embedding.
+- `label` _np.ndarray_ - Motif labels.
+- `n_cluster` _int_ - Number of clusters.
+- `num_points` _int_ - Number of data points to visualize.
+
+
+**Returns**:
+
+  None - Plots a visualization of the UMAP embedding with motif labels.
+
+#### umap\_vis\_comm
+
+```python
+def umap_vis_comm(file: str, embed: np.ndarray, community_label: np.ndarray,
+                  num_points: int) -> None
+```
+
+Visualize UMAP embedding with community labels.
+
+**Arguments**:
+
+- `file` _str_ - Name of the file (deprecated).
+- `embed` _np.ndarray_ - UMAP embedding.
+- `community_label` _np.ndarray_ - Community labels.
+- `num_points` _int_ - Number of data points to visualize.
+
+
+**Returns**:
+
+  None - Plots a visualization of the UMAP embedding with community labels.
+
+#### visualization
+
+```python
+def visualization(config: Union[str, Path],
+                  label: Optional[str] = None) -> None
+```
+
+Visualize UMAP embeddings based on configuration settings.
+
+**Arguments**:
+
+- `config` _Union[str, Path]_ - Path to the configuration file.
+- `label` _str, optional_ - Type of labels to visualize. Default is None.
+
+
+**Returns**:
+
+  None - Plots visualizations of the UMAP embeddings.
+
diff --git a/docs/vame-docs-app/docs/reference/vame/analysis/videowriter.md b/docs/vame-docs-app/docs/reference/vame/analysis/videowriter.md
new file mode 100644
index 00000000..f4c60f4e
--- /dev/null
+++ b/docs/vame-docs-app/docs/reference/vame/analysis/videowriter.md
@@ -0,0 +1,72 @@
+---
+sidebar_label: videowriter
+title: vame.analysis.videowriter
+---
+
+Variational Animal Motion Embedding 1.0-alpha Toolbox
+© K. Luxem & P. Bauer, Department of Cellular Neuroscience
+Leibniz Institute for Neurobiology, Magdeburg, Germany
+
+https://github.com/LINCellularNeuroscience/VAME
+Licensed under GNU General Public License v3.0
+
+#### get\_cluster\_vid
+
+```python
+def get_cluster_vid(cfg: dict, path_to_file: str, file: str, n_cluster: int,
+                    videoType: str, flag: str) -> None
+```
+
+Generate cluster videos.
+
+**Arguments**:
+
+- `cfg` _dict_ - Configuration parameters.
+- `path_to_file` _str_ - Path to the file.
+- `file` _str_ - Name of the file.
+- `n_cluster` _int_ - Number of clusters.
+- `videoType` _str_ - Type of video.
+- `flag` _str_ - Flag indicating the type of video (motif or community).
+
+
+**Returns**:
+
+  None - Generates cluster videos and saves them to the project folder.
+
+#### motif\_videos
+
+```python
+def motif_videos(config: Union[str, Path], videoType: str = '.mp4') -> None
+```
+
+Generate motif videos.
+
+**Arguments**:
+
+- `config` _Union[str, Path]_ - Path to the configuration file.
+- `videoType` _str, optional_ - Type of video. Default is '.mp4'.
+
+
+**Returns**:
+
+  None - Generates motif videos and saves them to the project's cluster_videos folder.
+
+#### community\_videos
+
+```python
+def community_videos(config: Union[str, Path],
+                     videoType: str = '.mp4') -> None
+```
+
+Generate community videos.
+
+**Arguments**:
+
+- `config` _Union[str, Path]_ - Path to the configuration file.
+- `videoType` _str, optional_ - Type of video. Default is '.mp4'.
+
+
+**Returns**:
+
+  None - Generates community videos and saves them to the project's community_videos folder.
+
diff --git a/docs/vame-docs-app/docs/reference/vame/initialize_project/new.md b/docs/vame-docs-app/docs/reference/vame/initialize_project/new.md
new file mode 100644
index 00000000..ad1dd0a3
--- /dev/null
+++ b/docs/vame-docs-app/docs/reference/vame/initialize_project/new.md
@@ -0,0 +1,45 @@
+---
+sidebar_label: new
+title: vame.initialize_project.new
+---
+
+Variational Animal Motion Embedding 1.0-alpha Toolbox
+© K. Luxem & P. Bauer, Department of Cellular Neuroscience
+Leibniz Institute for Neurobiology, Magdeburg, Germany
+
+https://github.com/LINCellularNeuroscience/VAME
+Licensed under GNU General Public License v3.0
+
+The following code is adapted from:
+
+DeepLabCut2.0 Toolbox (deeplabcut.org)
+© A. & M. Mathis Labs
+https://github.com/AlexEMG/DeepLabCut
+Please see AUTHORS for contributors.
+https://github.com/AlexEMG/DeepLabCut/blob/master/AUTHORS
+Licensed under GNU Lesser General Public License v3.0
+
+#### init\_new\_project
+
+```python
+def init_new_project(project: str,
+                     videos: List[str],
+                     poses_estimations: List[str],
+                     working_directory: str = None,
+                     videotype: str = '.mp4') -> str
+```
+
+Creates a new VAME project with the given parameters.
+
+**Arguments**:

+- `project` _str_ - Project name.
+- `videos` _List[str]_ - List of video paths to be used in the project. E.g. ['./sample_data/Session001.mp4']
+- `poses_estimations` _List[str]_ - List of pose estimation file paths to be used in the project. E.g. ['./sample_data/pose estimation/Session001.csv']
+- `working_directory` _str, optional_ - Directory in which the project folder is created. Defaults to None.
+- `videotype` _str, optional_ - Video extension (.mp4 or .avi). Defaults to '.mp4'.
+
+
+**Returns**:
+
+- `projconfigfile` _str_ - Path to the new VAME project config file.
+
diff --git a/docs/vame-docs-app/docs/reference/vame/model/create_training.md b/docs/vame-docs-app/docs/reference/vame/model/create_training.md
new file mode 100644
index 00000000..b29c0566
--- /dev/null
+++ b/docs/vame-docs-app/docs/reference/vame/model/create_training.md
@@ -0,0 +1,139 @@
+---
+sidebar_label: create_training
+title: vame.model.create_training
+---
+
+Variational Animal Motion Embedding 1.0-alpha Toolbox
+© K. Luxem & P. Bauer, Department of Cellular Neuroscience
+Leibniz Institute for Neurobiology, Magdeburg, Germany
+
+https://github.com/LINCellularNeuroscience/VAME
+Licensed under GNU General Public License v3.0
+
+#### nan\_helper
+
+```python
+def nan_helper(y: np.ndarray) -> Tuple
+```
+
+Identifies indices of NaN values in an array and provides a function to convert them to non-NaN indices.
+
+**Arguments**:
+
+- `y` _np.ndarray_ - Input array containing NaN values.
+
+
+**Returns**:
+
+  Tuple[np.ndarray, Union[np.ndarray, None]]: A tuple containing two elements:
+  - An array of boolean values indicating the positions of NaN values.
+  - A lambda function to convert NaN indices to non-NaN indices.
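+
+A typical usage pattern for a helper with this contract (assuming the classic
+NumPy recipe, where the second element is an index lambda such as
+`lambda z: z.nonzero()[0]`):
+
+```python
+import numpy as np
+
+y = np.array([1.0, np.nan, 3.0, np.nan, 5.0])
+nans, idx = nan_helper(y)
+y[nans] = np.interp(idx(nans), idx(~nans), y[~nans])
+# y is now [1., 2., 3., 4., 5.]
+```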
+ +#### interpol + +```python +def interpol(arr: np.ndarray) -> np.ndarray +``` + +Interpolates all NaN values in the given array. + +**Arguments**: + +- `arr` _np.ndarray_ - Input array containing NaN values. + + +**Returns**: + +- `np.ndarray` - Array with NaN values replaced by interpolated values. + +#### plot\_check\_parameter + +```python +def plot_check_parameter(cfg: dict, iqr_val: float, num_frames: int, + X_true: List[np.ndarray], X_med: np.ndarray, + anchor_1: int, anchor_2: int) -> None +``` + +Plot the check parameter - z-scored data and the filtered data. + +**Arguments**: + +- `cfg` _dict_ - Configuration parameters. +- `iqr_val` _float_ - IQR value. +- `num_frames` _int_ - Number of frames. +- `X_true` _List[np.ndarray]_ - List of true data. +- `X_med` _np.ndarray_ - Filtered data. +- `anchor_1` _int_ - Index of the first anchor point (deprecated). +- `anchor_2` _int_ - Index of the second anchor point (deprecated). + + +**Returns**: + + None - Plot the z-scored data and the filtered data. + +#### traindata\_aligned + +```python +def traindata_aligned(cfg: dict, files: List[str], testfraction: float, + num_features: int, savgol_filter: bool, + check_parameter: bool) -> None +``` + +Create training dataset for aligned data. + +**Arguments**: + +- `cfg` _dict_ - Configuration parameters. +- `files` _List[str]_ - List of files. +- `testfraction` _float_ - Fraction of data to use as test data. +- `num_features` _int_ - Number of features (deprecated). +- `savgol_filter` _bool_ - Flag indicating whether to apply Savitzky-Golay filter. +- `check_parameter` _bool_ - If True, the function will plot the z-scored data and the filtered data. + + +**Returns**: + + None - Save numpy arrays with the test/train info to the project folder. + +#### traindata\_fixed + +```python +def traindata_fixed(cfg: dict, files: List[str], testfraction: float, + num_features: int, savgol_filter: bool, + check_parameter: bool, + pose_ref_index: Optional[List[int]]) -> None +``` + +Create training dataset for fixed data. + +**Arguments**: + +- `cfg` _dict_ - Configuration parameters. +- `files` _List[str]_ - List of files. +- `testfraction` _float_ - Fraction of data to use as test data. +- `num_features` _int_ - Number of features. +- `savgol_filter` _bool_ - Flag indicating whether to apply Savitzky-Golay filter. +- `check_parameter` _bool_ - If True, the function will plot the z-scored data and the filtered data. +- `pose_ref_index` _Optional[List[int]]_ - List of reference coordinate indices for alignment. + + +**Returns**: + + None - Save numpy arrays with the test/train info to the project folder. + +#### create\_trainset + +```python +def create_trainset(config: str, + pose_ref_index: Optional[List] = None, + check_parameter: bool = False) -> None +``` + +Creates a training dataset for the VAME model. + +**Arguments**: + +- `config` _str_ - Path to the config file. +- `pose_ref_index` _Optional[List], optional_ - List of reference coordinate indices for alignment. Defaults to None. +- `check_parameter` _bool, optional_ - If True, the function will plot the z-scored data and the filtered data. Defaults to False. 
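+
+A typical call in the VAME workflow might look like this (the path is a
+placeholder; `create_trainset` is assumed to be re-exported at the package
+top level):
+
+```python
+import vame
+
+config = '/path/to/your/project/config.yaml'
+vame.create_trainset(config, check_parameter=False)
+```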
+ diff --git a/docs/vame-docs-app/docs/reference/vame/model/dataloader.md b/docs/vame-docs-app/docs/reference/vame/model/dataloader.md new file mode 100644 index 00000000..84baacb1 --- /dev/null +++ b/docs/vame-docs-app/docs/reference/vame/model/dataloader.md @@ -0,0 +1,68 @@ +--- +sidebar_label: dataloader +title: vame.model.dataloader +--- + +Variational Animal Motion Embedding 0.1 Toolbox +© K. Luxem & P. Bauer, Department of Cellular Neuroscience +Leibniz Institute for Neurobiology, Magdeburg, Germany + +https://github.com/LINCellularNeuroscience/VAME +Licensed under GNU General Public License v3.0 + +## SEQUENCE\_DATASET Objects + +```python +class SEQUENCE_DATASET(Dataset) +``` + +#### \_\_init\_\_ + +```python +def __init__(path_to_file: str, data: str, train: bool, + temporal_window: int) -> None +``` + +Initialize the Sequence Dataset. + +**Arguments**: + +- `path_to_file` _str_ - Path to the dataset files. +- `data` _str_ - Name of the data file. +- `train` _bool_ - Flag indicating whether it's training data. +- `temporal_window` _int_ - Size of the temporal window. + + +**Returns**: + + None + +#### \_\_len\_\_ + +```python +def __len__() -> int +``` + +Return the number of data points. + +**Returns**: + +- `int` - Number of data points. + +#### \_\_getitem\_\_ + +```python +def __getitem__(index: int) -> torch.Tensor +``` + +Get a normalized sequence at the specified index. + +**Arguments**: + +- `index` _int_ - Index of the item. + + +**Returns**: + +- `torch.Tensor` - Normalized sequence data at the specified index. + diff --git a/docs/vame-docs-app/docs/reference/vame/model/evaluate.md b/docs/vame-docs-app/docs/reference/vame/model/evaluate.md new file mode 100644 index 00000000..5195360c --- /dev/null +++ b/docs/vame-docs-app/docs/reference/vame/model/evaluate.md @@ -0,0 +1,87 @@ +--- +sidebar_label: evaluate +title: vame.model.evaluate +--- + +Variational Animal Motion Embedding 0.1 Toolbox +© K. Luxem & P. Bauer, Department of Cellular Neuroscience +Leibniz Institute for Neurobiology, Magdeburg, Germany + +https://github.com/LINCellularNeuroscience/VAME +Licensed under GNU General Public License v3.0 + +#### plot\_reconstruction + +```python +def plot_reconstruction(filepath: str, + test_loader: Data.DataLoader, + seq_len_half: int, + model: RNN_VAE, + model_name: str, + FUTURE_DECODER: bool, + FUTURE_STEPS: int, + suffix: Optional[str] = None) -> None +``` + +Plot the reconstruction and future prediction of the input sequence. + +**Arguments**: + +- `filepath` _str_ - Path to save the plot. +- `test_loader` _Data.DataLoader_ - DataLoader for the test dataset. +- `seq_len_half` _int_ - Half of the temporal window size. +- `model` _RNN_VAE_ - Trained VAE model. +- `model_name` _str_ - Name of the model. +- `FUTURE_DECODER` _bool_ - Flag indicating whether the model has a future prediction decoder. +- `FUTURE_STEPS` _int_ - Number of future steps to predict. +- `suffix` _Optional[str], optional_ - Suffix for the saved plot filename. Defaults to None. + +#### plot\_loss + +```python +def plot_loss(cfg: dict, filepath: str, model_name: str) -> None +``` + +Plot the losses of the trained model. + +**Arguments**: + +- `cfg` _dict_ - Configuration dictionary. +- `filepath` _str_ - Path to save the plot. +- `model_name` _str_ - Name of the model. 
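+
+For instance, after a training run one might plot the stored losses like this
+(paths are placeholders; `read_config` is assumed to live in
+`vame.util.auxiliary`):
+
+```python
+from vame.util.auxiliary import read_config
+
+cfg = read_config('/path/to/your/project/config.yaml')
+plot_loss(cfg, filepath='/path/to/your/project/model/evaluate', model_name='VAME')
+```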
+ +#### eval\_temporal + +```python +def eval_temporal(cfg: dict, + use_gpu: bool, + model_name: str, + fixed: bool, + snapshot: Optional[str] = None, + suffix: Optional[str] = None) -> None +``` + +Evaluate the temporal aspects of the trained model. + +**Arguments**: + +- `cfg` _dict_ - Configuration dictionary. +- `use_gpu` _bool_ - Flag indicating whether to use GPU for evaluation. +- `model_name` _str_ - Name of the model. +- `fixed` _bool_ - Flag indicating whether the data is fixed or not. +- `snapshot` _Optional[str], optional_ - Path to the model snapshot. Defaults to None. +- `suffix` _Optional[str], optional_ - Suffix for the saved plot filename. Defaults to None. + +#### evaluate\_model + +```python +def evaluate_model(config: str, use_snapshots: bool = False) -> None +``` + +Evaluate the trained model. + +**Arguments**: + +- `config` _str_ - Path to config file. +- `use_snapshots` _bool, optional_ - Whether to plot for all snapshots or only the best model. Defaults to False. + diff --git a/docs/vame-docs-app/docs/reference/vame/model/rnn_model.md b/docs/vame-docs-app/docs/reference/vame/model/rnn_model.md new file mode 100644 index 00000000..26ae6b46 --- /dev/null +++ b/docs/vame-docs-app/docs/reference/vame/model/rnn_model.md @@ -0,0 +1,475 @@ +--- +sidebar_label: rnn_model +title: vame.model.rnn_model +--- + +Variational Animal Motion Embedding 0.1 Toolbox +© K. Luxem & P. Bauer, Department of Cellular Neuroscience +Leibniz Institute for Neurobiology, Magdeburg, Germany + +https://github.com/LINCellularNeuroscience/VAME +Licensed under GNU General Public License v3.0 + +The Model is partially adapted from the Timeseries Clustering repository developed by Tejas Lodaya: +https://github.com/tejaslodaya/timeseries-clustering-vae/blob/master/vrae/vrae.py + +## Encoder Objects + +```python +class Encoder(nn.Module) +``` + +Encoder module of the Variational Autoencoder. + +#### \_\_init\_\_ + +```python +def __init__(NUM_FEATURES: int, hidden_size_layer_1: int, + hidden_size_layer_2: int, dropout_encoder: float) +``` + +Initialize the Encoder module. + +**Arguments**: + +- `NUM_FEATURES` _int_ - Number of input features. +- `hidden_size_layer_1` _int_ - Size of the first hidden layer. +- `hidden_size_layer_2` _int_ - Size of the second hidden layer. +- `dropout_encoder` _float_ - Dropout rate for regularization. + +#### forward + +```python +def forward(inputs: torch.Tensor) -> torch.Tensor +``` + +Forward pass of the Encoder module. + +**Arguments**: + +- `inputs` _torch.Tensor_ - Input tensor of shape (batch_size, sequence_length, num_features). + + +**Returns**: + +- `torch.Tensor` - Encoded representation tensor of shape (batch_size, hidden_size_layer_1 * 4). + +## Lambda Objects + +```python +class Lambda(nn.Module) +``` + +Lambda module for computing the latent space parameters. + +#### \_\_init\_\_ + +```python +def __init__(ZDIMS: int, hidden_size_layer_1: int, hidden_size_layer_2: int, + softplus: bool) +``` + +Initialize the Lambda module. + +**Arguments**: + +- `ZDIMS` _int_ - Size of the latent space. +- `hidden_size_layer_1` _int_ - Size of the first hidden layer. +- `hidden_size_layer_2` _int, deprecated_ - Size of the second hidden layer. +- `softplus` _bool_ - Whether to use softplus activation for logvar. + +#### forward + +```python +def forward( + hidden: torch.Tensor +) -> tuple[torch.Tensor, torch.Tensor, torch.Tensor] +``` + +Forward pass of the Lambda module. 
+ +**Arguments**: + +- `hidden` _torch.Tensor_ - Hidden representation tensor of shape (batch_size, hidden_size_layer_1 * 4). + + +**Returns**: + + tuple[torch.Tensor, torch.Tensor, torch.Tensor]: Latent space tensor, mean tensor, logvar tensor. + +## Decoder Objects + +```python +class Decoder(nn.Module) +``` + +Decoder module of the Variational Autoencoder. + +#### \_\_init\_\_ + +```python +def __init__(TEMPORAL_WINDOW: int, ZDIMS: int, NUM_FEATURES: int, + hidden_size_rec: int, dropout_rec: float) +``` + +Initialize the Decoder module. + +**Arguments**: + +- `TEMPORAL_WINDOW` _int_ - Size of the temporal window. +- `ZDIMS` _int_ - Size of the latent space. +- `NUM_FEATURES` _int_ - Number of input features. +- `hidden_size_rec` _int_ - Size of the recurrent hidden layer. +- `dropout_rec` _float_ - Dropout rate for regularization. + +#### forward + +```python +def forward(inputs: torch.Tensor, z: torch.Tensor) -> torch.Tensor +``` + +Forward pass of the Decoder module. + +**Arguments**: + +- `inputs` _torch.Tensor_ - Input tensor of shape (batch_size, seq_len, ZDIMS). +- `z` _torch.Tensor_ - Latent space tensor of shape (batch_size, ZDIMS). + + +**Returns**: + +- `torch.Tensor` - Decoded output tensor of shape (batch_size, seq_len, NUM_FEATURES). + +## Decoder\_Future Objects + +```python +class Decoder_Future(nn.Module) +``` + +Decoder module for predicting future sequences. + +#### \_\_init\_\_ + +```python +def __init__(TEMPORAL_WINDOW: int, ZDIMS: int, NUM_FEATURES: int, + FUTURE_STEPS: int, hidden_size_pred: int, dropout_pred: float) +``` + +Initialize the Decoder_Future module. + +**Arguments**: + +- `TEMPORAL_WINDOW` _int_ - Size of the temporal window. +- `ZDIMS` _int_ - Size of the latent space. +- `NUM_FEATURES` _int_ - Number of input features. +- `FUTURE_STEPS` _int_ - Number of future steps to predict. +- `hidden_size_pred` _int_ - Size of the prediction hidden layer. +- `dropout_pred` _float_ - Dropout rate for regularization. + +#### forward + +```python +def forward(inputs: torch.Tensor, z: torch.Tensor) -> torch.Tensor +``` + +Forward pass of the Decoder_Future module. + +**Arguments**: + +- `inputs` _torch.Tensor_ - Input tensor of shape (batch_size, seq_len, ZDIMS). +- `z` _torch.Tensor_ - Latent space tensor of shape (batch_size, ZDIMS). + + +**Returns**: + +- `torch.Tensor` - Predicted future tensor of shape (batch_size, FUTURE_STEPS, NUM_FEATURES). + +## RNN\_VAE Objects + +```python +class RNN_VAE(nn.Module) +``` + +Variational Autoencoder module. + +#### \_\_init\_\_ + +```python +def __init__(TEMPORAL_WINDOW: int, ZDIMS: int, NUM_FEATURES: int, + FUTURE_DECODER: bool, FUTURE_STEPS: int, hidden_size_layer_1: int, + hidden_size_layer_2: int, hidden_size_rec: int, + hidden_size_pred: int, dropout_encoder: float, dropout_rec: float, + dropout_pred: float, softplus: bool) +``` + +Initialize the VAE module. + +**Arguments**: + +- `TEMPORAL_WINDOW` _int_ - Size of the temporal window. +- `ZDIMS` _int_ - Size of the latent space. +- `NUM_FEATURES` _int_ - Number of input features. +- `FUTURE_DECODER` _bool_ - Whether to include a future decoder. +- `FUTURE_STEPS` _int_ - Number of future steps to predict. +- `hidden_size_layer_1` _int_ - Size of the first hidden layer. +- `hidden_size_layer_2` _int_ - Size of the second hidden layer. +- `hidden_size_rec` _int_ - Size of the recurrent hidden layer. +- `hidden_size_pred` _int_ - Size of the prediction hidden layer. +- `dropout_encoder` _float_ - Dropout rate for encoder. 
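+- `dropout_rec` _float_ - Dropout rate for the recurrent decoder.
+- `dropout_pred` _float_ - Dropout rate for the prediction decoder.
+- `softplus` _bool_ - Whether to use softplus activation for logvar.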
+ +#### forward + +```python +def forward(seq: torch.Tensor) -> tuple +``` + +Forward pass of the VAE. + +**Arguments**: + +- `seq` _torch.Tensor_ - Input sequence tensor of shape (batch_size, seq_len, NUM_FEATURES). + + +**Returns**: + + Tuple containing: + - If FUTURE_DECODER is True: + - prediction (torch.Tensor): Reconstructed input sequence tensor. + - future (torch.Tensor): Predicted future sequence tensor. + - z (torch.Tensor): Latent representation tensor. + - mu (torch.Tensor): Mean of the latent distribution tensor. + - logvar (torch.Tensor): Log variance of the latent distribution tensor. + - If FUTURE_DECODER is False: + - prediction (torch.Tensor): Reconstructed input sequence tensor. + - z (torch.Tensor): Latent representation tensor. + - mu (torch.Tensor): Mean of the latent distribution tensor. + - logvar (torch.Tensor): Log variance of the latent distribution tensor. + +## Encoder\_LEGACY Objects + +```python +class Encoder_LEGACY(nn.Module) +``` + +LEGACY Encoder module of the Variational Autoencoder. + +#### \_\_init\_\_ + +```python +def __init__(NUM_FEATURES: int, hidden_size_layer_1: int, + hidden_size_layer_2: int, dropout_encoder: float) +``` + +(LEGACY) Initialize the Encoder_LEGACY module. + +**Arguments**: + +- `NUM_FEATURES` _int_ - Number of input features. +- `hidden_size_layer_1` _int_ - Size of the first hidden layer. +- `hidden_size_layer_2` _int_ - Size of the second hidden layer. +- `dropout_encoder` _float_ - Dropout rate for the encoder. + +#### forward + +```python +def forward(inputs: torch.Tensor) -> torch.Tensor +``` + +(LEGACY) Forward pass of the Encoder_LEGACY module. + +**Arguments**: + +- `inputs` _torch.Tensor_ - Input tensor of shape (batch_size, seq_len, NUM_FEATURES). + + +**Returns**: + +- `torch.Tensor` - Encoded tensor. + +## Lambda\_LEGACY Objects + +```python +class Lambda_LEGACY(nn.Module) +``` + +LEGACY Lambda module for computing the latent space parameters. + +#### \_\_init\_\_ + +```python +def __init__(ZDIMS: int, hidden_size_layer_1: int, hidden_size_layer_2: int) +``` + +(LEGACY) Initialize the Lambda_LEGACY module. + +**Arguments**: + +- `ZDIMS` _int_ - Size of the latent space. +- `hidden_size_layer_1` _int_ - Size of the first hidden layer. +- `hidden_size_layer_2` _int_ - Size of the second hidden layer. + +#### forward + +```python +def forward( + cell_output: torch.Tensor +) -> Tuple[torch.Tensor, torch.Tensor, torch.Tensor] +``` + +(LEGACY) Forward pass of the Lambda_LEGACY module. + +**Arguments**: + +- `cell_output` _torch.Tensor_ - Output tensor of the encoder. + + +**Returns**: + + Tuple[torch.Tensor, torch.Tensor, torch.Tensor]: Tuple containing: + - torch.Tensor: Sampled latent tensor. + - torch.Tensor: Mean of the latent distribution. + - torch.Tensor: Log variance of the latent distribution. + +## Decoder\_LEGACY Objects + +```python +class Decoder_LEGACY(nn.Module) +``` + +LEGACY Decoder module of the Variational Autoencoder. + +#### \_\_init\_\_ + +```python +def __init__(TEMPORAL_WINDOW: int, ZDIMS: int, NUM_FEATURES: int, + hidden_size_rec: int, dropout_rec: float) +``` + +(LEGACY) Initialize the Decoder_LEGACY module. + +**Arguments**: + +- `TEMPORAL_WINDOW` _int_ - Size of the temporal window. +- `ZDIMS` _int_ - Size of the latent space. +- `NUM_FEATURES` _int_ - Number of input features. +- `hidden_size_rec` _int_ - Size of the recurrent hidden layer. +- `dropout_rec` _float_ - Dropout rate for the decoder. 
+
+#### forward
+
+```python
+def forward(inputs: torch.Tensor) -> torch.Tensor
+```
+
+(LEGACY) Forward pass of the Decoder_LEGACY module.
+
+**Arguments**:
+
+- `inputs` _torch.Tensor_ - Input tensor.
+
+
+**Returns**:
+
+- `torch.Tensor` - Reconstructed tensor.
+
+## Decoder\_Future\_LEGACY Objects
+
+```python
+class Decoder_Future_LEGACY(nn.Module)
+```
+
+LEGACY Decoder module for predicting future sequences.
+
+#### \_\_init\_\_
+
+```python
+def __init__(TEMPORAL_WINDOW: int, ZDIMS: int, NUM_FEATURES: int,
+             FUTURE_STEPS: int, hidden_size_pred: int, dropout_pred: float)
+```
+
+(LEGACY) Initialize the Decoder_Future_LEGACY module.
+
+**Arguments**:
+
+- `TEMPORAL_WINDOW` _int_ - Size of the temporal window.
+- `ZDIMS` _int_ - Size of the latent space.
+- `NUM_FEATURES` _int_ - Number of input features.
+- `FUTURE_STEPS` _int_ - Number of future steps to predict.
+- `hidden_size_pred` _int_ - Size of the prediction hidden layer.
+- `dropout_pred` _float_ - Dropout rate for the prediction.
+
+#### forward
+
+```python
+def forward(inputs: torch.Tensor) -> torch.Tensor
+```
+
+(LEGACY) Forward pass of the Decoder_Future_LEGACY module.
+
+**Arguments**:
+
+- `inputs` _torch.Tensor_ - Input tensor.
+
+
+**Returns**:
+
+- `torch.Tensor` - Predicted future tensor.
+
+## RNN\_VAE\_LEGACY Objects
+
+```python
+class RNN_VAE_LEGACY(nn.Module)
+```
+
+LEGACY Variational Autoencoder module.
+
+#### \_\_init\_\_
+
+```python
+def __init__(TEMPORAL_WINDOW: int, ZDIMS: int, NUM_FEATURES: int,
+             FUTURE_DECODER: bool, FUTURE_STEPS: int, hidden_size_layer_1: int,
+             hidden_size_layer_2: int, hidden_size_rec: int,
+             hidden_size_pred: int, dropout_encoder: float, dropout_rec: float,
+             dropout_pred: float, softplus: bool)
+```
+
+(LEGACY) Initialize the RNN_VAE_LEGACY module.
+
+**Arguments**:
+
+- `TEMPORAL_WINDOW` _int_ - Size of the temporal window.
+- `ZDIMS` _int_ - Size of the latent space.
+- `NUM_FEATURES` _int_ - Number of input features.
+- `FUTURE_DECODER` _bool_ - Whether to include a future decoder.
+- `FUTURE_STEPS` _int_ - Number of future steps to predict.
+- `hidden_size_layer_1` _int_ - Size of the first hidden layer.
+- `hidden_size_layer_2` _int_ - Size of the second hidden layer.
+- `hidden_size_rec` _int_ - Size of the recurrent hidden layer.
+- `hidden_size_pred` _int_ - Size of the prediction hidden layer.
+- `dropout_encoder` _float_ - Dropout rate for the encoder.
+- `dropout_rec` _float_ - Dropout rate for the decoder.
+- `dropout_pred` _float_ - Dropout rate for the prediction.
+- `softplus` _bool, deprecated_ - Whether to use softplus activation.
+
+#### forward
+
+```python
+def forward(seq: torch.Tensor) -> Tuple
+```
+
+Forward pass of the RNN_VAE_LEGACY module.
+
+**Arguments**:
+
+- `seq` _torch.Tensor_ - Input sequence tensor of shape (batch_size, seq_len, NUM_FEATURES).
+
+
+**Returns**:
+
+- `Tuple` - Tuple containing:
+  - torch.Tensor: Predicted tensor.
+  - torch.Tensor: Future prediction tensor if FUTURE_DECODER is True, else nothing.
+  - torch.Tensor: Latent tensor.
+  - torch.Tensor: Mean of the latent distribution.
+  - torch.Tensor: Log variance of the latent distribution.
+
diff --git a/docs/vame-docs-app/docs/reference/vame/model/rnn_vae.md b/docs/vame-docs-app/docs/reference/vame/model/rnn_vae.md
new file mode 100644
index 00000000..58351449
--- /dev/null
+++ b/docs/vame-docs-app/docs/reference/vame/model/rnn_vae.md
@@ -0,0 +1,224 @@
+---
+sidebar_label: rnn_vae
+title: vame.model.rnn_vae
+---
+
+Variational Animal Motion Embedding 0.1 Toolbox
+© K. Luxem & P. 
Leibniz Institute for Neurobiology, Magdeburg, Germany

https://github.com/LINCellularNeuroscience/VAME
Licensed under GNU General Public License v3.0

#### reconstruction\_loss

```python
def reconstruction_loss(x: torch.Tensor, x_tilde: torch.Tensor,
                        reduction: str) -> torch.Tensor
```

Compute the reconstruction loss between input and reconstructed data.

**Arguments**:

- `x` _torch.Tensor_ - Input data tensor.
- `x_tilde` _torch.Tensor_ - Reconstructed data tensor.
- `reduction` _str_ - Type of reduction for the loss.


**Returns**:

- `torch.Tensor` - Reconstruction loss.

#### future\_reconstruction\_loss

```python
def future_reconstruction_loss(x: torch.Tensor, x_tilde: torch.Tensor,
                               reduction: str) -> torch.Tensor
```

Compute the future reconstruction loss between input and predicted future data.

**Arguments**:

- `x` _torch.Tensor_ - Input future data tensor.
- `x_tilde` _torch.Tensor_ - Reconstructed future data tensor.
- `reduction` _str_ - Type of reduction for the loss.


**Returns**:

- `torch.Tensor` - Future reconstruction loss.

#### cluster\_loss

```python
def cluster_loss(H: torch.Tensor, kloss: int, lmbda: float,
                 batch_size: int) -> torch.Tensor
```

Compute the cluster loss.

**Arguments**:

- `H` _torch.Tensor_ - Latent representation tensor.
- `kloss` _int_ - Number of clusters.
- `lmbda` _float_ - Lambda value for the loss.
- `batch_size` _int_ - Size of the batch.


**Returns**:

- `torch.Tensor` - Cluster loss.

#### kullback\_leibler\_loss

```python
def kullback_leibler_loss(mu: torch.Tensor,
                          logvar: torch.Tensor) -> torch.Tensor
```

Compute the Kullback-Leibler divergence loss.
See Appendix B of the VAE paper: Kingma and Welling. Auto-Encoding Variational Bayes. ICLR, 2014 - https://arxiv.org/abs/1312.6114

Formula: 0.5 * sum(1 + log(sigma^2) - mu^2 - sigma^2)

**Arguments**:

- `mu` _torch.Tensor_ - Mean of the latent distribution.
- `logvar` _torch.Tensor_ - Log variance of the latent distribution.


**Returns**:

- `torch.Tensor` - Kullback-Leibler divergence loss.

#### kl\_annealing

```python
def kl_annealing(epoch: int, kl_start: int, annealtime: int,
                 function: str) -> float
```

Anneal the Kullback-Leibler loss so that the model first learns to reconstruct the data
before the KL loss term is introduced.

**Arguments**:

- `epoch` _int_ - Current epoch number.
- `kl_start` _int_ - Epoch number to start annealing the loss.
- `annealtime` _int_ - Annealing time.
- `function` _str_ - Annealing function type.


**Returns**:

- `float` - Annealed weight value for the loss.

#### gaussian

```python
def gaussian(ins: torch.Tensor,
             is_training: bool,
             seq_len: int,
             std_n: float = 0.8) -> torch.Tensor
```

Add Gaussian noise to the input data.

**Arguments**:

- `ins` _torch.Tensor_ - Input data tensor.
- `is_training` _bool_ - Whether the model is in training mode.
- `seq_len` _int_ - Length of the sequence.
- `std_n` _float_ - Standard deviation for the Gaussian noise.


**Returns**:

- `torch.Tensor` - Noisy input data tensor.
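
To illustrate how such an annealing schedule behaves, here is a small, self-contained sketch of a linear ramp with the same argument shape as `kl_annealing` above. This is an illustration only, not the toolbox's implementation, and the set of curves VAME accepts via the `function` argument is not specified on this page:

```python
def linear_kl_annealing(epoch: int, kl_start: int, annealtime: int) -> float:
    """Linearly ramp the KL weight from 0 to 1 over `annealtime` epochs (illustrative)."""
    if epoch < kl_start:
        # Pure reconstruction phase: the KL term is switched off entirely.
        return 0.0
    # Fraction of the annealing window that has elapsed, capped at 1.0.
    return min((epoch - kl_start) / annealtime, 1.0)

# Annealing starts at epoch 2 and takes 4 epochs to reach full weight:
print([linear_kl_annealing(e, kl_start=2, annealtime=4) for e in range(8)])
# -> [0.0, 0.0, 0.0, 0.25, 0.5, 0.75, 1.0, 1.0]
```

The returned weight multiplies the KL term in the total loss, so reconstruction dominates early training and the latent regularization is phased in gradually.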

#### train

```python
def train(train_loader: Data.DataLoader, epoch: int, model: nn.Module,
          optimizer: torch.optim.Optimizer, anneal_function: str, BETA: float,
          kl_start: int, annealtime: int, seq_len: int, future_decoder: bool,
          future_steps: int, scheduler: torch.optim.lr_scheduler._LRScheduler,
          mse_red: str, mse_pred: str, kloss: int, klmbda: float, bsize: int,
          noise: bool) -> Tuple[float, float, float, float, float, float]
```

Train the model.

**Arguments**:

- `train_loader` _DataLoader_ - Training data loader.
- `epoch` _int_ - Current epoch number.
- `model` _nn.Module_ - Model to be trained.
- `optimizer` _Optimizer_ - Optimizer for training.
- `anneal_function` _str_ - Annealing function type.
- `BETA` _float_ - Beta value for the loss.
- `kl_start` _int_ - Epoch number to start annealing the loss.
- `annealtime` _int_ - Annealing time.
- `seq_len` _int_ - Length of the sequence.
- `future_decoder` _bool_ - Whether a future decoder is used.
- `future_steps` _int_ - Number of future steps to predict.
- `scheduler` _lr_scheduler._LRScheduler_ - Learning rate scheduler.
- `mse_red` _str_ - Reduction type for MSE reconstruction loss.
- `mse_pred` _str_ - Reduction type for MSE prediction loss.
- `kloss` _int_ - Number of clusters for cluster loss.
- `klmbda` _float_ - Lambda value for cluster loss.
- `bsize` _int_ - Size of the batch.
- `noise` _bool_ - Whether to add Gaussian noise to the input.


**Returns**:

  Tuple[float, float, float, float, float, float]: Kullback-Leibler weight, train loss, K-means loss, KL loss,
  MSE loss, future loss.

#### test

```python
def test(test_loader: Data.DataLoader, epoch: int, model: nn.Module,
         optimizer: torch.optim.Optimizer, BETA: float, kl_weight: float,
         seq_len: int, mse_red: str, kloss: str, klmbda: float,
         future_decoder: bool, bsize: int) -> Tuple[float, float, float]
```

Evaluate the model on the test dataset.

**Arguments**:

- `test_loader` _DataLoader_ - DataLoader for the test dataset.
- `epoch` _int, deprecated_ - Current epoch number.
- `model` _nn.Module_ - The trained model.
- `optimizer` _Optimizer, deprecated_ - The optimizer used for training.
- `BETA` _float_ - Beta value for the VAE loss.
- `kl_weight` _float_ - Weighting factor for the KL divergence loss.
- `seq_len` _int_ - Length of the sequence.
- `mse_red` _str_ - Reduction method for the MSE loss.
- `kloss` _str_ - Loss function for K-means clustering.
- `klmbda` _float_ - Lambda value for K-means loss.
- `future_decoder` _bool_ - Flag indicating whether to use a future decoder.
- `bsize` _int_ - Batch size.


**Returns**:

  Tuple[float, float, float]: Tuple containing MSE loss per item, total test loss per item,
  and K-means loss weighted by the kl_weight.

#### train\_model

```python
def train_model(config: str) -> None
```

Train Variational Autoencoder using the configuration file values.

**Arguments**:

- `config` _str_ - Path to the configuration file.

diff --git a/docs/vame-docs-app/docs/reference/vame/util/align_egocentrical.md b/docs/vame-docs-app/docs/reference/vame/util/align_egocentrical.md
new file mode 100644
index 00000000..badbbea0
--- /dev/null
+++ b/docs/vame-docs-app/docs/reference/vame/util/align_egocentrical.md
@@ -0,0 +1,204 @@
---
sidebar_label: align_egocentrical
title: vame.util.align_egocentrical
---

Variational Animal Motion Embedding 0.1 Toolbox
© K. Luxem & J. Kürsch & P. Bauer, Department of Cellular Neuroscience
Leibniz Institute for Neurobiology, Magdeburg, Germany

https://github.com/LINCellularNeuroscience/VAME
Licensed under GNU General Public License v3.0

#### crop\_and\_flip

```python
def crop_and_flip(
        rect: Tuple, src: np.ndarray, points: List[np.ndarray],
        ref_index: Tuple[int, int]) -> Tuple[np.ndarray, List[np.ndarray]]
```

Crop and flip the image based on the given rectangle and points.

**Arguments**:

- `rect` _Tuple_ - Rectangle coordinates (center, size, theta).
- `src` _np.ndarray_ - Source image.
- `points` _List[np.ndarray]_ - List of points.
- `ref_index` _Tuple[int, int]_ - Reference indices for alignment.


**Returns**:

  Tuple[np.ndarray, List[np.ndarray]]: Cropped and flipped image, and shifted points.

#### nan\_helper

```python
def nan_helper(y: np.ndarray) -> Tuple[np.ndarray, np.ndarray]
```

Helper function to identify NaN values in an array.

**Arguments**:

- `y` _np.ndarray_ - Input array.


**Returns**:

  Tuple[np.ndarray, np.ndarray]: Boolean mask for NaN values and function to interpolate them.

#### interpol

```python
def interpol(arr: np.ndarray) -> np.ndarray
```

Interpolates NaN values in the given array.

**Arguments**:

- `arr` _np.ndarray_ - Input array.


**Returns**:

- `np.ndarray` - Array with interpolated NaN values.

#### background

```python
def background(path_to_file: str,
               filename: str,
               video_format: str = '.mp4',
               num_frames: int = 1000) -> np.ndarray
```

Compute the background image from a fixed camera.

**Arguments**:

- `path_to_file` _str_ - Path to the file directory.
- `filename` _str_ - Name of the video file without the format.
- `video_format` _str, optional_ - Format of the video file. Defaults to '.mp4'.
- `num_frames` _int, optional_ - Number of frames to use for background computation. Defaults to 1000.


**Returns**:

- `np.ndarray` - Background image.

#### align\_mouse

```python
def align_mouse(
    path_to_file: str,
    filename: str,
    video_format: str,
    crop_size: Tuple[int, int],
    pose_list: List[np.ndarray],
    pose_ref_index: Tuple[int, int],
    confidence: float,
    pose_flip_ref: Tuple[int, int],
    bg: np.ndarray,
    frame_count: int,
    use_video: bool = True
) -> Tuple[List[np.ndarray], List[List[np.ndarray]], np.ndarray]
```

Align the mouse in the video frames.

**Arguments**:

- `path_to_file` _str_ - Path to the file directory.
- `filename` _str_ - Name of the video file without the format.
- `video_format` _str_ - Format of the video file.
- `crop_size` _Tuple[int, int]_ - Size to crop the video frames.
- `pose_list` _List[np.ndarray]_ - List of pose coordinates.
- `pose_ref_index` _Tuple[int, int]_ - Pose reference indices.
- `confidence` _float_ - Pose confidence threshold.
- `pose_flip_ref` _Tuple[int, int]_ - Reference indices for flipping.
- `bg` _np.ndarray_ - Background image.
- `frame_count` _int_ - Number of frames to align.
- `use_video` _bool, optional_ - Whether to crop the video frames as well, or align the DLC points only. Defaults to True.


**Returns**:

  Tuple[List[np.ndarray], List[List[np.ndarray]], np.ndarray]: List of aligned images, list of aligned DLC points, and time series data.

#### play\_aligned\_video

```python
def play_aligned_video(a: List[np.ndarray], n: List[List[np.ndarray]],
                       frame_count: int) -> None
```

Play the aligned video.

**Arguments**:

- `a` _List[np.ndarray]_ - List of aligned images.
- `n` _List[List[np.ndarray]]_ - List of aligned DLC points.
- `frame_count` _int_ - Number of frames in the video.

#### alignment

```python
def alignment(
        path_to_file: str,
        filename: str,
        pose_ref_index: List[int],
        video_format: str,
        crop_size: Tuple[int, int],
        confidence: float,
        use_video: bool = False,
        check_video: bool = False) -> Tuple[np.ndarray, List[np.ndarray]]
```

Perform alignment of egocentric data.

**Arguments**:

- `path_to_file` _str_ - Path to the file directory.
- `filename` _str_ - Name of the video file without the format.
- `pose_ref_index` _List[int]_ - Pose reference indices.
- `video_format` _str_ - Format of the video file.
- `crop_size` _Tuple[int, int]_ - Size to crop the video frames.
- `confidence` _float_ - Pose confidence threshold.
- `use_video` _bool, optional_ - Whether to use video for alignment. Defaults to False.
- `check_video` _bool, optional_ - Whether to check the aligned video. Defaults to False.


**Returns**:

  Tuple[np.ndarray, List[np.ndarray]]: Aligned time series data and list of aligned frames.

#### egocentric\_alignment

```python
def egocentric_alignment(config: str,
                         pose_ref_index: list = [5, 6],
                         crop_size: tuple = (300, 300),
                         use_video: bool = False,
                         video_format: str = '.mp4',
                         check_video: bool = False) -> None
```

Aligns egocentric data for VAME training.

**Arguments**:

- `config` _str_ - Path for the project config file.
- `pose_ref_index` _list, optional_ - Pose reference indices to be used for alignment. Defaults to [5, 6].
- `crop_size` _tuple, optional_ - Size to crop the video. Defaults to (300, 300).
- `use_video` _bool, optional_ - Whether to use the video to perform the pose alignment. Defaults to False.
- `video_format` _str, optional_ - Video format, can be .mp4 or .avi. Defaults to '.mp4'.
- `check_video` _bool, optional_ - Whether to play the aligned video for checking. Defaults to False.


**Raises**:

- `ValueError` - If the config.yaml indicates that the data is not egocentric.

diff --git a/docs/vame-docs-app/docs/reference/vame/util/auxiliary.md b/docs/vame-docs-app/docs/reference/vame/util/auxiliary.md
new file mode 100644
index 00000000..8af473f6
--- /dev/null
+++ b/docs/vame-docs-app/docs/reference/vame/util/auxiliary.md
@@ -0,0 +1,76 @@
---
sidebar_label: auxiliary
title: vame.util.auxiliary
---

Variational Animal Motion Embedding 1.0-alpha Toolbox
© K. Luxem & P. Bauer, Department of Cellular Neuroscience
Leibniz Institute for Neurobiology, Magdeburg, Germany

https://github.com/LINCellularNeuroscience/VAME
Licensed under GNU General Public License v3.0

The following code is adapted from:

DeepLabCut2.0 Toolbox (deeplabcut.org)
© A. & M. Mathis Labs
https://github.com/AlexEMG/DeepLabCut
Please see AUTHORS for contributors.
https://github.com/AlexEMG/DeepLabCut/blob/master/AUTHORS
Licensed under GNU Lesser General Public License v3.0

#### create\_config\_template

```python
def create_config_template() -> Tuple[dict, ruamel.yaml.YAML]
```

Creates a template for the config.yaml file.

**Returns**:

  Tuple[dict, ruamel.yaml.YAML]: A tuple containing the template dictionary and the Ruamel YAML instance.

#### read\_config

```python
def read_config(configname: str) -> dict
```

Reads structured config file defining a project.

**Arguments**:

- `configname` _str_ - Path to the config file.


**Returns**:

- `dict` - The contents of the config file as a dictionary.
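
As a usage illustration, the following sketch reads a project config, tweaks one value, and writes it back with `write_config` (documented next). The path and the edited key are placeholders, not values prescribed by VAME:

```python
from vame.util.auxiliary import read_config, write_config

config_path = '/path/to/your/project/config.yaml'  # placeholder path

cfg = read_config(config_path)  # returns the config as a plain dict
cfg['n_cluster'] = 15           # hypothetical key; adjust to your own config
write_config(config_path, cfg)  # persist the modified config
```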

#### write\_config

```python
def write_config(configname: str, cfg: dict) -> None
```

Write structured config file.

**Arguments**:

- `configname` _str_ - Path to the config file.
- `cfg` _dict_ - Dictionary containing the config data.

#### update\_config

```python
def update_config(config: str, force_update: bool = False) -> None
```

Updates the configuration file with default values.

**Arguments**:

- `config` _str_ - Path to the config file.
- `force_update` _bool, optional_ - Whether to force the update even if the user declines. Defaults to False.

diff --git a/docs/vame-docs-app/docs/reference/vame/util/csv_to_npy.md b/docs/vame-docs-app/docs/reference/vame/util/csv_to_npy.md
new file mode 100644
index 00000000..0ab26c19
--- /dev/null
+++ b/docs/vame-docs-app/docs/reference/vame/util/csv_to_npy.md
@@ -0,0 +1,61 @@
---
sidebar_label: csv_to_npy
title: vame.util.csv_to_npy
---

Variational Animal Motion Embedding 1.0-alpha Toolbox
© K. Luxem & P. Bauer, Department of Cellular Neuroscience
Leibniz Institute for Neurobiology, Magdeburg, Germany

https://github.com/LINCellularNeuroscience/VAME
Licensed under GNU General Public License v3.0

#### nan\_helper

```python
def nan_helper(y: np.ndarray) -> Tuple
```

Identifies indices of NaN values in an array and provides a function to convert them to non-NaN indices.

**Arguments**:

- `y` _np.ndarray_ - Input array containing NaN values.


**Returns**:

  Tuple[np.ndarray, Union[np.ndarray, None]]: A tuple containing two elements:
  - An array of boolean values indicating the positions of NaN values.
  - A lambda function to convert NaN indices to non-NaN indices.

#### interpol

```python
def interpol(arr: np.ndarray) -> np.ndarray
```

Interpolates all NaN values of a given array.

**Arguments**:

- `arr` _np.ndarray_ - A numpy array with NaN values.


**Returns**:

- `np.ndarray` - A numpy array with interpolated NaN values.

#### csv\_to\_numpy

```python
def csv_to_numpy(config: str) -> None
```

Converts a pose-estimation.csv file to a numpy array. Note that this code is only useful for data which is a priori egocentric, i.e. head-fixed
or otherwise restrained animals.

**Raises**:

- `ValueError` - If the config.yaml file indicates that the data is not egocentric.

diff --git a/docs/vame-docs-app/docs/reference/vame/util/gif_pose_helper.md b/docs/vame-docs-app/docs/reference/vame/util/gif_pose_helper.md
new file mode 100644
index 00000000..4f176854
--- /dev/null
+++ b/docs/vame-docs-app/docs/reference/vame/util/gif_pose_helper.md
@@ -0,0 +1,143 @@
---
sidebar_label: gif_pose_helper
title: vame.util.gif_pose_helper
---

Variational Animal Motion Embedding 1.0-alpha Toolbox
© K. Luxem & P. Bauer, Department of Cellular Neuroscience
Leibniz Institute for Neurobiology, Magdeburg, Germany

https://github.com/LINCellularNeuroscience/VAME
Licensed under GNU General Public License v3.0

#### crop\_and\_flip

```python
def crop_and_flip(rect: tuple, src: np.ndarray, points: list,
                  ref_index: list) -> tuple
```

Crop and flip an image based on a rectangle and reference points.

**Arguments**:

- `rect` _tuple_ - Tuple containing rectangle information (center, size, angle).
- `src` _np.ndarray_ - Source image to crop and flip.
- `points` _list_ - List of points to be aligned.
- `ref_index` _list_ - Reference indices for alignment.


**Returns**:

- `tuple` - Cropped and flipped image, shifted points.
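
To clarify the (center, size, angle) rectangle convention used above, here is a short OpenCV sketch that rotates a frame about a center point and crops around it. This mirrors the idea behind `crop_and_flip` using standard OpenCV calls, but it is not the toolbox's exact implementation, and all values are synthetic:

```python
import cv2
import numpy as np

frame = np.zeros((480, 640, 3), dtype=np.uint8)         # dummy video frame
center, size, angle = (320.0, 240.0), (300, 300), 45.0  # synthetic (center, size, angle)

# Rotate the frame about `center` by `angle` degrees, then crop a
# size[0] x size[1] patch around the same point.
M = cv2.getRotationMatrix2D(center, angle, 1.0)
rotated = cv2.warpAffine(frame, M, (frame.shape[1], frame.shape[0]))
patch = cv2.getRectSubPix(rotated, size, center)
print(patch.shape)  # (300, 300, 3)
```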

#### background

```python
def background(path_to_file: str,
               filename: str,
               file_format: str = '.mp4',
               num_frames: int = 1000) -> np.ndarray
```

Compute background image from fixed camera.

**Arguments**:

- `path_to_file` _str_ - Path to the directory containing the video files.
- `filename` _str_ - Name of the video file.
- `file_format` _str, optional_ - Format of the video file. Defaults to '.mp4'.
- `num_frames` _int, optional_ - Number of frames to use for background computation. Defaults to 1000.


**Returns**:

- `np.ndarray` - Background image.

#### get\_rotation\_matrix

```python
def get_rotation_matrix(
        adjacent: float, opposite: float,
        crop_size: tuple = (300, 300)) -> np.ndarray
```

Compute the rotation matrix based on the adjacent and opposite sides.

**Arguments**:

- `adjacent` _float_ - Length of the adjacent side.
- `opposite` _float_ - Length of the opposite side.
- `crop_size` _tuple, optional_ - Size of the cropped area. Defaults to (300, 300).


**Returns**:

- `np.ndarray` - Rotation matrix.

#### nan\_helper

```python
def nan_helper(y: np.ndarray) -> tuple
```

Helper function to find indices of NaN values.

**Arguments**:

- `y` _np.ndarray_ - Input array.


**Returns**:

- `tuple` - Indices of NaN values.

#### interpol

```python
def interpol(arr: np.ndarray) -> np.ndarray
```

Interpolates NaN values in the given array.

**Arguments**:

- `arr` _np.ndarray_ - Input array with NaN values.


**Returns**:

- `np.ndarray` - Array with interpolated NaN values.

#### get\_animal\_frames

```python
def get_animal_frames(
        cfg: dict,
        filename: str,
        pose_ref_index: list,
        start: int,
        length: int,
        subtract_background: bool,
        file_format: str = '.mp4',
        crop_size: tuple = (300, 300)) -> list
```

Extracts frames of an animal from a video file and returns them as a list.

**Arguments**:

- `cfg` _dict_ - Configuration dictionary containing project information.
- `filename` _str_ - Name of the video file.
- `pose_ref_index` _list_ - List of reference coordinate indices for alignment.
- `start` _int_ - Starting frame index.
- `length` _int_ - Number of frames to extract.
- `subtract_background` _bool_ - Whether to subtract background or not.
- `file_format` _str, optional_ - Format of the video file. Defaults to '.mp4'.
- `crop_size` _tuple, optional_ - Size of the cropped area. Defaults to (300, 300).


**Returns**:

- `list` - List of extracted frames.

From 0fd3c6b5dd013e3d19f1dc8b7952e73a043bbbae Mon Sep 17 00:00:00 2001
From: vinicvaz
Date: Wed, 15 May 2024 08:17:10 -0300
Subject: [PATCH 3/3] remove branch

---
 .github/workflows/deploy-docs.yaml | 1 -
 1 file changed, 1 deletion(-)

diff --git a/.github/workflows/deploy-docs.yaml b/.github/workflows/deploy-docs.yaml
index 4c53a54e..aba08154 100644
--- a/.github/workflows/deploy-docs.yaml
+++ b/.github/workflows/deploy-docs.yaml
@@ -4,7 +4,6 @@ on:
   push:
     branches:
       - docs
-      - fix/docs-workflows
 
 jobs:
   deploy: