diff --git a/notebooks/inference_tutorial.ipynb b/notebooks/inference_tutorial.ipynb index f7be10a..a1258a9 100644 --- a/notebooks/inference_tutorial.ipynb +++ b/notebooks/inference_tutorial.ipynb @@ -87,6 +87,21 @@ "Let's start this tutorial by importing the packages necessary to run the notebook." ] }, + { + "cell_type": "code", + "execution_count": null, + "metadata": {}, + "outputs": [], + "source": [ + "import os\n", + "\n", + "if os.path.basename(os.getcwd()) == \"notebooks\":\n", + " os.chdir(\"..\")\n", + "assert os.path.basename(os.getcwd()) == \"element-facemap\", (\n", + " \"Please move to the element-facemap directory\"\n", + ")" + ] + }, { "cell_type": "code", "execution_count": null, @@ -95,8 +110,9 @@ "source": [ "import datajoint as dj\n", "import datetime\n", - "import os\n", - "import matplotlib.pyplot as plt" + "import matplotlib.pyplot as plt\n", + "import pandas as pd\n", + "import numpy as np" ] }, { @@ -221,6 +237,7 @@ " dj.Diagram(subject.Subject)\n", " + dj.Diagram(session.Session)\n", " + dj.Diagram(fbe.VideoRecording)\n", + " + dj.Diagram(fbe.VideoRecording.File)\n", " + dj.Diagram(facemap_inference)\n", ")" ] }, @@ -370,13 +387,6 @@ "session.SessionDirectory()" ] }, - { - "cell_type": "markdown", - "metadata": {}, - "source": [ - "With the instantiation of the element-facemap" - ] - }, { "cell_type": "markdown", "metadata": {}, @@ -467,17 +477,17 @@ "outputs": [], "source": [ "from pathlib import Path\n", - "video_recording_insert = {**session_key, \n", + "video_recording_key = {**session_key, \n", " \"recording_id\": 0}\n", "facemap_root_dir_path = fbe.get_facemap_root_data_dir()\n", - "vid_path = \"example_behavior_videos mounted S3 inbox path\"\n", + "vid_path = \"./example_data/inbox/subject0/session0/*.avi\"\n", "video_recording_file_insert = {\n", - " **video_recording_insert,\n", + " **video_recording_key,\n", " \"file_id\": 0,\n", " \"file_path\": Path(vid_path).relative_to(facemap_root_dir_path),\n", "}\n", "\n", - "fbe.VideoRecording.insert1(video_recording_insert)\n", + "fbe.VideoRecording.insert1(video_recording_key)\n", "fbe.VideoRecording.File.insert1(video_recording_file_insert)" ] }, @@ -486,16 +496,11 @@ "metadata": {}, "source": [ "With entries present in the `facemap_inference.FacemapModel` and the `fbe.VideoRecording` tables, the criteria are met for insertion into the `facemap_inference.FacemapPoseEstimationTask` table.\n", - "- `facemap_inference.FacemapPoseEstimationTask` is a staging table that pairs a specific `FacemapModel` with a specific `VideoRecording`." + "- `facemap_inference.FacemapPoseEstimationTask` is a staging table that pairs a specific `FacemapModel` with a `VideoRecording`.\n", + "- For this example we will choose to `load` existing results to avoid lengthy processing.\n", + "- To run processing instead, set the `task_mode` to `trigger`. This step may take some time and can drop the database connection; if that happens, simply rerun the cell.\n",
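+ "\n", + "For reference, a `trigger`-mode insert uses the same helper as the `load` call below; a sketch, assuming the same `key` and `task_description` built in the next cells:\n", + "```python\n", + "facemap_inference.FacemapPoseEstimationTask.insert_pose_estimation_task(\n", + "    key, task_description=task_description, task_mode=\"trigger\"\n", + ")\n", + "```"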
] }, - { - "cell_type": "code", - "execution_count": null, - "metadata": {}, - "outputs": [], - "source": [] - }, { "cell_type": "markdown", "metadata": {}, "source": [ @@ -508,13 +513,18 @@ "execution_count": null, "metadata": {}, "outputs": [], - "source": [] + "source": [ + "model_key = (facemap_inference.FacemapModel & f'model_id={model_id}').fetch1(\"KEY\")\n", + "key = {**video_recording_key, **model_key}\n", + "task_description = \"Demo Facemap Inference Task, loads processed results\"\n", + "facemap_inference.FacemapPoseEstimationTask.insert_pose_estimation_task(key, task_description=task_description, task_mode=\"load\")" + ] }, { "cell_type": "markdown", "metadata": {}, "source": [ - "## Run Facemap Inference Pipeline" + "We can display the key in the `FacemapPoseEstimationTask` table to confirm it was inserted." ] }, { @@ -523,52 +533,14 @@ "metadata": {}, "outputs": [], "source": [ - "def ingest_model(model_name, model_description, full_model_path):\n", - " try:\n", - " model_id = max(facemap_inference.FacemapModel.fetch(\"model_id\"))\n", - " model_id = model_id + 1 # increment model id\n", - " except ValueError: # case that nothing has been inserted\n", - " model_id = 0\n", - "\n", - " facemap_model_insert = dict(\n", - " model_id=model_id, model_name=model_name, model_description=model_description\n", - " )\n", - " facemap_inference.FacemapModel.insert1(facemap_model_insert)\n", - "\n", - " body_part_insert = []\n", - " body_parts = [\n", - " \"eye(back)\",\n", - " \"eye(bottom)\",\n", - " \"eye(front)\",\n", - " \"eye(top)\",\n", - " \"lowerlip\",\n", - " \"mouth\",\n", - " \"nose(bottom)\",\n", - " \"nose(r)\",\n", - " \"nose(tip)\",\n", - " \"nose(top)\",\n", - " \"nosebridge\",\n", - " \"paw\",\n", - " \"whisker(I)\",\n", - " \"whisker(III)\",\n", - " \"whisker(II)\",\n", - " ]\n", - " for bp in body_parts:\n", - " body_part_insert.append(dict(model_id=model_id, body_part=bp))\n", - " # Insert into parent BodyPart table if no entries are present\n", - " if len(facemap_inference.BodyPart()) == 0:\n", - " facemap_inference.BodyPart.insert(body_part_insert)\n", - " file_insert = dict(model_id=model_id, model_file=full_model_path)\n", - "\n", - " facemap_inference.FacemapModel.BodyPart.insert(body_part_insert)\n", - " facemap_inference.FacemapModel.File.insert1(file_insert)" + "(facemap_inference.FacemapPoseEstimationTask() & key)" ] }, { "cell_type": "markdown", "metadata": {}, "source": [ - "#### Insert Subject and Session into subject.Subject, session.Session and session.SessionDirectory tables" + "Next, we will ingest the results into `FacemapPoseEstimation` and its part table `FacemapPoseEstimation.BodyPartPosition` for the key we just inserted into `FacemapPoseEstimationTask`." ] }, { @@ -577,27 +549,14 @@ "metadata": {}, "outputs": [], "source": [ - "sub_insert = dict(subject=\"mdl_sub\", \n", - " subject_nickname=\"facemap model subject\", \n", - " sex='U', \n", - " subject_birth_date=datetime.datetime.now(), \n", - " subject_description=\"Subject for Facemap Model Inference testing\")\n", - "# subject.Subject.insert1(sub_insert)\n", - "subject_key = (subject.Subject & 'subject=\"mdl_sub\"').fetch1(\"KEY\")" + "facemap_inference.FacemapPoseEstimation.populate(key, display_progress=True)" ] },
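+ { + "cell_type": "markdown", + "metadata": {}, + "source": [ + "If the connection drops mid-`populate`, you can check how many tasks remain before rerunning; a sketch using DataJoint's built-in `progress` helper, restricted to our `key`:" + ] + }, + { + "cell_type": "code", + "execution_count": null, + "metadata": {}, + "outputs": [], + "source": [ + "# Returns (remaining, total) for the restricted populate; completed work is not redone\n", + "facemap_inference.FacemapPoseEstimation.progress(key)" + ] + },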
{ - "cell_type": "code", - "execution_count": null, + "cell_type": "markdown", "metadata": {}, - "outputs": [], "source": [ - "session_id = 2\n", - "session_insert = dict(subject_key, session_id, session_datetime=datetime.datetime.now())\n", - "sdir_insert = dict(subject_key, session_id, session_dir=\"20230627_Image_eCBsensor_activity/Behavior_20230627/C57-C11-3_Rm_CNO_30min\")\n", - "\n", - "session.Session.insert1(session_insert)\n", - "session.SessionDirectory.insert1(sdir_insert)" + "Once the cell above has completed, run the next cells to display the `FacemapPoseEstimation` tables." ] }, { @@ -606,8 +565,7 @@ "metadata": {}, "outputs": [], "source": [ - "# Display Session Table to validate insert\n", - "session.Session() & {**subject_key, 'session_id': session_id}" + "facemap_inference.FacemapPoseEstimation()" ] }, { @@ -616,16 +574,14 @@ "metadata": {}, "outputs": [], "source": [ - "# Display SessionDirectory Table to validate insert\n", - "session.SessionDirectory() & {**subject_key, 'session_id': session_id}" + "facemap_inference.FacemapPoseEstimation.BodyPartPosition()" ] }, { "cell_type": "markdown", "metadata": {}, "source": [ - "### Ingest locally stored pytorch model(.pt) file\n", - "Provide model name, model filepath, and optional model description" + "### Visualize Pose Estimation Output" ] }, { @@ -634,16 +590,15 @@ "metadata": {}, "outputs": [], "source": [ - "model_name = 'facemap_model_state.pt'\n", - "full_local_model_filepath = \"/Users/sidhulyalkar/.facemap/models/facemap_model_state.pt\"\n", - "ingest_model(model_name, model_description=\"test facemap model\", model_file=full_local_model_filepath)" + "pe_query = {**session_key, 'recording_id': 0, 'model_id': model_id}\n", + "pose_estimation_key = (facemap_inference.FacemapPoseEstimation & pe_query).fetch1(\"KEY\")" ] }, { "cell_type": "markdown", "metadata": {}, "source": [ - "### Generate a Pose Estimation Task" + "Get the trajectory of the X and Y coordinates" ] }, { @@ -652,16 +607,43 @@ "metadata": {}, "outputs": [], "source": [ - "model_id = 0\n", - "session_key = session.Session.fetch(\"KEY\")[2] \n", - "generate_facemap_inference_estimation_task(model_id, session_key, task_mode=\"trigger\", bbox=[])" + "# Specify all body parts, or set body_parts to a custom list\n", + "body_parts = \"all\"\n", + "model_name = (facemap_inference.FacemapModel & f'model_id={key[\"model_id\"]}').fetch1(\"model_name\")\n", + "\n", + "if body_parts == \"all\":\n", + " body_parts = (facemap_inference.FacemapPoseEstimation.BodyPartPosition & key).fetch(\"body_part\")\n", + "elif not isinstance(body_parts, list):\n", + " body_parts = [body_parts] # wrap a single body part in a list\n" ] }, { - "cell_type": "markdown", + "cell_type": "code", + "execution_count": null, "metadata": {}, + "outputs": [], "source": [ - "#### Display FacemapPoseEstimationTask table" + "# Construct Pandas MultiIndex DataFrame\n", + "df = None\n", + "for body_part in body_parts:\n", + " result_dict = (\n", + " facemap_inference.FacemapPoseEstimation.BodyPartPosition\n", + " & {\"body_part\": body_part}\n", + " & {\"recording_id\": key[\"recording_id\"]}\n", + " & {\"session_id\": key[\"session_id\"]}\n", + " ).fetch(\"x_pos\", \"y_pos\", \"likelihood\", as_dict=True)[0]\n", + " x_pos = result_dict[\"x_pos\"].tolist()\n", + " y_pos = result_dict[\"y_pos\"].tolist()\n", + " likelihood = result_dict[\"likelihood\"].tolist()\n", + " a = np.vstack((x_pos, y_pos, likelihood))\n", + " a = a.T\n", + " pdindex = pd.MultiIndex.from_product(\n", + " [[model_name], [body_part], [\"x\", \"y\", \"likelihood\"]],\n", + " names=[\"model\", \"bodyparts\", \"coords\"],\n", + " )\n", + " frame = pd.DataFrame(a, columns=pdindex, index=range(0, a.shape[0]))\n", + " df = pd.concat([df, frame], axis=1)\n", + "df" ] }, { @@ -670,14 +652,7 @@ "metadata": {}, "outputs": [], "source": [ - 
"facemap_inference.FacemapPoseEstimationTask()" + "df_xy = df.iloc[:,df.columns.get_level_values(2).isin([\"x\",\"y\"])]['facemap_model_state.pt']\n", + "df_xy.mean()" ] }, { "cell_type": "markdown", "metadata": {}, "source": [ - "#### Display VideoRecording and VideoRecording.File tables from the imported facial behavioral estimation (fbe) schema" + "Plot coordinates across time for each body part" ] }, { @@ -686,7 +669,7 @@ "metadata": {}, "outputs": [], "source": [ - "fbe.VideoRecording()" + "df_xy.plot().legend(loc='best', prop={'size': 5})" ] }, { @@ -695,14 +678,15 @@ "metadata": {}, "outputs": [], "source": [ - "fbe.VideoRecording.File()" + "df_flat = df_xy.copy()\n", + "df_flat.columns = df_flat.columns.map('_'.join)" ] }, { "cell_type": "markdown", "metadata": {}, "source": [ - "#### Run Pose Estimation on all unprocessed FacemapPoseEstimationTasks " + "Plot Trace Overlays of each body part across time" ] }, { @@ -711,16 +695,33 @@ "metadata": {}, "outputs": [], "source": [ - "facemap_inference.FacemapPoseEstimation.populate(display_progress=True)\n", - "# If a lost connection error occurs, rerun the populate and if processing \n", - "# has completed, the data will be loaded and inference will not be rerun. \n" + "fig,ax=plt.subplots(2,2)\n", + "fig.set_figwidth(20)\n", + "fig.set_figheight(15)\n", + "\n", + "df_flat.plot(x='eye(front)_x',y='eye(front)_y',ax=ax[0, 0])\n", + "df_flat.plot(x='eye(back)_x',y='eye(back)_y',ax=ax[0, 0])\n", + "df_flat.plot(x='eye(bottom)_x',y='eye(bottom)_y',ax=ax[0, 0])\n", + "\n", + "df_flat.plot(x='nose(tip)_x',y='nose(tip)_y', ax=ax[1, 0])\n", + "df_flat.plot(x='nose(bottom)_x',y='nose(bottom)_y', ax=ax[1, 0])\n", + "df_flat.plot(x='nose(r)_x',y='nose(r)_y', ax=ax[1, 0])\n", + "df_flat.plot(x='nosebridge_x',y='nosebridge_y', ax=ax[1, 0])\n", + "\n", + "df_flat.plot(x='mouth_x',y='mouth_y', ax=ax[0, 1])\n", + "df_flat.plot(x='lowerlip_x',y='lowerlip_y', ax=ax[0, 1])\n", + "df_flat.plot(x='paw_x',y='paw_y', ax=ax[0, 1])\n", + "\n", + "df_flat.plot(x='whisker(I)_x',y='whisker(I)_y', ax=ax[1, 1])\n", + "df_flat.plot(x='whisker(II)_x',y='whisker(II)_y', ax=ax[1, 1])\n", + "df_flat.plot(x='whisker(II)_x',y='whisker(II)_y', ax=ax[1, 1])\n" ] }, { "cell_type": "markdown", "metadata": {}, "source": [ - "#### Display Facemap Pose Estimation Tables" + "### Visualize Keypoints Data" ] }, { @@ -729,40 +730,22 @@ "metadata": {}, "outputs": [], "source": [ - "facemap_inference.FacemapPoseEstimation()" - ] - }, - { - "cell_type": "code", - "execution_count": null, - "metadata": {}, - "outputs": [], - "source": [ - "facemap_inference.FacemapPoseEstimation.BodyPartPosition()" + "from matplotlib import cm\n", + "colors = cm.get_cmap(\"jet\")(np.linspace(0, 1.0, len(body_parts)))" ] }, { "cell_type": "markdown", "metadata": {}, "source": [ - "#### Visualize Pose Estimation Output" - ] - }, - { - "cell_type": "code", - "execution_count": null, - "metadata": {}, - "outputs": [], - "source": [ - "pe_query = {**session_key, 'recording_id': 0, 'model_id': model_id}\n", - "pose_estimation_key = (facemap_inference.FacemapPoseEstimation & pe_query).fetch1(\"KEY\")" + "(facemap_inference.FacemapPoseEstimation.BodyPartPosition & pose_estimation_key)" ] }, { "cell_type": "markdown", "metadata": {}, "source": [ - "Get Trajectory of X and Y coordinates" + "Fetch the keypoints_data from the database as a dictionary in order to index and reshape it." 
] }, { @@ -771,14 +754,7 @@ "metadata": {}, "outputs": [], "source": [ - "# Specify all body parts, or set body_parts to a custom list\n", - "body_parts = \"all\"\n", - "model_name = (facemap_inference.FacemapModel & f'model_id={key[\"model_id\"]}').fetch1(\"model_name\")\n", - "\n", - "if body_parts == \"all\":\n", - " body_parts = (facemap_inference.BodyPartPosition & key).fetch(\"body_part\")\n", - "elif not isinstance(body_parts, list):\n", - " body_parts = list(body_parts)\n" + "keypoints_data = (facemap_inference.FacemapPoseEstimation.BodyPartPosition & pose_estimation_key).fetch(as_dict=True)" ] }, { @@ -787,38 +763,30 @@ "metadata": {}, "outputs": [], "source": [ - "# Construct Pandas MultiIndex DataFrame\n", - "df = None\n", - "for body_part in body_parts:\n", - " result_dict = (\n", - " facemap_inference.BodyPartPosition\n", - " & {\"body_part\": body_part}\n", - " & {\"recording_id\": key[\"recording_id\"]}\n", - " & {\"session_id\": key[\"session_id\"]}\n", - " ).fetch(\"x_pos\", \"y_pos\", \"likelihood\", as_dict=True)[0]\n", - " x_pos = result_dict[\"x_pos\"].tolist()\n", - " y_pos = result_dict[\"y_pos\"].tolist()\n", - " likelihood = result_dict[\"likelihood\"].tolist()\n", - " a = np.vstack((x_pos, y_pos, likelihood))\n", - " a = a.T\n", - " pdindex = pd.MultiIndex.from_product(\n", - " [[model_name], [body_part], [\"x\", \"y\", \"likelihood\"]],\n", - " names=[\"model\", \"bodyparts\", \"coords\"],\n", - " )\n", - " frame = pd.DataFrame(a, columns=pdindex, index=range(0, a.shape[0]))\n", - " df = pd.concat([df, frame], axis=1)\n", - "df" + "pose_x_coord = []\n", + "pose_y_coord = []\n", + "pose_likelihood = []\n", + "for body_part_data in keypoints_data:\n", + " pose_x_coord.append(body_part_data[\"x_pos\"][:])\n", + " pose_y_coord.append(body_part_data[\"y_pos\"][:])\n", + " pose_likelihood.append(body_part_data[\"likelihood\"][:])\n", + "\n", + "pose_x_coord = np.array([pose_x_coord]) # size: 1 x key points x frames\n", + "pose_y_coord = np.array([pose_y_coord]) # size: 1 x key points x frames\n", + "pose_likelihood = np.array([pose_likelihood]) # size: 1 x key points x frames\n", + "pose_data = np.concatenate(\n", + " (pose_x_coord, pose_y_coord, pose_likelihood), axis=0\n", + ") # size: 3 x key points x frames\n", + "pose_x_coord = pose_data[0,:,:]\n", + "pose_y_coord = pose_data[1,:,:]\n", + "pose_likelihood = pose_data[2,:,:]" ] }, { - "cell_type": "code", - "execution_count": null, + "cell_type": "markdown", "metadata": {}, - "outputs": [], "source": [ - "# Or can use the built in function get_trajectory which also constructs this Pandas MultiIndex DataFrame\n", - "# df=facemap_inference.FacemapPoseEstimation.get_trajectory(pose_estimation_key)\n", - "# df" + "Plot keypoints for a subset of frames" ] }, { @@ -827,15 +795,24 @@ "metadata": {}, "outputs": [], "source": [ - "df_xy = df.iloc[:,df.columns.get_level_values(2).isin([\"x\",\"y\"])]['facemap_model_state.pt']\n", - "df_xy.mean()" + "start_frame = 100\n", + "end_frame = 500\n", + "\n", + "plt.figure(figsize=(15, 5), dpi=100)\n", + "for i, bodypart in enumerate(body_parts):\n", + " plt.plot(np.arange(start_frame, end_frame), pose_x_coord[i, start_frame:end_frame], '-', c=colors[i], label=bodypart)\n", + " plt.plot(np.arange(start_frame, end_frame), pose_y_coord[i, start_frame:end_frame], '--', c=colors[i])\n", + "plt.xlabel('Frame')\n", + "plt.ylabel('Keypoint coordinates')\n", + "plt.legend(bbox_to_anchor=(1.05, 1), loc='upper left', borderaxespad=0.)\n", + "plt.show()" ] }, { "cell_type": "markdown", "metadata": {}, 
"source": [ - "Plot coordinates across time for each body part" + "Plot a subset of bodypart keypoints for a subset of frames" ] }, { @@ -844,24 +821,34 @@ "metadata": {}, "outputs": [], "source": [ - "df_xy.plot().legend(loc='best', prop={'size': 5})" + "subset_bodyparts = [\"whisker(I)\", \"whisker(II)\", \"whisker(III)\"]\n", + "start_frame = 100\n", + "end_frame = 500\n", + "\n", + "plt.figure(figsize=(15, 5), dpi=100)\n", + "for i, bodypart in enumerate(body_parts):\n", + " if bodypart in subset_bodyparts:\n", + " plt.plot(np.arange(start_frame, end_frame), pose_x_coord[i, start_frame:end_frame], '-', c=colors[i], \n", + " label=bodypart)\n", + " plt.plot(np.arange(start_frame, end_frame), pose_y_coord[i, start_frame:end_frame], '--', c=colors[i])\n", + "plt.xlabel('Frame')\n", + "plt.ylabel('Keypoint coordinates')\n", + "plt.legend(bbox_to_anchor=(1.05, 1), loc='upper left', borderaxespad=0.)\n", + "plt.show()" ] }, { - "cell_type": "code", - "execution_count": null, + "cell_type": "markdown", "metadata": {}, - "outputs": [], "source": [ - "df_flat = df_xy.copy()\n", - "df_flat.columns = df_flat.columns.map('_'.join)" + "#### Filter keypoints data by confidence" ] }, { "cell_type": "markdown", "metadata": {}, "source": [ - "Plot Trace Overlays of each body part across time" + "Use Facemap's `filter_outliers` function to remove outliers by applying a median filter to the keypoints data." ] }, { @@ -870,27 +857,37 @@ "metadata": {}, "outputs": [], "source": [ - "fig,ax=plt.subplots(2,2)\n", - "fig.set_figwidth(20)\n", - "fig.set_figheight(15)\n", - "\n", - "df_flat.plot(x='eye(front)_x',y='eye(front)_y',ax=ax[0, 0])\n", - "df_flat.plot(x='eye(back)_x',y='eye(back)_y',ax=ax[0, 0])\n", - "df_flat.plot(x='eye(bottom)_x',y='eye(bottom)_y',ax=ax[0, 0])\n", - "\n", - "df_flat.plot(x='nose(tip)_x',y='nose(tip)_y', ax=ax[1, 0])\n", - "df_flat.plot(x='nose(bottom)_x',y='nose(bottom)_y', ax=ax[1, 0])\n", - "df_flat.plot(x='nose(r)_x',y='nose(r)_y', ax=ax[1, 0])\n", - "df_flat.plot(x='nosebridge_x',y='nosebridge_y', ax=ax[1, 0])\n", - "\n", - "df_flat.plot(x='mouth_x',y='mouth_y', ax=ax[0, 1])\n", - "df_flat.plot(x='lowerlip_x',y='lowerlip_y', ax=ax[0, 1])\n", - "df_flat.plot(x='paw_x',y='paw_y', ax=ax[0, 1])\n", + "from facemap.utils import filter_outliers\n", + "# Use the following function to filter outliers in the keypoints data (see docstring for details)\n", + "\"\"\"\n", + "utils.filter_outliers(x, y, filter_window=15, baseline_window=50, max_spike=25, max_diff=25)\n", + "x: x coordinates of keypoints\n", + "y: y coordinates of keypoints\n", + "filter_window: window size for median filter (default: 15)\n", + "baseline_window: window size for baseline estimation (default: 50)\n", + "max_spike: maximum spike size (default: 25)\n", + "max_diff: maximum difference between baseline and filtered signal (default: 25)\n", + "\"\"\"\n", "\n", - "df_flat.plot(x='whisker(I)_x',y='whisker(I)_y', ax=ax[1, 1])\n", - "df_flat.plot(x='whisker(II)_x',y='whisker(II)_y', ax=ax[1, 1])\n", - "df_flat.plot(x='whisker(II)_x',y='whisker(II)_y', ax=ax[1, 1])\n" + "plt.figure(figsize=(15, 5), dpi=100)\n", + "for i, bodypart in enumerate(body_parts):\n", + " if bodypart in subset_bodyparts:\n", + " x, y = filter_outliers(pose_x_coord[i], pose_y_coord[i])\n", + " plt.plot(np.arange(start_frame, end_frame), x[start_frame:end_frame], '-', c=colors[i], label=bodypart)\n", + " plt.plot(np.arange(start_frame, end_frame), y[start_frame:end_frame], '--', c=colors[i])\n", + "plt.xlabel('Frame')\n", + "plt.ylabel('Keypoint 
{ "cell_type": "code", "execution_count": null, "metadata": {}, "outputs": [], "source": [] } ], "metadata": { @@ -909,7 +906,7 @@ "name": "python", "nbconvert_exporter": "python", "pygments_lexer": "ipython3", - "version": "3.9.17" + "version": "3.9.16" }, "orig_nbformat": 4 },