From 48b1044d9af8df6e59ced74f056510b04f6ac6bc Mon Sep 17 00:00:00 2001 From: Thomas Capelle Date: Thu, 21 Sep 2023 21:21:24 +0200 Subject: [PATCH] replace login method (#467) * use !login method * hide nb script --- .github/nb_scripts/fix_login.ipynb | 315 +++++ W&B_Prompts_with_Custom_Columns.ipynb | 1236 ++++++++--------- colabs/audiocraft/AudioCraft.ipynb | 846 +++++------ ...edit_Scorecards_with_XGBoost_and_W&B.ipynb | 10 +- .../Simple_LightGBM_Integration.ipynb | 13 +- .../Using_W&B_Sweeps_with_XGBoost.ipynb | 12 +- .../Image_Classification_with_Tables.ipynb | 26 +- .../Logging_Timbre_Transfer_with_W&B.ipynb | 12 +- .../W&B_Dataset_Visualization.ipynb | 12 +- .../W&B_Tables_Quickstart.ipynb | 10 +- colabs/deepchem/W&B_x_DeepChem.ipynb | 10 +- .../diffusers-image-generation.ipynb | 7 - ...W&B_Dataset_and_Predictions_Viz_Demo.ipynb | 12 +- .../Semantic_Segmentation_Demo_with_W&B.ipynb | 2 +- .../fastai/Weights_&_Biases_with_fastai.ipynb | 2 +- colabs/huggingface/Huggingface_wandb.ipynb | 12 +- ...ng_Face_models_with_Weights_&_Biases.ipynb | 13 +- .../Simple_accelerate_integration_wandb.ipynb | 31 +- ...ging_Face_data_with_Weights_&_Biases.ipynb | 12 +- colabs/huggingface/wandb_hf_example.ipynb | 12 +- ...o_to_Weights_&_Biases_CV,_NLP_and_RL.ipynb | 12 +- colabs/intro/Intro_to_Weights_&_Biases.ipynb | 12 +- .../Intro_to_Weights_&_Biases_keras.ipynb | 12 +- ...ing_with_tfrecords_in_jax_imagenette.ipynb | 7 - ...nteractive_W&B_Charts_Inside_Jupyter.ipynb | 12 +- ...une_Vision_Transformer_using_KerasCV.ipynb | 20 +- .../keras/Image_Segmentation_with_Keras.ipynb | 13 +- .../keras/Keras_param_opti_using_sweeps.ipynb | 13 +- ...ras_pipeline_with_Weights_and_Biases.ipynb | 12 +- colabs/keras/Simple_Keras_Integration.ipynb | 13 +- ...bEvalCallback_in_your_Keras_workflow.ipynb | 2 +- ...bMetricLogger_in_your_Keras_workflow.ipynb | 2 +- ...delCheckpoint_in_your_Keras_workflow.ipynb | 2 +- colabs/keras/cosine_decay_using_keras.ipynb | 13 +- 
.../keras_nsynth_instrument_prediction.ipynb | 9 +- ...ct_Detector_with_MMDetection_and_W&B.ipynb | 2 +- ...ation_Model_with_MMDetection_and_W&B.ipynb | 2 +- .../MosaicML_Composer_and_wandb.ipynb | 71 +- ...stop_for_everything_object_detection.ipynb | 19 +- ...ur_OCR_Models_with_PaddleOCR_and_W&B.ipynb | 19 +- colabs/paella/Image-Variations.ipynb | 29 +- colabs/paella/Inpainting.ipynb | 29 +- .../paella/Latent-Space-Interpolation.ipynb | 27 +- colabs/paella/Multi-Conditioning.ipynb | 27 +- ...rientation-Guided-Multi-Conditioning.ipynb | 19 +- colabs/paella/Outpainting.ipynb | 29 +- colabs/paella/Structural-Morphing.ipynb | 21 +- colabs/paella/Text-Conditional.ipynb | 19 +- colabs/prompts/WandB_LLM_QA_bot.ipynb | 12 +- ...Prediction_Using_W&B_Pycaret_FastAPI.ipynb | 12 +- .../8_Node_Classification_(with_W&B).ipynb | 12 +- ...raph_Classification_with_PyG_and_W&B.ipynb | 7 - .../pyg/point-cloud-segmentation/00_eda.ipynb | 41 +- .../01_dgcnn_train.ipynb | 53 +- .../02_dgcnn_evaluate.ipynb | 57 +- .../pyg/pointnet-classification/00_eda.ipynb | 39 +- .../01_compare_sampling.ipynb | 23 +- .../02_pointnet_plus_plus.ipynb | 43 +- .../pointnet-classification/03_sweep.ipynb | 39 +- ...a_Transformer_with_Pytorch_Lightning.ipynb | 19 +- ...assification_using_PyTorch_Lightning.ipynb | 9 +- ...ghtning_models_with_Weights_&_Biases.ipynb | 12 +- .../Profile_PyTorch_Code.ipynb | 13 +- ...rch_Lightning_and_Weights_and_Biases.ipynb | 18 +- ...fer_Learning_Using_PyTorch_Lightning.ipynb | 2 +- ...db_End_to_End_with_PyTorch_Lightning.ipynb | 12 +- ...ing_dropout_affect_model_performance.ipynb | 13 +- ...parameter_Sweeps_in_PyTorch_with_W&B.ipynb | 13 +- .../pytorch/Simple_PyTorch_Integration.ipynb | 13 +- colabs/raytune/RayTune_with_wandb.ipynb | 2 +- colabs/raytune/tune-wandb.ipynb | 41 +- colabs/rdkit/wb_rdkit.ipynb | 12 +- colabs/scikit/Simple_Scikit_Integration.ipynb | 13 +- colabs/scikit/w-b-k-means-clustering.ipynb | 2 +- .../SimpleTransformersQA.ipynb | 10 +- 
colabs/spacy/SpaCy_v3_and_W&B.ipynb | 12 +- colabs/spacy/spaCy_v3_and_W&B_Sweeps.ipynb | 13 +- ...Baselines3_wandb_experiment_tracking.ipynb | 5 - colabs/stylegan_nada/StyleGAN-NADA.ipynb | 12 +- colabs/super-gradients/yolo_nas.ipynb | 3 +- .../AlphaFold_with_W&B_Align,_Fold,_Log.ipynb | 13 +- colabs/tables/Log_Tables_Incrementally.ipynb | 13 +- colabs/tables/W&B_Tables_Quickstart.ipynb | 12 +- .../Accelerator_W&B_Tensorboard.ipynb | 12 +- .../TensorBoard_and_Weights_and_Biases.ipynb | 7 - ...ation_in_TensorFlow_using_W&B_Sweeps.ipynb | 13 +- .../Simple_TensorFlow_Integration.ipynb | 13 +- .../convert_imagenette_tfrecord.ipynb | 7 - colabs/ultralytics/00_inference.ipynb | 3 +- colabs/ultralytics/01_train_val.ipynb | 3 +- .../Artifacts_Quickstart_with_W&B.ipynb | 12 +- .../Basic_Artifacts_with_W&B.ipynb | 12 +- .../W&B_artifacts_for_auditing_purposes.ipynb | 13 +- ...ain_val_test_split_with_tabular_data.ipynb | 10 +- colabs/wandb-log/Configs_in_W&B.ipynb | 12 +- ...ze_metric_logging_with_define_metric.ipynb | 13 +- ...ate_gifs_from_logged_images_on_wandb.ipynb | 19 +- .../Image_Logging_de_duplication.ipynb | 7 - ...Log_(Almost)_Anything_with_W&B_Media.ipynb | 2 +- .../Log_a_Confusion_Matrix_with_W&B.ipynb | 12 +- ...g_Strategies_for_High_Frequency_Data.ipynb | 13 +- ...lot_Precision_Recall_Curves_with_W&B.ipynb | 12 +- .../wandb-log/Plot_ROC_Curves_with_W&B.ipynb | 13 +- ...Run_names_visualized_using_min_dalle.ipynb | 10 +- colabs/wandb-log/Set_Alerts_with_W_&_B.ipynb | 12 +- .../Model_Registry_E2E.ipynb | 19 +- ...g_YOLOX_Models_with_Weights_&_Biases.ipynb | 12 +- examples/jax/jax-llm/create_tokenizer.ipynb | 24 +- ...rFlow_2_0_+_Keras_Crash_Course_+_W&B.ipynb | 5 - .../pytorch-cifar10-sagemaker/train.ipynb | 3 +- examples/pytorch/pytorch-intro/intro.ipynb | 13 +- .../pytorch_mnist.ipynb | 10 +- .../text_classification.ipynb | 12 +- 113 files changed, 2331 insertions(+), 1694 deletions(-) create mode 100644 .github/nb_scripts/fix_login.ipynb diff --git 
a/.github/nb_scripts/fix_login.ipynb b/.github/nb_scripts/fix_login.ipynb new file mode 100644 index 00000000..1ac6dc39 --- /dev/null +++ b/.github/nb_scripts/fix_login.ipynb @@ -0,0 +1,315 @@ +{ + "cells": [ + { + "cell_type": "code", + "execution_count": 24, + "metadata": {}, + "outputs": [], + "source": [ + "import nbformat\n", + "from pathlib import Path\n", + "from nb_helpers.utils import find_nbs" + ] + }, + { + "cell_type": "code", + "execution_count": 25, + "metadata": {}, + "outputs": [], + "source": [ + "nbs_paths = find_nbs(Path.cwd())" + ] + }, + { + "cell_type": "code", + "execution_count": 26, + "metadata": {}, + "outputs": [ + { + "data": { + "text/plain": [ + "Path('/Users/tcapelle/work/examples/colabs/boosting/Credit_Scorecards_with_XGBoost_and_W&B.ipynb')" + ] + }, + "execution_count": 26, + "metadata": {}, + "output_type": "execute_result" + } + ], + "source": [ + "nb_path = nbs_paths[3]\n", + "nb_path" + ] + }, + { + "cell_type": "code", + "execution_count": 27, + "metadata": {}, + "outputs": [], + "source": [ + " for i, cell in enumerate(nb[\"cells\"]):\n", + " if cell[\"cell_type\"] == \"code\":\n", + " if delete_line:\n", + " cell_content = cell[\"source\"].split(\"\\n\")\n", + " cell[\"source\"] = \"\\n\".join(cell_content) \n", + " return i" + ] + }, + { + "cell_type": "code", + "execution_count": null, + "metadata": {}, + "outputs": [], + "source": [ + "!wandb login" + ] + }, + { + "cell_type": "code", + "execution_count": 28, + "metadata": {}, + "outputs": [], + "source": [ + "nb = nbformat.read(nb_path, 4)" + ] + }, + { + "cell_type": "code", + "execution_count": 29, + "metadata": {}, + "outputs": [], + "source": [ + "cell = nb[\"cells\"][1]" + ] + }, + { + "cell_type": "code", + "execution_count": 30, + "metadata": {}, + "outputs": [ + { + "data": { + "text/plain": [ + "nbformat.notebooknode.NotebookNode" + ] + }, + "execution_count": 30, + "metadata": {}, + "output_type": "execute_result" + } + ], + "source": [ + "type(cell)" + ] + }, 
+ { + "cell_type": "code", + "execution_count": 31, + "metadata": {}, + "outputs": [ + { + "data": { + "text/plain": [ + "{'attachments': {},\n", + " 'cell_type': 'markdown',\n", + " 'idx_': 1,\n", + " 'metadata': {},\n", + " 'source': '\"Weights
\\n\\n\\n\\n\"Weights'}" + ] + }, + "execution_count": 31, + "metadata": {}, + "output_type": "execute_result" + } + ], + "source": [ + "cell" + ] + }, + { + "cell_type": "code", + "execution_count": 32, + "metadata": {}, + "outputs": [], + "source": [ + "login_idx = idx_login_cell(nb)" + ] + }, + { + "cell_type": "code", + "execution_count": 33, + "metadata": {}, + "outputs": [], + "source": [ + "def insert_login_cell(nb, idx, code=\"!wandb login\"):\n", + " n_cells = len(nb[\"cells\"])\n", + " login_cell = nbformat.v4.new_code_cell(source=code)\n", + " nb[\"cells\"].insert(idx+1, login_cell)\n", + " return nb" + ] + }, + { + "cell_type": "code", + "execution_count": 34, + "metadata": {}, + "outputs": [], + "source": [ + "nb = insert_login_cell(nb, login_idx)" + ] + }, + { + "cell_type": "code", + "execution_count": 35, + "metadata": {}, + "outputs": [ + { + "data": { + "text/plain": [ + "{'cell_type': 'code',\n", + " 'execution_count': None,\n", + " 'idx_': 19,\n", + " 'metadata': {},\n", + " 'source': ''}" + ] + }, + "execution_count": 35, + "metadata": {}, + "output_type": "execute_result" + } + ], + "source": [ + "nb[\"cells\"][login_idx] " + ] + }, + { + "cell_type": "code", + "execution_count": 36, + "metadata": {}, + "outputs": [ + { + "data": { + "text/plain": [ + "{'id': '98de0a69',\n", + " 'cell_type': 'code',\n", + " 'metadata': {},\n", + " 'execution_count': None,\n", + " 'source': '!wandb login',\n", + " 'outputs': []}" + ] + }, + "execution_count": 36, + "metadata": {}, + "output_type": "execute_result" + } + ], + "source": [ + "nb[\"cells\"][login_idx+1] " + ] + }, + { + "cell_type": "code", + "execution_count": 37, + "metadata": {}, + "outputs": [ + { + "data": { + "text/plain": [ + "{'attachments': {},\n", + " 'cell_type': 'markdown',\n", + " 'idx_': 21,\n", + " 'metadata': {},\n", + " 'source': '## Vehicle Loan Dataset\\n\\nWe will be using a simplified version of the [Vehicle Loan Default Prediction 
dataset](https://www.kaggle.com/sneharshinde/ltfs-av-data) from L&T which has been stored in W&B Artifacts. '}" + ] + }, + "execution_count": 37, + "metadata": {}, + "output_type": "execute_result" + } + ], + "source": [ + "nb[\"cells\"][login_idx+2] " + ] + }, + { + "cell_type": "code", + "execution_count": 48, + "metadata": {}, + "outputs": [], + "source": [ + "def remove_properties(nb, props=[\"id\", \"idx_\", \"path_\"]):\n", + " for cell in nb[\"cells\"]:\n", + " for prop in props:\n", + " cell.pop(prop, None)\n", + " return nb\n", + "\n", + "def remove_empty_cells(nb):\n", + " nb[\"cells\"] = [cell for cell in nb[\"cells\"] if cell[\"source\"] != \"\"]\n", + " return nb" + ] + }, + { + "cell_type": "code", + "execution_count": 49, + "metadata": {}, + "outputs": [], + "source": [ + "nb = remove_properties(nb)\n", + "nb = remove_empty_cells(nb)" + ] + }, + { + "cell_type": "code", + "execution_count": 50, + "metadata": {}, + "outputs": [ + { + "ename": "AttributeError", + "evalue": "idx_", + "output_type": "error", + "traceback": [ + "\u001b[0;31m---------------------------------------------------------------------------\u001b[0m", + "\u001b[0;31mAttributeError\u001b[0m Traceback (most recent call last)", + "\u001b[1;32m/Users/tcapelle/work/examples/fix_login.ipynb Cell 17\u001b[0m line \u001b[0;36m\u001b[0;34m()\u001b[0m\n\u001b[1;32m 3\u001b[0m login_idx \u001b[39m=\u001b[39m idx_login_cell(nb)\n\u001b[1;32m 4\u001b[0m \u001b[39mif\u001b[39;00m login_idx \u001b[39mis\u001b[39;00m \u001b[39mnot\u001b[39;00m \u001b[39mNone\u001b[39;00m:\n\u001b[0;32m----> 5\u001b[0m nb \u001b[39m=\u001b[39m insert_login_cell(nb, login_idx)\n\u001b[1;32m 6\u001b[0m \u001b[39m# delete path_ attribute\u001b[39;00m\n\u001b[1;32m 7\u001b[0m nb\u001b[39m.\u001b[39mpop(\u001b[39m\"\u001b[39m\u001b[39mpath_\u001b[39m\u001b[39m\"\u001b[39m, \u001b[39mNone\u001b[39;00m)\n", + "\u001b[1;32m/Users/tcapelle/work/examples/fix_login.ipynb Cell 17\u001b[0m line 
\u001b[0;36minsert_login_cell\u001b[0;34m(nb, idx, code)\u001b[0m\n\u001b[1;32m 5\u001b[0m \u001b[39m# update indexes\u001b[39;00m\n\u001b[1;32m 6\u001b[0m \u001b[39mfor\u001b[39;00m cell \u001b[39min\u001b[39;00m nb[\u001b[39m\"\u001b[39m\u001b[39mcells\u001b[39m\u001b[39m\"\u001b[39m][idx\u001b[39m+\u001b[39m\u001b[39m2\u001b[39m:]:\n\u001b[0;32m----> 7\u001b[0m cell\u001b[39m.\u001b[39midx_ \u001b[39m+\u001b[39m\u001b[39m=\u001b[39m \u001b[39m1\u001b[39m\n\u001b[1;32m 8\u001b[0m \u001b[39mreturn\u001b[39;00m nb\n", + "File \u001b[0;32m~/miniforge3/envs/nbdev2/lib/python3.10/site-packages/nbformat/_struct.py:125\u001b[0m, in \u001b[0;36mStruct.__getattr__\u001b[0;34m(self, key)\u001b[0m\n\u001b[1;32m 123\u001b[0m result \u001b[39m=\u001b[39m \u001b[39mself\u001b[39m[key]\n\u001b[1;32m 124\u001b[0m \u001b[39mexcept\u001b[39;00m \u001b[39mKeyError\u001b[39;00m:\n\u001b[0;32m--> 125\u001b[0m \u001b[39mraise\u001b[39;00m \u001b[39mAttributeError\u001b[39;00m(key) \u001b[39mfrom\u001b[39;00m \u001b[39mNone\u001b[39m\n\u001b[1;32m 126\u001b[0m \u001b[39melse\u001b[39;00m:\n\u001b[1;32m 127\u001b[0m \u001b[39mreturn\u001b[39;00m result\n", + "\u001b[0;31mAttributeError\u001b[0m: idx_" + ] + } + ], + "source": [ + "for nb_path in nbs_paths:\n", + " nb = nbformat.read(nb_path, 4)\n", + " login_idx = idx_login_cell(nb)\n", + " if login_idx is not None:\n", + " nb = insert_login_cell(nb, login_idx)\n", + " # delete path_ attribute\n", + " nb.pop(\"path_\", None)\n", + " nb = remove_properties(nb)\n", + " nb = remove_empty_cells(nb)\n", + " nbformat.validate(nb)\n", + " nbformat.write(nb, nb_path)" + ] + } + ], + "metadata": { + "kernelspec": { + "display_name": "nbdev2", + "language": "python", + "name": "python3" + }, + "language_info": { + "codemirror_mode": { + "name": "ipython", + "version": 3 + }, + "file_extension": ".py", + "mimetype": "text/x-python", + "name": "python", + "nbconvert_exporter": "python", + "pygments_lexer": "ipython3", + "version": "3.10.6" + } + }, 
+ "nbformat": 4, + "nbformat_minor": 2 +} diff --git a/W&B_Prompts_with_Custom_Columns.ipynb b/W&B_Prompts_with_Custom_Columns.ipynb index f1252f0c..be28359e 100644 --- a/W&B_Prompts_with_Custom_Columns.ipynb +++ b/W&B_Prompts_with_Custom_Columns.ipynb @@ -1,632 +1,632 @@ { - "cells": [ - { - "cell_type": "markdown", - "metadata": { - "id": "e-ZYaV5KGVmA" - }, - "source": [ - "\"Open\n", - "" - ] - }, - { - "cell_type": "markdown", - "metadata": { - "id": "gJSVEAGWGVmA" - }, - "source": [ - "\"Weights\n", - "" - ] - }, - { - "cell_type": "markdown", - "metadata": { - "id": "9f7yMKLwGVmA" - }, - "source": [ - "**[Weights & Biases Prompts](https://docs.wandb.ai/guides/prompts?utm_source=code&utm_medium=colab&utm_campaign=prompts)** is a suite of LLMOps tools built for the development of LLM-powered applications.\n", - "\n", - "Use W&B Prompts to visualize and inspect the execution flow of your LLMs, analyze the inputs and outputs of your LLMs, view the intermediate results and securely store and manage your prompts and LLM chain configurations.\n", - "\n", - "#### [🪄 View Prompts In Action](https://wandb.ai/timssweeney/prompts-demo/)\n", - "\n", - "**In this notebook we will demostrate W&B Prompts:**\n", - "\n", - "- Using our 1-line LangChain integration\n", - "- Using our Trace class when building your own LLM Pipelines\n", - "\n", - "See here for the full [W&B Prompts documentation](https://docs.wandb.ai/guides/prompts)\n" - ] - }, - { - "cell_type": "markdown", - "metadata": { - "id": "A4wI3b_8GVmB" - }, - "source": [ - "## Installation" - ] - }, - { - "cell_type": "code", - "execution_count": 1, - "metadata": { - "id": "nDoIqQ8_GVmB" - }, - "outputs": [], - "source": [ - "!pip install \"wandb>=0.15.4\" -qqq\n", - "!pip install \"langchain>=0.0.218\" openai -qqq" - ] - }, - { - "cell_type": "code", - "execution_count": 2, - "metadata": { - "id": "PcGiSWBSGVmB" - }, - "outputs": [], - "source": [ - "import langchain\n", - "assert langchain.__version__ >= 
\"0.0.218\", \"Please ensure you are using LangChain v0.0.188 or higher\"" - ] - }, - { - "cell_type": "markdown", - "metadata": { - "id": "pbmQIsjJGVmB" - }, - "source": [ - "## Setup\n", - "\n", - "This demo requires that you have an [OpenAI key](https://platform.openai.com)" - ] - }, - { - "cell_type": "code", - "execution_count": 3, - "metadata": { - "id": "ZH4g2B0lGVmB", - "colab": { - "base_uri": "https://localhost:8080/" - }, - "outputId": "22295db6-5369-474d-a8ea-fb45c4c92085" - }, - "outputs": [ - { - "output_type": "stream", - "name": "stdout", - "text": [ - "Paste your OpenAI key from: https://platform.openai.com/account/api-keys\n", - "··········\n", - "OpenAI API key configured\n" - ] - } - ], - "source": [ - "import os\n", - "from getpass import getpass\n", - "\n", - "if os.getenv(\"OPENAI_API_KEY\") is None:\n", - " os.environ[\"OPENAI_API_KEY\"] = getpass(\"Paste your OpenAI key from: https://platform.openai.com/account/api-keys\\n\")\n", - "assert os.getenv(\"OPENAI_API_KEY\", \"\").startswith(\"sk-\"), \"This doesn't look like a valid OpenAI API key\"\n", - "print(\"OpenAI API key configured\")" - ] - }, - { - "cell_type": "markdown", - "metadata": { - "id": "79KOB2EhGVmB" - }, - "source": [ - "# W&B Prompts\n", - "\n", - "W&B Prompts consists of three main components:\n", - "\n", - "**Trace table**: Overview of the inputs and outputs of a chain.\n", - "\n", - "**Trace timeline**: Displays the execution flow of the chain and is color-coded according to component types.\n", - "\n", - "**Model architecture**: View details about the structure of the chain and the parameters used to initialize each component of the chain.\n", - "\n", - "After running this section, you will see a new panel automatically created in your workspace, showing each execution, the trace, and the model architecture" - ] - }, - { - "cell_type": "markdown", - "metadata": { - "id": "5kxmdm3zGVmC" - }, - "source": [ - "\"Weights" - ] - }, - { - "cell_type": "markdown", - 
"metadata": { - "id": "9u97K5vVGVmC" - }, - "source": [ - "## Maths with LangChain" - ] - }, - { - "cell_type": "markdown", - "metadata": { - "id": "oneRFmv6GVmC" - }, - "source": [ - "Set the `LANGCHAIN_WANDB_TRACING` environment variable as well as any other relevant [W&B environment variables](https://docs.wandb.ai/guides/track/environment-variables). This could includes a W&B project name, team name, and more. See [wandb.init](https://docs.wandb.ai/ref/python/init) for a full list of arguments." - ] - }, - { - "cell_type": "code", - "execution_count": 4, - "metadata": { - "id": "ACl-rMtAGVmC" - }, - "outputs": [], - "source": [ - "os.environ[\"LANGCHAIN_WANDB_TRACING\"] = \"true\"\n", - "os.environ[\"WANDB_PROJECT\"] = \"langchain-testing\"" - ] - }, - { - "cell_type": "code", - "execution_count": 5, - "metadata": { - "id": "csp3MXG4GVmC" - }, - "outputs": [], - "source": [ - "from langchain.chat_models import ChatOpenAI\n", - "from langchain.agents import load_tools, initialize_agent, AgentType" - ] - }, - { - "cell_type": "markdown", - "metadata": { - "id": "2hWU2GcAGVmC" - }, - "source": [ - "Create a standard math Agent using LangChain" - ] - }, - { - "cell_type": "code", - "execution_count": 6, - "metadata": { - "id": "l_JkVMlRGVmC" - }, - "outputs": [], - "source": [ - "llm = ChatOpenAI(temperature=0)\n", - "tools = load_tools([\"llm-math\"], llm=llm)\n", - "math_agent = initialize_agent(tools,\n", - " llm,\n", - " agent=AgentType.ZERO_SHOT_REACT_DESCRIPTION)" - ] - }, - { - "cell_type": "markdown", - "metadata": { - "id": "9FFviwCPGVmC" - }, - "source": [ - "Use LangChain as normal by calling your Agent.\n", - "\n", - " You will see a Weights & Biases run start and you will be asked for your [Weights & Biases API key](wwww.wandb.ai/authorize). Once your enter your API key, the inputs and outputs of your Agent calls will start to be streamed to the Weights & Biases App." 
- ] - }, - { - "cell_type": "code", - "execution_count": 7, - "metadata": { - "id": "y-RHjVN4GVmC", - "colab": { - "base_uri": "https://localhost:8080/", - "height": 178 - }, - "outputId": "5ccd5f32-6137-46c3-9abd-d458dbdbacca" - }, - "outputs": [ - { - "output_type": "stream", - "name": "stderr", - "text": [ - "\u001b[34m\u001b[1mwandb\u001b[0m: Streaming LangChain activity to W&B at https://wandb.ai/carey/langchain-testing/runs/lcznj5lg\n", - "\u001b[34m\u001b[1mwandb\u001b[0m: `WandbTracer` is currently in beta.\n", - "\u001b[34m\u001b[1mwandb\u001b[0m: Please report any issues to https://github.com/wandb/wandb/issues with the tag `langchain`.\n" - ] - }, - { - "output_type": "stream", - "name": "stdout", - "text": [ - "LLMMathChain._evaluate(\"\n", - "import math\n", - "math.sqrt(5.4)\n", - "\") raised error: invalid syntax (, line 1). Please try again with a valid numerical expression\n", - "0.005720801417544866\n", - "0.15096209512635608\n" - ] - } - ], - "source": [ - "# some sample maths questions\n", - "questions = [\n", - " \"Find the square root of 5.4.\",\n", - " \"What is 3 divided by 7.34 raised to the power of pi?\",\n", - " \"What is the sin of 0.47 radians, divided by the cube root of 27?\"\n", - "]\n", - "\n", - "for question in questions:\n", - " try:\n", - " # call your Agent as normal\n", - " answer = math_agent.run(question)\n", - " print(answer)\n", - " except Exception as e:\n", - " # any errors will be also logged to Weights & Biases\n", - " print(e)\n", - " pass" - ] - }, - { - "cell_type": "markdown", - "metadata": { - "id": "SNYFSaUrGVmC" - }, - "source": [ - "Once each Agent execution completes, all calls in your LangChain object will be logged to Weights & Biases" - ] - }, - { - "cell_type": "markdown", - "metadata": { - "id": "m0bL1xpkGVmC" - }, - "source": [ - "### LangChain Context Manager\n", - "Depending on your use case, you might instead prefer to use a context manager to manage your logging to W&B.\n", - "\n", - "**✨ New: 
Custom columns** can be logged directly to W&B to display in the same Trace Table with this snippet:\n", - "```python\n", - "import wandb\n", - "wandb.log(custom_metrics_dict, commit=False})\n", - "```\n", - "Use `commit=False` to make sure that metadata is logged to the same row of the Trace Table as the LangChain output." - ] - }, - { - "cell_type": "code", - "execution_count": 10, - "metadata": { - "id": "7i9Pj1NKGVmC", - "colab": { - "base_uri": "https://localhost:8080/", - "height": 35 - }, - "outputId": "b44f3ae7-fd49-437f-af7b-fb8f82056bd0" - }, - "outputs": [ - { - "output_type": "execute_result", - "data": { - "text/plain": [ - "'1.0891804557407723'" - ], - "application/vnd.google.colaboratory.intrinsic+json": { - "type": "string" - } - }, - "metadata": {}, - "execution_count": 10 - } - ], - "source": [ - "from langchain.callbacks import wandb_tracing_enabled\n", - "import wandb # To enable custom column logging with wandb.run.log()\n", - "\n", - "# unset the environment variable and use a context manager instead\n", - "if \"LANGCHAIN_WANDB_TRACING\" in os.environ:\n", - " del os.environ[\"LANGCHAIN_WANDB_TRACING\"]\n", - "\n", - "# enable tracing using a context manager\n", - "with wandb_tracing_enabled():\n", - " for i in range (10):\n", - " # Log any custom columns you'd like to add to the Trace Table\n", - " wandb.log({\"custom_column\": i}, commit=False)\n", - " try:\n", - " math_agent.run(f\"What is {i} raised to .123243 power?\") # this should be traced\n", - " except:\n", - " pass\n", - "\n", - "math_agent.run(\"What is 2 raised to .123243 power?\") # this should not be traced" - ] - }, - { - "cell_type": "markdown", - "metadata": { - "id": "JDLzoorhGVmC" - }, - "source": [ - "# Non-Lang Chain Implementation\n", - "\n", - "\n", - "A W&B Trace is created by logging 1 or more \"spans\". A root span is expected, which can accept nested child spans, which can in turn accept their own child spans. 
A Span represents a unit of work, Spans can have type `AGENT`, `TOOL`, `LLM` or `CHAIN`\n", - "\n", - "When logging with Trace, a single W&B run can have multiple calls to a LLM, Tool, Chain or Agent logged to it, there is no need to start a new W&B run after each generation from your model or pipeline, instead each call will be appended to the Trace Table.\n", - "\n", - "In this quickstart, we will how to log a single call to an OpenAI model to W&B Trace as a single span. Then we will show how to log a more complex series of nested spans.\n", - "\n", - "## Logging with W&B Trace\n", - "A high-level Trace api is available from the [`wandb-addon`](https://github.com/soumik12345/wandb-addons) community library from [@soumik12345](https://github.com/soumik12345). This will be replaced by a wandb-native integration shortly." - ] - }, - { - "cell_type": "code", - "execution_count": null, - "metadata": { - "id": "FO3Kf2ngGVmC" - }, - "outputs": [], - "source": [ - "# Install wandb-addons\n", - "!git clone https://github.com/soumik12345/wandb-addons.git\n", - "!pip install ./wandb-addons[prompts] openai wandb -qqq" - ] - }, - { - "cell_type": "markdown", - "metadata": { - "id": "7z98yfoqGVmD" - }, - "source": [ - "Call wandb.init to start a W&B run. Here you can pass a W&B project name as well as an entity name (if logging to a W&B Team), as well as a config and more. See wandb.init for the full list of arguments.\n", - "\n", - "You will see a Weights & Biases run start and be asked for your [Weights & Biases API key](wwww.wandb.ai/authorize). Once your enter your API key, the inputs and outputs of your Agent calls will start to be streamed to the Weights & Biases App.\n", - "\n", - "**Note:** A W&B run supports logging as many traces you needed to a single run, i.e. 
you can make multiple calls of `run.log` without the need to create a new run each time" - ] - }, - { - "cell_type": "code", - "execution_count": null, - "metadata": { - "id": "ZcvgzZ55GVmD" - }, - "outputs": [], - "source": [ - "import wandb\n", - "\n", - "# start a wandb run to log to\n", - "wandb.init(project=\"trace-example\")" - ] - }, - { - "cell_type": "markdown", - "metadata": { - "id": "4_3Wrg2YGVmD" - }, - "source": [ - "You can also set the entity argument in wandb.init if logging to a W&B Team.\n", - "\n", - "### Logging a single Span\n", - "Now we will query OpenAI times and log the results to a W&B Trace. We will log the inputs and outputs, start and end times, whether the OpenAI call was successful, the token usage, and additional metadata.\n", - "\n", - "You can see the full description of the arguments to the Trace class [here](https://soumik12345.github.io/wandb-addons/prompts/tracer/)." - ] + "cells": [ + { + "cell_type": "markdown", + "metadata": { + "id": "e-ZYaV5KGVmA" + }, + "source": [ + "\"Open\n", + "" + ] + }, + { + "cell_type": "markdown", + "metadata": { + "id": "gJSVEAGWGVmA" + }, + "source": [ + "\"Weights\n", + "" + ] + }, + { + "cell_type": "markdown", + "metadata": { + "id": "9f7yMKLwGVmA" + }, + "source": [ + "**[Weights & Biases Prompts](https://docs.wandb.ai/guides/prompts?utm_source=code&utm_medium=colab&utm_campaign=prompts)** is a suite of LLMOps tools built for the development of LLM-powered applications.\n", + "\n", + "Use W&B Prompts to visualize and inspect the execution flow of your LLMs, analyze the inputs and outputs of your LLMs, view the intermediate results and securely store and manage your prompts and LLM chain configurations.\n", + "\n", + "#### [🪄 View Prompts In Action](https://wandb.ai/timssweeney/prompts-demo/)\n", + "\n", + "**In this notebook we will demostrate W&B Prompts:**\n", + "\n", + "- Using our 1-line LangChain integration\n", + "- Using our Trace class when building your own LLM Pipelines\n", + 
"\n", + "See here for the full [W&B Prompts documentation](https://docs.wandb.ai/guides/prompts)\n" + ] + }, + { + "cell_type": "markdown", + "metadata": { + "id": "A4wI3b_8GVmB" + }, + "source": [ + "## Installation" + ] + }, + { + "cell_type": "code", + "execution_count": 1, + "metadata": { + "id": "nDoIqQ8_GVmB" + }, + "outputs": [], + "source": [ + "!pip install \"wandb>=0.15.4\" -qqq\n", + "!pip install \"langchain>=0.0.218\" openai -qqq" + ] + }, + { + "cell_type": "code", + "execution_count": 2, + "metadata": { + "id": "PcGiSWBSGVmB" + }, + "outputs": [], + "source": [ + "import langchain\n", + "assert langchain.__version__ >= \"0.0.218\", \"Please ensure you are using LangChain v0.0.188 or higher\"" + ] + }, + { + "cell_type": "markdown", + "metadata": { + "id": "pbmQIsjJGVmB" + }, + "source": [ + "## Setup\n", + "\n", + "This demo requires that you have an [OpenAI key](https://platform.openai.com)" + ] + }, + { + "cell_type": "code", + "execution_count": 3, + "metadata": { + "colab": { + "base_uri": "https://localhost:8080/" }, + "id": "ZH4g2B0lGVmB", + "outputId": "22295db6-5369-474d-a8ea-fb45c4c92085" + }, + "outputs": [ { - "cell_type": "code", - "execution_count": null, - "metadata": { - "id": "q2pkMhpMGVmD" - }, - "outputs": [], - "source": [ - "import openai\n", - "import datetime\n", - "from wandb_addons.prompts import Trace\n", - "\n", - "openai.api_key = os.environ[\"OPENAI_API_KEY\"]\n", - "\n", - "# define your conifg\n", - "model_name = \"gpt-3.5-turbo\"\n", - "temperature = 0.7\n", - "system_message = \"You are a helpful assistant that always replies in 3 concise bullet points using markdown.\"\n", - "\n", - "queries_ls = [\n", - " \"What is the capital of France?\",\n", - " \"How do I boil an egg?\" * 10000, # deliberately trigger an openai error\n", - " \"What to do if the aliens arrive?\"\n", - "]\n", - "\n", - "for query in queries_ls:\n", - " messages=[\n", - " {\"role\": \"system\", \"content\": system_message},\n", - " {\"role\": 
\"user\", \"content\": query}\n", - " ]\n", - "\n", - " start_time_ms = datetime.datetime.now().timestamp() * 1000\n", - " try:\n", - " response = openai.ChatCompletion.create(model=model_name,\n", - " messages=messages,\n", - " temperature=temperature\n", - " )\n", - "\n", - " end_time_ms = round(datetime.datetime.now().timestamp() * 1000) # logged in milliseconds\n", - " status=\"success\"\n", - " status_message=None,\n", - " response_text = response[\"choices\"][0][\"message\"][\"content\"]\n", - " token_usage = response[\"usage\"].to_dict()\n", - "\n", - "\n", - " except Exception as e:\n", - " end_time_ms = round(datetime.datetime.now().timestamp() * 1000) # logged in milliseconds\n", - " status=\"error\"\n", - " status_message=str(e)\n", - " response_text = \"\"\n", - " token_usage = {}\n", - "\n", - " # create a span in wandb\n", - " root_span = Trace(\n", - " name=\"root_span\",\n", - " kind=\"llm\", # kind can be \"llm\", \"chain\", \"agent\" or \"tool\"\n", - " status_code=status,\n", - " status_message=status_message,\n", - " metadata={\"temperature\": temperature,\n", - " \"token_usage\": token_usage,\n", - " \"model_name\": model_name},\n", - " start_time_ms=start_time_ms,\n", - " end_time_ms=end_time_ms,\n", - " inputs={\"system_prompt\": system_message, \"query\": query},\n", - " outputs={\"response\": response_text},\n", - " )\n", - "\n", - " # log the span to wandb\n", - " root_span.log(name=\"openai_trace\")" - ] + "name": "stdout", + "output_type": "stream", + "text": [ + "Paste your OpenAI key from: https://platform.openai.com/account/api-keys\n", + "··········\n", + "OpenAI API key configured\n" + ] + } + ], + "source": [ + "import os\n", + "from getpass import getpass\n", + "\n", + "if os.getenv(\"OPENAI_API_KEY\") is None:\n", + " os.environ[\"OPENAI_API_KEY\"] = getpass(\"Paste your OpenAI key from: https://platform.openai.com/account/api-keys\\n\")\n", + "assert os.getenv(\"OPENAI_API_KEY\", \"\").startswith(\"sk-\"), \"This doesn't look 
like a valid OpenAI API key\"\n", + "print(\"OpenAI API key configured\")" + ] + }, + { + "cell_type": "markdown", + "metadata": { + "id": "79KOB2EhGVmB" + }, + "source": [ + "# W&B Prompts\n", + "\n", + "W&B Prompts consists of three main components:\n", + "\n", + "**Trace table**: Overview of the inputs and outputs of a chain.\n", + "\n", + "**Trace timeline**: Displays the execution flow of the chain and is color-coded according to component types.\n", + "\n", + "**Model architecture**: View details about the structure of the chain and the parameters used to initialize each component of the chain.\n", + "\n", + "After running this section, you will see a new panel automatically created in your workspace, showing each execution, the trace, and the model architecture" + ] + }, + { + "cell_type": "markdown", + "metadata": { + "id": "5kxmdm3zGVmC" + }, + "source": [ + "\"Weights" + ] + }, + { + "cell_type": "markdown", + "metadata": { + "id": "9u97K5vVGVmC" + }, + "source": [ + "## Maths with LangChain" + ] + }, + { + "cell_type": "markdown", + "metadata": { + "id": "oneRFmv6GVmC" + }, + "source": [ + "Set the `LANGCHAIN_WANDB_TRACING` environment variable as well as any other relevant [W&B environment variables](https://docs.wandb.ai/guides/track/environment-variables). This could includes a W&B project name, team name, and more. See [wandb.init](https://docs.wandb.ai/ref/python/init) for a full list of arguments." 
+ ] + }, + { + "cell_type": "code", + "execution_count": 4, + "metadata": { + "id": "ACl-rMtAGVmC" + }, + "outputs": [], + "source": [ + "os.environ[\"LANGCHAIN_WANDB_TRACING\"] = \"true\"\n", + "os.environ[\"WANDB_PROJECT\"] = \"langchain-testing\"" + ] + }, + { + "cell_type": "code", + "execution_count": 5, + "metadata": { + "id": "csp3MXG4GVmC" + }, + "outputs": [], + "source": [ + "from langchain.chat_models import ChatOpenAI\n", + "from langchain.agents import load_tools, initialize_agent, AgentType" + ] + }, + { + "cell_type": "markdown", + "metadata": { + "id": "2hWU2GcAGVmC" + }, + "source": [ + "Create a standard math Agent using LangChain" + ] + }, + { + "cell_type": "code", + "execution_count": 6, + "metadata": { + "id": "l_JkVMlRGVmC" + }, + "outputs": [], + "source": [ + "llm = ChatOpenAI(temperature=0)\n", + "tools = load_tools([\"llm-math\"], llm=llm)\n", + "math_agent = initialize_agent(tools,\n", + " llm,\n", + " agent=AgentType.ZERO_SHOT_REACT_DESCRIPTION)" + ] + }, + { + "cell_type": "markdown", + "metadata": { + "id": "9FFviwCPGVmC" + }, + "source": [ + "Use LangChain as normal by calling your Agent.\n", + "\n", + " You will see a Weights & Biases run start and you will be asked for your [Weights & Biases API key](wwww.wandb.ai/authorize). Once your enter your API key, the inputs and outputs of your Agent calls will start to be streamed to the Weights & Biases App." 
+ ] + }, + { + "cell_type": "code", + "execution_count": 7, + "metadata": { + "colab": { + "base_uri": "https://localhost:8080/", + "height": 178 }, + "id": "y-RHjVN4GVmC", + "outputId": "5ccd5f32-6137-46c3-9abd-d458dbdbacca" + }, + "outputs": [ { - "cell_type": "markdown", - "metadata": { - "id": "XFcwFgaDGVmD" - }, - "source": [ - "### Logging a LLM pipeline using nested Spans\n", - "\n", - "In this example we will simulate an Agent being called, which then calls a LLM Chain, which calls an OpenAI LLM and then the Agent \"calls\" a Calculator tool.\n", - "\n", - "The inputs, outputs and metadata for each step in the execution of our \"Agent\" is logged in its own span. Spans can have child" - ] + "name": "stderr", + "output_type": "stream", + "text": [ + "\u001b[34m\u001b[1mwandb\u001b[0m: Streaming LangChain activity to W&B at https://wandb.ai/carey/langchain-testing/runs/lcznj5lg\n", + "\u001b[34m\u001b[1mwandb\u001b[0m: `WandbTracer` is currently in beta.\n", + "\u001b[34m\u001b[1mwandb\u001b[0m: Please report any issues to https://github.com/wandb/wandb/issues with the tag `langchain`.\n" + ] }, { - "cell_type": "code", - "execution_count": null, - "metadata": { - "id": "ACMaGuYUGVmD" - }, - "outputs": [], - "source": [ - "import time\n", - "\n", - "openai.api_key = os.environ[\"OPENAI_API_KEY\"]\n", - "\n", - "# The query our agent has to answer\n", - "query = \"How many days until the next US election?\"\n", - "\n", - "# part 1 - an Agent is started...\n", - "start_time_ms = round(datetime.datetime.now().timestamp() * 1000)\n", - "\n", - "root_span = Trace(\n", - " name=\"MyAgent\",\n", - " kind=\"agent\",\n", - " start_time_ms=start_time_ms,\n", - " metadata={\"user\": \"optimus_12\"})\n", - "\n", - "\n", - "# part 2 - The Agent calls into a LLMChain..\n", - "chain_span = Trace(\n", - " name=\"LLMChain\",\n", - " kind=\"chain\",\n", - " start_time_ms=start_time_ms)\n", - "\n", - "# add the Chain span as a child of the root\n", - 
"root_span.add_child(chain_span)\n", - "\n", - "\n", - "# part 3 - the LLMChain calls an OpenAI LLM...\n", - "messages=[\n", - " {\"role\": \"system\", \"content\": system_message},\n", - " {\"role\": \"user\", \"content\": query}\n", - "]\n", - "\n", - "response = openai.ChatCompletion.create(model=model_name,\n", - " messages=messages,\n", - " temperature=temperature)\n", - "\n", - "llm_end_time_ms = round(datetime.datetime.now().timestamp() * 1000)\n", - "response_text = response[\"choices\"][0][\"message\"][\"content\"]\n", - "token_usage = response[\"usage\"].to_dict()\n", - "\n", - "llm_span = Trace(\n", - " name=\"OpenAI\",\n", - " kind=\"llm\",\n", - " status_code=\"success\",\n", - " metadata={\"temperature\":temperature,\n", - " \"token_usage\": token_usage,\n", - " \"model_name\":model_name},\n", - " start_time_ms=start_time_ms,\n", - " end_time_ms=llm_end_time_ms,\n", - " inputs={\"system_prompt\":system_message, \"query\":query},\n", - " outputs={\"response\": response_text},\n", - " )\n", - "\n", - "# add the LLM span as a child of the Chain span...\n", - "chain_span.add_child(llm_span)\n", - "\n", - "# update the end time of the Chain span\n", - "chain_span.add_inputs_and_outputs(\n", - " inputs={\"query\":query},\n", - " outputs={\"response\": response_text})\n", - "\n", - "# update the Chain span's end time\n", - "chain_span._span.end_time_ms = llm_end_time_ms\n", - "\n", - "\n", - "# part 4 - the Agent then calls a Tool...\n", - "time.sleep(3)\n", - "days_to_election = 117\n", - "tool_end_time_ms = round(datetime.datetime.now().timestamp() * 1000)\n", - "\n", - "# create a Tool span\n", - "tool_span = Trace(\n", - " name=\"Calculator\",\n", - " kind=\"tool\",\n", - " status_code=\"success\",\n", - " start_time_ms=llm_end_time_ms,\n", - " end_time_ms=tool_end_time_ms,\n", - " inputs={\"input\": response_text},\n", - " outputs={\"result\": days_to_election})\n", - "\n", - "# add the TOOL span as a child of the root\n", - 
"root_span.add_child(tool_span)\n", - "\n", - "\n", - "# part 5 - the final results from the tool are added\n", - "root_span.add_inputs_and_outputs(inputs={\"query\": query},\n", - " outputs={\"result\": days_to_election})\n", - "root_span._span.end_time_ms = tool_end_time_ms\n", - "\n", - "\n", - "# part 6 - log all spans to W&B by logging the root span\n", - "root_span.log(name=\"openai_trace\")" - ] + "name": "stdout", + "output_type": "stream", + "text": [ + "LLMMathChain._evaluate(\"\n", + "import math\n", + "math.sqrt(5.4)\n", + "\") raised error: invalid syntax (, line 1). Please try again with a valid numerical expression\n", + "0.005720801417544866\n", + "0.15096209512635608\n" + ] + } + ], + "source": [ + "# some sample maths questions\n", + "questions = [\n", + " \"Find the square root of 5.4.\",\n", + " \"What is 3 divided by 7.34 raised to the power of pi?\",\n", + " \"What is the sin of 0.47 radians, divided by the cube root of 27?\"\n", + "]\n", + "\n", + "for question in questions:\n", + " try:\n", + " # call your Agent as normal\n", + " answer = math_agent.run(question)\n", + " print(answer)\n", + " except Exception as e:\n", + " # any errors will be also logged to Weights & Biases\n", + " print(e)\n", + " pass" + ] + }, + { + "cell_type": "markdown", + "metadata": { + "id": "SNYFSaUrGVmC" + }, + "source": [ + "Once each Agent execution completes, all calls in your LangChain object will be logged to Weights & Biases" + ] + }, + { + "cell_type": "markdown", + "metadata": { + "id": "m0bL1xpkGVmC" + }, + "source": [ + "### LangChain Context Manager\n", + "Depending on your use case, you might instead prefer to use a context manager to manage your logging to W&B.\n", + "\n", + "**✨ New: Custom columns** can be logged directly to W&B to display in the same Trace Table with this snippet:\n", + "```python\n", + "import wandb\n", + "wandb.log(custom_metrics_dict, commit=False})\n", + "```\n", + "Use `commit=False` to make sure that metadata is logged to 
the same row of the Trace Table as the LangChain output." + ] + }, + { + "cell_type": "code", + "execution_count": 10, + "metadata": { + "colab": { + "base_uri": "https://localhost:8080/", + "height": 35 }, + "id": "7i9Pj1NKGVmC", + "outputId": "b44f3ae7-fd49-437f-af7b-fb8f82056bd0" + }, + "outputs": [ { - "cell_type": "markdown", - "metadata": { - "id": "nBFVwawPGVmD" + "data": { + "application/vnd.google.colaboratory.intrinsic+json": { + "type": "string" }, - "source": [ - "Once each Agent execution completes, all calls in your LangChain object will be logged to Weights & Biases" + "text/plain": [ + "'1.0891804557407723'" ] + }, + "execution_count": 10, + "metadata": {}, + "output_type": "execute_result" } - ], - "metadata": { - "accelerator": "GPU", - "colab": { - "provenance": [], - "include_colab_link": true - }, - "kernelspec": { - "display_name": "Python 3", - "name": "python3" - } + ], + "source": [ + "from langchain.callbacks import wandb_tracing_enabled\n", + "import wandb # To enable custom column logging with wandb.run.log()\n", + "\n", + "# unset the environment variable and use a context manager instead\n", + "if \"LANGCHAIN_WANDB_TRACING\" in os.environ:\n", + " del os.environ[\"LANGCHAIN_WANDB_TRACING\"]\n", + "\n", + "# enable tracing using a context manager\n", + "with wandb_tracing_enabled():\n", + " for i in range (10):\n", + " # Log any custom columns you'd like to add to the Trace Table\n", + " wandb.log({\"custom_column\": i}, commit=False)\n", + " try:\n", + " math_agent.run(f\"What is {i} raised to .123243 power?\") # this should be traced\n", + " except:\n", + " pass\n", + "\n", + "math_agent.run(\"What is 2 raised to .123243 power?\") # this should not be traced" + ] + }, + { + "cell_type": "markdown", + "metadata": { + "id": "JDLzoorhGVmC" + }, + "source": [ + "# Non-Lang Chain Implementation\n", + "\n", + "\n", + "A W&B Trace is created by logging 1 or more \"spans\". 
A root span is expected, which can accept nested child spans, which can in turn accept their own child spans. A Span represents a unit of work, Spans can have type `AGENT`, `TOOL`, `LLM` or `CHAIN`\n", + "\n", + "When logging with Trace, a single W&B run can have multiple calls to a LLM, Tool, Chain or Agent logged to it, there is no need to start a new W&B run after each generation from your model or pipeline, instead each call will be appended to the Trace Table.\n", + "\n", + "In this quickstart, we will how to log a single call to an OpenAI model to W&B Trace as a single span. Then we will show how to log a more complex series of nested spans.\n", + "\n", + "## Logging with W&B Trace\n", + "A high-level Trace api is available from the [`wandb-addon`](https://github.com/soumik12345/wandb-addons) community library from [@soumik12345](https://github.com/soumik12345). This will be replaced by a wandb-native integration shortly." + ] + }, + { + "cell_type": "code", + "execution_count": null, + "metadata": { + "id": "FO3Kf2ngGVmC" + }, + "outputs": [], + "source": [ + "# Install wandb-addons\n", + "!git clone https://github.com/soumik12345/wandb-addons.git\n", + "!pip install ./wandb-addons[prompts] openai wandb -qqq" + ] + }, + { + "cell_type": "markdown", + "metadata": { + "id": "7z98yfoqGVmD" + }, + "source": [ + "Call wandb.init to start a W&B run. Here you can pass a W&B project name as well as an entity name (if logging to a W&B Team), as well as a config and more. See wandb.init for the full list of arguments.\n", + "\n", + "You will see a Weights & Biases run start and be asked for your [Weights & Biases API key](wwww.wandb.ai/authorize). Once your enter your API key, the inputs and outputs of your Agent calls will start to be streamed to the Weights & Biases App.\n", + "\n", + "**Note:** A W&B run supports logging as many traces you needed to a single run, i.e. 
you can make multiple calls of `run.log` without the need to create a new run each time" + ] + }, + { + "cell_type": "code", + "execution_count": null, + "metadata": { + "id": "ZcvgzZ55GVmD" + }, + "outputs": [], + "source": [ + "import wandb\n", + "\n", + "# start a wandb run to log to\n", + "wandb.init(project=\"trace-example\")" + ] + }, + { + "cell_type": "markdown", + "metadata": { + "id": "4_3Wrg2YGVmD" + }, + "source": [ + "You can also set the entity argument in wandb.init if logging to a W&B Team.\n", + "\n", + "### Logging a single Span\n", + "Now we will query OpenAI times and log the results to a W&B Trace. We will log the inputs and outputs, start and end times, whether the OpenAI call was successful, the token usage, and additional metadata.\n", + "\n", + "You can see the full description of the arguments to the Trace class [here](https://soumik12345.github.io/wandb-addons/prompts/tracer/)." + ] + }, + { + "cell_type": "code", + "execution_count": null, + "metadata": { + "id": "q2pkMhpMGVmD" + }, + "outputs": [], + "source": [ + "import openai\n", + "import datetime\n", + "from wandb_addons.prompts import Trace\n", + "\n", + "openai.api_key = os.environ[\"OPENAI_API_KEY\"]\n", + "\n", + "# define your conifg\n", + "model_name = \"gpt-3.5-turbo\"\n", + "temperature = 0.7\n", + "system_message = \"You are a helpful assistant that always replies in 3 concise bullet points using markdown.\"\n", + "\n", + "queries_ls = [\n", + " \"What is the capital of France?\",\n", + " \"How do I boil an egg?\" * 10000, # deliberately trigger an openai error\n", + " \"What to do if the aliens arrive?\"\n", + "]\n", + "\n", + "for query in queries_ls:\n", + " messages=[\n", + " {\"role\": \"system\", \"content\": system_message},\n", + " {\"role\": \"user\", \"content\": query}\n", + " ]\n", + "\n", + " start_time_ms = datetime.datetime.now().timestamp() * 1000\n", + " try:\n", + " response = openai.ChatCompletion.create(model=model_name,\n", + " messages=messages,\n", + 
" temperature=temperature\n", + " )\n", + "\n", + " end_time_ms = round(datetime.datetime.now().timestamp() * 1000) # logged in milliseconds\n", + " status=\"success\"\n", + " status_message=None,\n", + " response_text = response[\"choices\"][0][\"message\"][\"content\"]\n", + " token_usage = response[\"usage\"].to_dict()\n", + "\n", + "\n", + " except Exception as e:\n", + " end_time_ms = round(datetime.datetime.now().timestamp() * 1000) # logged in milliseconds\n", + " status=\"error\"\n", + " status_message=str(e)\n", + " response_text = \"\"\n", + " token_usage = {}\n", + "\n", + " # create a span in wandb\n", + " root_span = Trace(\n", + " name=\"root_span\",\n", + " kind=\"llm\", # kind can be \"llm\", \"chain\", \"agent\" or \"tool\"\n", + " status_code=status,\n", + " status_message=status_message,\n", + " metadata={\"temperature\": temperature,\n", + " \"token_usage\": token_usage,\n", + " \"model_name\": model_name},\n", + " start_time_ms=start_time_ms,\n", + " end_time_ms=end_time_ms,\n", + " inputs={\"system_prompt\": system_message, \"query\": query},\n", + " outputs={\"response\": response_text},\n", + " )\n", + "\n", + " # log the span to wandb\n", + " root_span.log(name=\"openai_trace\")" + ] + }, + { + "cell_type": "markdown", + "metadata": { + "id": "XFcwFgaDGVmD" + }, + "source": [ + "### Logging a LLM pipeline using nested Spans\n", + "\n", + "In this example we will simulate an Agent being called, which then calls a LLM Chain, which calls an OpenAI LLM and then the Agent \"calls\" a Calculator tool.\n", + "\n", + "The inputs, outputs and metadata for each step in the execution of our \"Agent\" is logged in its own span. 
Spans can have child" + ] + }, + { + "cell_type": "code", + "execution_count": null, + "metadata": { + "id": "ACMaGuYUGVmD" + }, + "outputs": [], + "source": [ + "import time\n", + "\n", + "openai.api_key = os.environ[\"OPENAI_API_KEY\"]\n", + "\n", + "# The query our agent has to answer\n", + "query = \"How many days until the next US election?\"\n", + "\n", + "# part 1 - an Agent is started...\n", + "start_time_ms = round(datetime.datetime.now().timestamp() * 1000)\n", + "\n", + "root_span = Trace(\n", + " name=\"MyAgent\",\n", + " kind=\"agent\",\n", + " start_time_ms=start_time_ms,\n", + " metadata={\"user\": \"optimus_12\"})\n", + "\n", + "\n", + "# part 2 - The Agent calls into a LLMChain..\n", + "chain_span = Trace(\n", + " name=\"LLMChain\",\n", + " kind=\"chain\",\n", + " start_time_ms=start_time_ms)\n", + "\n", + "# add the Chain span as a child of the root\n", + "root_span.add_child(chain_span)\n", + "\n", + "\n", + "# part 3 - the LLMChain calls an OpenAI LLM...\n", + "messages=[\n", + " {\"role\": \"system\", \"content\": system_message},\n", + " {\"role\": \"user\", \"content\": query}\n", + "]\n", + "\n", + "response = openai.ChatCompletion.create(model=model_name,\n", + " messages=messages,\n", + " temperature=temperature)\n", + "\n", + "llm_end_time_ms = round(datetime.datetime.now().timestamp() * 1000)\n", + "response_text = response[\"choices\"][0][\"message\"][\"content\"]\n", + "token_usage = response[\"usage\"].to_dict()\n", + "\n", + "llm_span = Trace(\n", + " name=\"OpenAI\",\n", + " kind=\"llm\",\n", + " status_code=\"success\",\n", + " metadata={\"temperature\":temperature,\n", + " \"token_usage\": token_usage,\n", + " \"model_name\":model_name},\n", + " start_time_ms=start_time_ms,\n", + " end_time_ms=llm_end_time_ms,\n", + " inputs={\"system_prompt\":system_message, \"query\":query},\n", + " outputs={\"response\": response_text},\n", + " )\n", + "\n", + "# add the LLM span as a child of the Chain span...\n", + 
"chain_span.add_child(llm_span)\n", + "\n", + "# update the end time of the Chain span\n", + "chain_span.add_inputs_and_outputs(\n", + " inputs={\"query\":query},\n", + " outputs={\"response\": response_text})\n", + "\n", + "# update the Chain span's end time\n", + "chain_span._span.end_time_ms = llm_end_time_ms\n", + "\n", + "\n", + "# part 4 - the Agent then calls a Tool...\n", + "time.sleep(3)\n", + "days_to_election = 117\n", + "tool_end_time_ms = round(datetime.datetime.now().timestamp() * 1000)\n", + "\n", + "# create a Tool span\n", + "tool_span = Trace(\n", + " name=\"Calculator\",\n", + " kind=\"tool\",\n", + " status_code=\"success\",\n", + " start_time_ms=llm_end_time_ms,\n", + " end_time_ms=tool_end_time_ms,\n", + " inputs={\"input\": response_text},\n", + " outputs={\"result\": days_to_election})\n", + "\n", + "# add the TOOL span as a child of the root\n", + "root_span.add_child(tool_span)\n", + "\n", + "\n", + "# part 5 - the final results from the tool are added\n", + "root_span.add_inputs_and_outputs(inputs={\"query\": query},\n", + " outputs={\"result\": days_to_election})\n", + "root_span._span.end_time_ms = tool_end_time_ms\n", + "\n", + "\n", + "# part 6 - log all spans to W&B by logging the root span\n", + "root_span.log(name=\"openai_trace\")" + ] + }, + { + "cell_type": "markdown", + "metadata": { + "id": "nBFVwawPGVmD" + }, + "source": [ + "Once each Agent execution completes, all calls in your LangChain object will be logged to Weights & Biases" + ] + } + ], + "metadata": { + "accelerator": "GPU", + "colab": { + "include_colab_link": true, + "provenance": [] }, - "nbformat": 4, - "nbformat_minor": 0 + "kernelspec": { + "display_name": "Python 3", + "name": "python3" + } + }, + "nbformat": 4, + "nbformat_minor": 0 } diff --git a/colabs/audiocraft/AudioCraft.ipynb b/colabs/audiocraft/AudioCraft.ipynb index e18c8832..9bb30020 100644 --- a/colabs/audiocraft/AudioCraft.ipynb +++ b/colabs/audiocraft/AudioCraft.ipynb @@ -1,431 +1,431 @@ { - 
"cells": [ - { - "attachments": {}, - "cell_type": "markdown", - "metadata": { - "id": "W-26KlXuiXul" - }, - "source": [ - "\"Weights\n", - "\n", - "\n", - "# 🎸 Generating Music using [Audiocraft](https://github.com/facebookresearch/audiocraft) and W&B 🐝\n", - "\n", - "\n", - "\n", - "[![Open In Colab](https://colab.research.google.com/assets/colab-badge.svg)](https://colab.research.google.com/github/wandb/examples/blob/master/colabs/audiocraft/AudioCraft.ipynb)\n", - "\n", - "In this notebook we demonstrate how you can generate music and other types of audio from text prompts or generate new music from existing music using SoTA models such as [MusicGen](https://github.com/facebookresearch/audiocraft/blob/main/docs/MUSICGEN.md) and [AudioGen](https://github.com/facebookresearch/audiocraft/blob/main/docs/AUDIOGEN.md) from [Audiocraft](https://github.com/facebookresearch/audiocraft) and play and visualize them using [Weights & Biases](https://wandb.ai/site).\n", - "\n", - "If you want to know more about the underlying architectures for MusicGen and AudioGen and explore some cool audio samples generated by these models, you can check out [this W&B report](http://wandb.me/audiocraft_2mp)." 
- ] - }, - { - "cell_type": "code", - "execution_count": null, - "metadata": { - "cellView": "form", - "colab": { - "base_uri": "https://localhost:8080/", - "height": 1000 - }, - "id": "EZU3hg4B1om6", - "outputId": "3311a45d-35c3-49e8-cbd5-4618386fa2a1" - }, - "outputs": [], - "source": [ - "# @title Install AudioCraft + WandB\n", - "!pip install -U git+https://git@github.com/facebookresearch/audiocraft#egg=audiocraft\n", - "!pip install -qq -U wandb" - ] - }, - { - "cell_type": "code", - "execution_count": null, - "metadata": { - "cellView": "form", - "id": "RerQaiZt14r8" - }, - "outputs": [], - "source": [ - "# @title\n", - "import os\n", - "import random\n", - "from tempfile import TemporaryDirectory\n", - "\n", - "from scipy import signal\n", - "from scipy.io import wavfile\n", - "\n", - "import torchaudio\n", - "from audiocraft.models import AudioGen, MusicGen, MultiBandDiffusion\n", - "from audiocraft.data.audio import audio_write\n", - "\n", - "import wandb\n", - "import numpy as np\n", - "from tqdm.auto import tqdm\n", - "from google.colab import files\n", - "import matplotlib.pyplot as plt" - ] - }, - { - "cell_type": "code", - "execution_count": null, - "metadata": { - "cellView": "form", - "colab": { - "base_uri": "https://localhost:8080/", - "height": 162 - }, - "id": "3MTX8GoE7AzN", - "outputId": "ebe733d0-3a21-47e5-d217-89a622cafc62" - }, - "outputs": [], - "source": [ - "# @title ## Audio Generation Configs\n", - "\n", - "# @markdown In this section, you can interact with the user interface to chose the models you want to use to generate audio, prompts and other configs. Once you execute this cell, it initializes a [wandb run](https://docs.wandb.ai/guides/runs) which will be used to automatically log all the generated audio along with all the prompts and configs, to ensure your AI-generated music is never lost and your experiments are always reproducible and easy to share. 
\n", - "\n", - "# @markdown **Note:** If you have provided prompts, you will be prompted to provide an audio file in addition to the prompts to condition the model. If you don't want to provide a file as an additional condition to the model, just press on the `cancel` button.\n", - "\n", - "# @markdown ---\n", - "# @markdown WandB Project Name\n", - "project_name = \"audiocraft\" # @param {type:\"string\"}\n", - "\n", - "wandb.init(project=project_name, job_type=\"musicgen/inference\")\n", - "\n", - "config = wandb.config\n", - "\n", - "# @markdown Select the Model for audio generation supported by [AudioCraft](https://github.com/facebookresearch/audiocraft). You can select either the MusicGen model variants (great for generating music) or the AudioGen model variants (great for generating non-musical audio). Also note that you can run all variants of MusicGen except the `large` one on the free-tier Colab GPU.\n", - "model_name = \"musicgen-small\" # @param [\"musicgen-small\", \"musicgen-medium\", \"musicgen-large\", \"musicgen-melody\", \"audiogen-medium\"]\n", - "config.model_name = \"facebook/\" + model_name if model_name == \"audiogen-medium\" else model_name\n", - "\n", - "# @markdown Whether to enable [MultiBand Diffusion](https://github.com/facebookresearch/audiocraft/blob/main/docs/MBD.md) or not. MultiBand diffusion is a collection of 4 models that can decode tokens from EnCodec tokenizer into waveform audio. 
Note that enabling this increases the time required to generate the audio.\n", - "enable_multi_band_diffusion = True # @param {type:\"boolean\"}\n", - "# config.enable_multi_band_diffusion = enable_multi_band_diffusion\n", - "\n", - "if \"musicgen\" not in model_name:\n", - " wandb.termwarn(\"Multi-band Diffusion is only available for Musicgen\")\n", - " config.enable_multi_band_diffusion = False\n", - "else:\n", - " config.enable_multi_band_diffusion = enable_multi_band_diffusion\n", - "\n", - "# @markdown ---\n", - "# @markdown ## Conditional Generation Configs\n", - "\n", - "# @markdown The prompt for generating audio. You can give multiple prompts separated by `|` in the input. You can also leave it blank for unconditional generation.\n", - "config.prompts = \"happy rock | energetic EDM | sad jazz\" # @param {type:\"string\"}\n", - "\n", - "descriptions = [prompt.strip() for prompt in config.prompts.split(\"|\")]\n", - "config.is_unconditional = config.prompts.strip() == \"\"\n", - "\n", - "input_audio, input_sampling_rate, wandb_input_audio = None, None, None\n", - "if not config.is_unconditional:\n", - " input_audio_file = files.upload()\n", - " if input_audio_file != {}:\n", - " if config.model_name == \"facebook/audiogen-medium\":\n", - " error = f\"{config.model_name} does not support audio-based conditioning\"\n", - " raise ValueError(error)\n", - " wandb_input_audio = wandb.Audio(list(input_audio_file.keys())[0])\n", - " input_audio, input_sampling_rate = torchaudio.load(\n", - " list(input_audio_file.keys())[0]\n", - " )\n", - " config.input_audio_available = True\n", - " else:\n", - " config.input_audio_available = False\n", - "else:\n", - " if config.model_name == \"facebook/audiogen-medium\":\n", - " error = f\"{config.model_name} does not support unconditional generration\"\n", - " raise ValueError(error)\n", - "\n", - "# @markdown Number of audio samples generated, this is relevant only for unconditional generation, i.e, if `config.prompts` is left 
blank.\n", - "config.num_samples = 4 # @param {type:\"slider\", min:1, max:10, step:1}\n", - "\n", - "# @markdown Specify the random seed\n", - "seed = None # @param {type:\"raw\"}\n", - "\n", - "max_seed = int(1024 * 1024 * 1024)\n", - "if not isinstance(seed, int):\n", - " seed = random.randint(1, max_seed)\n", - "if seed < 0:\n", - " seed = - seed\n", - "seed = seed % max_seed\n", - "config.seed = seed\n", - "\n", - "# @markdown ---\n", - "# @markdown ## Generation Parameters\n", - "# @markdown Use sampling if True, else do argmax decoding\n", - "config.use_sampling = True # @param {type:\"boolean\"}\n", - "\n", - "# @markdown `top_k` used for sampling; limits us to `k` number of of the top tokens to consider.\n", - "config.top_k = 250 # @param {type:\"slider\", min:0, max:1000, step:1}\n", - "\n", - "# @markdown `top_p` used for sampling; limits us to the top tokens within a probability mass `p`\n", - "config.top_p = 0.0 # @param {type:\"slider\", min:0, max:1.0, step:0.01}\n", - "\n", - "# @markdown Softmax temperature parameter\n", - "config.temperature = 1.0 # @param {type:\"slider\", min:0, max:1.0, step:0.01}\n", - "\n", - "# @markdown Duration of the generated waveform\n", - "config.duration = 10 # @param {type:\"slider\", min:1, max:30, step:1}\n", - "\n", - "# @markdown Coefficient used for classifier free guidance\n", - "config.cfg_coef = 3 # @param {type:\"slider\", min:1, max:100, step:1}\n", - "\n", - "# @markdown Whether to perform 2 forward for Classifier Free Guidance instead of batching together the two. This has some impact on how things are padded but seems to have little impact in practice.\n", - "config.two_step_cfg = False # @param {type:\"boolean\"}\n", - "\n", - "# @markdown When doing extended generation (i.e. more than 30 seconds), by how much should we extend the audio each time. 
Larger values will mean less context is preserved, and shorter value will require extra computations.\n", - "config.extend_stride = 0 # @param {type:\"slider\", min:0, max:30, step:1}" - ] - }, - { - "cell_type": "code", - "execution_count": null, - "metadata": { - "cellView": "form", - "colab": { - "base_uri": "https://localhost:8080/" - }, - "id": "SfM8rhVX7ES9", - "outputId": "a935173b-382a-4514-97fc-eec12e188379" - }, - "outputs": [], - "source": [ - "# @title Generate Audio using MusicGen\n", - "\n", - "# @markdown In this section, the audio is generated using the configs, specified in the aforementioned section. If you wish to peek behind the curtain and checkout the code, click on the `Show Code` button. In order to know about the different APIs for audio generation, visit the [official audiocraft documentations](https://facebookresearch.github.io/audiocraft/api_docs/audiocraft/index.html).\n", - "\n", - "model = None\n", - "if config.model_name == \"facebook/audiogen-medium\":\n", - " model = AudioGen.get_pretrained(config.model_name)\n", - "elif \"musicgen\" in config.model_name:\n", - " model = MusicGen.get_pretrained(config.model_name.split(\"-\")[-1])\n", - "\n", - "multi_band_diffusion = None\n", - "if config.enable_multi_band_diffusion:\n", - " multi_band_diffusion = MultiBandDiffusion.get_mbd_musicgen()\n", - "\n", - "model.set_generation_params(\n", - " use_sampling=config.use_sampling,\n", - " top_k=config.top_k,\n", - " top_p=config.top_p,\n", - " temperature=config.temperature,\n", - " duration=config.duration,\n", - " cfg_coef=config.cfg_coef,\n", - " two_step_cfg=config.two_step_cfg,\n", - " extend_stride=config.extend_stride\n", - ")\n", - "\n", - "generated_wav, tokens = None, None\n", - "if config.is_unconditional:\n", - " if input_audio is None:\n", - " if \"musicgen\" in config.model_name:\n", - " generated_wav, tokens = model.generate_unconditional(\n", - " num_samples=config.num_samples,\n", - " progress=True,\n", - " 
return_tokens=True\n", - " )\n", - " else:\n", - " generated_wav = model.generate_unconditional(\n", - " num_samples=config.num_samples,\n", - " progress=True,\n", - " )\n", - " else:\n", - " if \"musicgen\" in config.model_name:\n", - " generated_wav, tokens = model.generate_with_chroma(\n", - " descriptions,\n", - " input_audio[None].expand(3, -1, -1),\n", - " input_sampling_rate,\n", - " return_tokens=True\n", - " )\n", - " else:\n", - " generated_wav = model.generate_with_chroma(\n", - " descriptions,\n", - " input_audio[None].expand(3, -1, -1),\n", - " input_sampling_rate,\n", - " )\n", - "else:\n", - " if \"musicgen\" in config.model_name:\n", - " generated_wav, tokens = model.generate(\n", - " descriptions,\n", - " progress=True,\n", - " return_tokens=True\n", - " )\n", - " else:\n", - " generated_wav = model.generate(\n", - " descriptions,\n", - " progress=True,\n", - " )\n", - "\n", - "generated_wav_diffusion = None\n", - "if config.enable_multi_band_diffusion:\n", - " generated_wav_diffusion = multi_band_diffusion.tokens_to_wav(tokens)" - ] - }, - { - "cell_type": "code", - "execution_count": null, - "metadata": { - "cellView": "form", - "colab": { - "base_uri": "https://localhost:8080/", - "height": 104 - }, - "id": "_n-1RthFVPYN", - "outputId": "ac7bbc70-8114-4ef7-ef64-ecae9ba898cf" - }, - "outputs": [], - "source": [ - "# @title Log Audio to Weights & Biases Dashboard\n", - "\n", - "# @markdown In this section, we log the generated audio to Weights & Biases where you can listen and visualize them using an interactive audio player and waveform visualizer. 
Also, shoutout to [Atanu Sarkar](https://github.com/mratanusarkar) for building the spectrogram viusalization function which lets you visualize the spectrogram of the generated audio inside a [`wandb.Table`](https://docs.wandb.ai/guides/tables/tables-walkthrough).\n", - "\n", - "def get_spectrogram(audio_file, output_file):\n", - " sample_rate, samples = wavfile.read(audio_file)\n", - " frequencies, times, Sxx = signal.spectrogram(samples, sample_rate)\n", - "\n", - " log_Sxx = 10 * np.log10(Sxx + 1e-10)\n", - " vmin = np.percentile(log_Sxx, 5)\n", - " vmax = np.percentile(log_Sxx, 95)\n", - "\n", - " mean_spectrum = np.mean(log_Sxx, axis=1)\n", - " threshold_low = np.percentile(mean_spectrum, 5)\n", - " threshold_high = np.percentile(mean_spectrum, 95)\n", - "\n", - " freq_indices = np.where(mean_spectrum > threshold_low)\n", - " freq_min = 20\n", - " freq_max = frequencies[freq_indices].max()\n", - "\n", - " fig, ax = plt.subplots()\n", - " cmap = plt.get_cmap('magma')\n", - "\n", - " ax.pcolormesh(\n", - " times,\n", - " frequencies,\n", - " log_Sxx,\n", - " shading='gouraud',\n", - " cmap=cmap,\n", - " vmin=vmin,\n", - " vmax=vmax\n", - " )\n", - " ax.axis('off')\n", - " ax.set_ylim([freq_min, freq_max])\n", - "\n", - " plt.subplots_adjust(left=0, right=1, top=1, bottom=0)\n", - " plt.savefig(\n", - " output_file, format='png', bbox_inches='tight', pad_inches=0\n", - " )\n", - " plt.close()\n", - "\n", - " return wandb.Image(output_file)\n", - "\n", - "\n", - "temp_dir = TemporaryDirectory()\n", - "columns = [\"Model\", \"Prompt\", \"Generated-Audio\", \"Spectrogram\", \"Seed\"]\n", - "if input_audio is not None:\n", - " columns.insert(2, \"Input-Audio\")\n", - "if config.enable_multi_band_diffusion:\n", - " columns.insert(4, \"Generated-Audio-Diffusion\")\n", - " columns.insert(5, \"Spectrogram-Diffusion\")\n", - "wandb_table = wandb.Table(columns=columns)\n", - "\n", - "for idx, wav in enumerate(generated_wav):\n", - "\n", - " file_name = 
os.path.join(temp_dir.name, str(idx))\n", - " audio_write(\n", - " file_name,\n", - " wav.cpu(),\n", - " model.sample_rate,\n", - " strategy=\"loudness\",\n", - " loudness_compressor=True,\n", - " )\n", - " wandb_audio = wandb.Audio(file_name + \".wav\")\n", - " wandb.log({\"Generated-Audio\": wandb_audio}, commit=False)\n", - "\n", - " file_name_diffusion, wandb_diffusion_audio = None, None\n", - " if config.enable_multi_band_diffusion:\n", - " file_name_diffusion = os.path.join(\n", - " temp_dir.name, str(idx) + \"_diffusion\"\n", - " )\n", - " audio_write(\n", - " file_name_diffusion,\n", - " generated_wav_diffusion[idx].cpu(),\n", - " model.sample_rate,\n", - " strategy=\"loudness\",\n", - " loudness_compressor=True,\n", - " )\n", - " wandb_diffusion_audio = wandb.Audio(file_name_diffusion + \".wav\")\n", - " wandb.log(\n", - " {\"Generated-Audio-Diffusion\": wandb_diffusion_audio},\n", - " commit=False\n", - " )\n", - "\n", - " wandb.log({}, commit=True)\n", - "\n", - " desc = descriptions[idx] if len(descriptions) > 1 else config.prompts\n", - " wandb_table_row = [\n", - " model_name,\n", - " desc,\n", - " wandb_audio,\n", - " get_spectrogram(\n", - " audio_file=file_name + \".wav\",\n", - " output_file=os.path.join(temp_dir.name, str(idx) + \".png\")\n", - " ),\n", - " config.seed\n", - " ]\n", - " if input_audio is not None:\n", - " wandb_table_row.insert(2, wandb_input_audio)\n", - " if config.enable_multi_band_diffusion:\n", - " wandb_table_row.insert(4, wandb_diffusion_audio)\n", - " wandb_table_row.insert(\n", - " 5,\n", - " get_spectrogram(\n", - " audio_file=file_name_diffusion + \".wav\",\n", - " output_file=os.path.join(\n", - " temp_dir.name, str(idx) + \"_diffusion.png\"\n", - " )\n", - " )\n", - " )\n", - " wandb_table.add_data(*wandb_table_row)\n", - "\n", - "wandb.log({\"Generated-Audio-Table\": wandb_table})\n", - "\n", - "wandb.finish()\n", - "temp_dir.cleanup()" - ] + "cells": [ + { + "attachments": {}, + "cell_type": "markdown", + 
"metadata": { + "id": "W-26KlXuiXul" + }, + "source": [ + "\"Weights\n", + "\n", + "\n", + "# 🎸 Generating Music using [Audiocraft](https://github.com/facebookresearch/audiocraft) and W&B 🐝\n", + "\n", + "\n", + "\n", + "[![Open In Colab](https://colab.research.google.com/assets/colab-badge.svg)](https://colab.research.google.com/github/wandb/examples/blob/master/colabs/audiocraft/AudioCraft.ipynb)\n", + "\n", + "In this notebook we demonstrate how you can generate music and other types of audio from text prompts or generate new music from existing music using SoTA models such as [MusicGen](https://github.com/facebookresearch/audiocraft/blob/main/docs/MUSICGEN.md) and [AudioGen](https://github.com/facebookresearch/audiocraft/blob/main/docs/AUDIOGEN.md) from [Audiocraft](https://github.com/facebookresearch/audiocraft) and play and visualize them using [Weights & Biases](https://wandb.ai/site).\n", + "\n", + "If you want to know more about the underlying architectures for MusicGen and AudioGen and explore some cool audio samples generated by these models, you can check out [this W&B report](http://wandb.me/audiocraft_2mp)." + ] + }, + { + "cell_type": "code", + "execution_count": null, + "metadata": { + "cellView": "form", + "colab": { + "base_uri": "https://localhost:8080/", + "height": 1000 }, - { - "cell_type": "markdown", - "metadata": {}, - "source": [ - "This is how the W&B Table looks like with the interactive audio player, waveform visualizer and spectrogram visualization along with the prompts and other configs. 
Note that the notebook automatically sets the seed if you leave it blank, so your experiments are always reproducible.\n", - "\n", - "![](https://github.com/wandb/examples/blob/example/audiocraft/colabs/audiocraft/assets/music_gen.png?raw=1)" - ] + "id": "EZU3hg4B1om6", + "outputId": "3311a45d-35c3-49e8-cbd5-4618386fa2a1" + }, + "outputs": [], + "source": [ + "# @title Install AudioCraft + WandB\n", + "!pip install -U git+https://git@github.com/facebookresearch/audiocraft#egg=audiocraft\n", + "!pip install -qq -U wandb" + ] + }, + { + "cell_type": "code", + "execution_count": null, + "metadata": { + "cellView": "form", + "id": "RerQaiZt14r8" + }, + "outputs": [], + "source": [ + "# @title\n", + "import os\n", + "import random\n", + "from tempfile import TemporaryDirectory\n", + "\n", + "from scipy import signal\n", + "from scipy.io import wavfile\n", + "\n", + "import torchaudio\n", + "from audiocraft.models import AudioGen, MusicGen, MultiBandDiffusion\n", + "from audiocraft.data.audio import audio_write\n", + "\n", + "import wandb\n", + "import numpy as np\n", + "from tqdm.auto import tqdm\n", + "from google.colab import files\n", + "import matplotlib.pyplot as plt" + ] + }, + { + "cell_type": "code", + "execution_count": null, + "metadata": { + "cellView": "form", + "colab": { + "base_uri": "https://localhost:8080/", + "height": 162 }, - { - "cell_type": "markdown", - "metadata": {}, - "source": [ - "If you want to know more about the underlying architectures for MusicGen and AudioGen and explore some cool audio samples generated by these models, you can check out [this W&B report](http://wandb.me/audiocraft_2mp)." 
- ] - } - ], - "metadata": { - "accelerator": "GPU", + "id": "3MTX8GoE7AzN", + "outputId": "ebe733d0-3a21-47e5-d217-89a622cafc62" + }, + "outputs": [], + "source": [ + "# @title ## Audio Generation Configs\n", + "\n", + "# @markdown In this section, you can interact with the user interface to choose the models you want to use to generate audio, prompts and other configs. Once you execute this cell, it initializes a [wandb run](https://docs.wandb.ai/guides/runs) which will be used to automatically log all the generated audio along with all the prompts and configs, to ensure your AI-generated music is never lost and your experiments are always reproducible and easy to share. \n", + "\n", + "# @markdown **Note:** If you have provided prompts, you will be prompted to provide an audio file in addition to the prompts to condition the model. If you don't want to provide a file as an additional condition to the model, just press on the `cancel` button.\n", + "\n", + "# @markdown ---\n", + "# @markdown WandB Project Name\n", + "project_name = \"audiocraft\" # @param {type:\"string\"}\n", + "\n", + "wandb.init(project=project_name, job_type=\"musicgen/inference\")\n", + "\n", + "config = wandb.config\n", + "\n", + "# @markdown Select the Model for audio generation supported by [AudioCraft](https://github.com/facebookresearch/audiocraft). You can select either the MusicGen model variants (great for generating music) or the AudioGen model variants (great for generating non-musical audio). 
Also note that you can run all variants of MusicGen except the `large` one on the free-tier Colab GPU.\n", + "model_name = \"musicgen-small\" # @param [\"musicgen-small\", \"musicgen-medium\", \"musicgen-large\", \"musicgen-melody\", \"audiogen-medium\"]\n", + "config.model_name = \"facebook/\" + model_name if model_name == \"audiogen-medium\" else model_name\n", + "\n", + "# @markdown Whether to enable [MultiBand Diffusion](https://github.com/facebookresearch/audiocraft/blob/main/docs/MBD.md) or not. MultiBand diffusion is a collection of 4 models that can decode tokens from EnCodec tokenizer into waveform audio. Note that enabling this increases the time required to generate the audio.\n", + "enable_multi_band_diffusion = True # @param {type:\"boolean\"}\n", + "# config.enable_multi_band_diffusion = enable_multi_band_diffusion\n", + "\n", + "if \"musicgen\" not in model_name:\n", + " wandb.termwarn(\"Multi-band Diffusion is only available for Musicgen\")\n", + " config.enable_multi_band_diffusion = False\n", + "else:\n", + " config.enable_multi_band_diffusion = enable_multi_band_diffusion\n", + "\n", + "# @markdown ---\n", + "# @markdown ## Conditional Generation Configs\n", + "\n", + "# @markdown The prompt for generating audio. You can give multiple prompts separated by `|` in the input. 
You can also leave it blank for unconditional generation.\n", + "config.prompts = \"happy rock | energetic EDM | sad jazz\" # @param {type:\"string\"}\n", + "\n", + "descriptions = [prompt.strip() for prompt in config.prompts.split(\"|\")]\n", + "config.is_unconditional = config.prompts.strip() == \"\"\n", + "\n", + "input_audio, input_sampling_rate, wandb_input_audio = None, None, None\n", + "if not config.is_unconditional:\n", + " input_audio_file = files.upload()\n", + " if input_audio_file != {}:\n", + " if config.model_name == \"facebook/audiogen-medium\":\n", + " error = f\"{config.model_name} does not support audio-based conditioning\"\n", + " raise ValueError(error)\n", + " wandb_input_audio = wandb.Audio(list(input_audio_file.keys())[0])\n", + " input_audio, input_sampling_rate = torchaudio.load(\n", + " list(input_audio_file.keys())[0]\n", + " )\n", + " config.input_audio_available = True\n", + " else:\n", + " config.input_audio_available = False\n", + "else:\n", + " if config.model_name == \"facebook/audiogen-medium\":\n", + " error = f\"{config.model_name} does not support unconditional generation\"\n", + " raise ValueError(error)\n", + "\n", + "# @markdown Number of audio samples generated, this is relevant only for unconditional generation, i.e, if `config.prompts` is left blank.\n", + "config.num_samples = 4 # @param {type:\"slider\", min:1, max:10, step:1}\n", + "\n", + "# @markdown Specify the random seed\n", + "seed = None # @param {type:\"raw\"}\n", + "\n", + "max_seed = int(1024 * 1024 * 1024)\n", + "if not isinstance(seed, int):\n", + " seed = random.randint(1, max_seed)\n", + "if seed < 0:\n", + " seed = - seed\n", + "seed = seed % max_seed\n", + "config.seed = seed\n", + "\n", + "# @markdown ---\n", + "# @markdown ## Generation Parameters\n", + "# @markdown Use sampling if True, else do argmax decoding\n", + "config.use_sampling = True # @param {type:\"boolean\"}\n", + "\n", + "# @markdown `top_k` used for sampling; limits us to `k` number 
of of the top tokens to consider.\n", + "config.top_k = 250 # @param {type:\"slider\", min:0, max:1000, step:1}\n", + "\n", + "# @markdown `top_p` used for sampling; limits us to the top tokens within a probability mass `p`\n", + "config.top_p = 0.0 # @param {type:\"slider\", min:0, max:1.0, step:0.01}\n", + "\n", + "# @markdown Softmax temperature parameter\n", + "config.temperature = 1.0 # @param {type:\"slider\", min:0, max:1.0, step:0.01}\n", + "\n", + "# @markdown Duration of the generated waveform\n", + "config.duration = 10 # @param {type:\"slider\", min:1, max:30, step:1}\n", + "\n", + "# @markdown Coefficient used for classifier free guidance\n", + "config.cfg_coef = 3 # @param {type:\"slider\", min:1, max:100, step:1}\n", + "\n", + "# @markdown Whether to perform 2 forward for Classifier Free Guidance instead of batching together the two. This has some impact on how things are padded but seems to have little impact in practice.\n", + "config.two_step_cfg = False # @param {type:\"boolean\"}\n", + "\n", + "# @markdown When doing extended generation (i.e. more than 30 seconds), by how much should we extend the audio each time. Larger values will mean less context is preserved, and shorter value will require extra computations.\n", + "config.extend_stride = 0 # @param {type:\"slider\", min:0, max:30, step:1}" + ] + }, + { + "cell_type": "code", + "execution_count": null, + "metadata": { + "cellView": "form", "colab": { - "gpuType": "V100", - "provenance": [] + "base_uri": "https://localhost:8080/" }, - "kernelspec": { - "display_name": "Python 3", - "name": "python3" + "id": "SfM8rhVX7ES9", + "outputId": "a935173b-382a-4514-97fc-eec12e188379" + }, + "outputs": [], + "source": [ + "# @title Generate Audio using MusicGen\n", + "\n", + "# @markdown In this section, the audio is generated using the configs, specified in the aforementioned section. If you wish to peek behind the curtain and checkout the code, click on the `Show Code` button. 
In order to know about the different APIs for audio generation, visit the [official audiocraft documentations](https://facebookresearch.github.io/audiocraft/api_docs/audiocraft/index.html).\n", + "\n", + "model = None\n", + "if config.model_name == \"facebook/audiogen-medium\":\n", + " model = AudioGen.get_pretrained(config.model_name)\n", + "elif \"musicgen\" in config.model_name:\n", + " model = MusicGen.get_pretrained(config.model_name.split(\"-\")[-1])\n", + "\n", + "multi_band_diffusion = None\n", + "if config.enable_multi_band_diffusion:\n", + " multi_band_diffusion = MultiBandDiffusion.get_mbd_musicgen()\n", + "\n", + "model.set_generation_params(\n", + " use_sampling=config.use_sampling,\n", + " top_k=config.top_k,\n", + " top_p=config.top_p,\n", + " temperature=config.temperature,\n", + " duration=config.duration,\n", + " cfg_coef=config.cfg_coef,\n", + " two_step_cfg=config.two_step_cfg,\n", + " extend_stride=config.extend_stride\n", + ")\n", + "\n", + "generated_wav, tokens = None, None\n", + "if config.is_unconditional:\n", + " if input_audio is None:\n", + " if \"musicgen\" in config.model_name:\n", + " generated_wav, tokens = model.generate_unconditional(\n", + " num_samples=config.num_samples,\n", + " progress=True,\n", + " return_tokens=True\n", + " )\n", + " else:\n", + " generated_wav = model.generate_unconditional(\n", + " num_samples=config.num_samples,\n", + " progress=True,\n", + " )\n", + " else:\n", + " if \"musicgen\" in config.model_name:\n", + " generated_wav, tokens = model.generate_with_chroma(\n", + " descriptions,\n", + " input_audio[None].expand(3, -1, -1),\n", + " input_sampling_rate,\n", + " return_tokens=True\n", + " )\n", + " else:\n", + " generated_wav = model.generate_with_chroma(\n", + " descriptions,\n", + " input_audio[None].expand(3, -1, -1),\n", + " input_sampling_rate,\n", + " )\n", + "else:\n", + " if \"musicgen\" in config.model_name:\n", + " generated_wav, tokens = model.generate(\n", + " descriptions,\n", + " 
progress=True,\n", + " return_tokens=True\n", + " )\n", + " else:\n", + " generated_wav = model.generate(\n", + " descriptions,\n", + " progress=True,\n", + " )\n", + "\n", + "generated_wav_diffusion = None\n", + "if config.enable_multi_band_diffusion:\n", + " generated_wav_diffusion = multi_band_diffusion.tokens_to_wav(tokens)" + ] + }, + { + "cell_type": "code", + "execution_count": null, + "metadata": { + "cellView": "form", + "colab": { + "base_uri": "https://localhost:8080/", + "height": 104 }, - "language_info": { - "name": "python" - } + "id": "_n-1RthFVPYN", + "outputId": "ac7bbc70-8114-4ef7-ef64-ecae9ba898cf" + }, + "outputs": [], + "source": [ + "# @title Log Audio to Weights & Biases Dashboard\n", + "\n", + "# @markdown In this section, we log the generated audio to Weights & Biases where you can listen and visualize them using an interactive audio player and waveform visualizer. Also, shoutout to [Atanu Sarkar](https://github.com/mratanusarkar) for building the spectrogram viusalization function which lets you visualize the spectrogram of the generated audio inside a [`wandb.Table`](https://docs.wandb.ai/guides/tables/tables-walkthrough).\n", + "\n", + "def get_spectrogram(audio_file, output_file):\n", + " sample_rate, samples = wavfile.read(audio_file)\n", + " frequencies, times, Sxx = signal.spectrogram(samples, sample_rate)\n", + "\n", + " log_Sxx = 10 * np.log10(Sxx + 1e-10)\n", + " vmin = np.percentile(log_Sxx, 5)\n", + " vmax = np.percentile(log_Sxx, 95)\n", + "\n", + " mean_spectrum = np.mean(log_Sxx, axis=1)\n", + " threshold_low = np.percentile(mean_spectrum, 5)\n", + " threshold_high = np.percentile(mean_spectrum, 95)\n", + "\n", + " freq_indices = np.where(mean_spectrum > threshold_low)\n", + " freq_min = 20\n", + " freq_max = frequencies[freq_indices].max()\n", + "\n", + " fig, ax = plt.subplots()\n", + " cmap = plt.get_cmap('magma')\n", + "\n", + " ax.pcolormesh(\n", + " times,\n", + " frequencies,\n", + " log_Sxx,\n", + " 
shading='gouraud',\n", + " cmap=cmap,\n", + " vmin=vmin,\n", + " vmax=vmax\n", + " )\n", + " ax.axis('off')\n", + " ax.set_ylim([freq_min, freq_max])\n", + "\n", + " plt.subplots_adjust(left=0, right=1, top=1, bottom=0)\n", + " plt.savefig(\n", + " output_file, format='png', bbox_inches='tight', pad_inches=0\n", + " )\n", + " plt.close()\n", + "\n", + " return wandb.Image(output_file)\n", + "\n", + "\n", + "temp_dir = TemporaryDirectory()\n", + "columns = [\"Model\", \"Prompt\", \"Generated-Audio\", \"Spectrogram\", \"Seed\"]\n", + "if input_audio is not None:\n", + " columns.insert(2, \"Input-Audio\")\n", + "if config.enable_multi_band_diffusion:\n", + " columns.insert(4, \"Generated-Audio-Diffusion\")\n", + " columns.insert(5, \"Spectrogram-Diffusion\")\n", + "wandb_table = wandb.Table(columns=columns)\n", + "\n", + "for idx, wav in enumerate(generated_wav):\n", + "\n", + " file_name = os.path.join(temp_dir.name, str(idx))\n", + " audio_write(\n", + " file_name,\n", + " wav.cpu(),\n", + " model.sample_rate,\n", + " strategy=\"loudness\",\n", + " loudness_compressor=True,\n", + " )\n", + " wandb_audio = wandb.Audio(file_name + \".wav\")\n", + " wandb.log({\"Generated-Audio\": wandb_audio}, commit=False)\n", + "\n", + " file_name_diffusion, wandb_diffusion_audio = None, None\n", + " if config.enable_multi_band_diffusion:\n", + " file_name_diffusion = os.path.join(\n", + " temp_dir.name, str(idx) + \"_diffusion\"\n", + " )\n", + " audio_write(\n", + " file_name_diffusion,\n", + " generated_wav_diffusion[idx].cpu(),\n", + " model.sample_rate,\n", + " strategy=\"loudness\",\n", + " loudness_compressor=True,\n", + " )\n", + " wandb_diffusion_audio = wandb.Audio(file_name_diffusion + \".wav\")\n", + " wandb.log(\n", + " {\"Generated-Audio-Diffusion\": wandb_diffusion_audio},\n", + " commit=False\n", + " )\n", + "\n", + " wandb.log({}, commit=True)\n", + "\n", + " desc = descriptions[idx] if len(descriptions) > 1 else config.prompts\n", + " wandb_table_row = [\n", + " 
model_name,\n", + " desc,\n", + " wandb_audio,\n", + " get_spectrogram(\n", + " audio_file=file_name + \".wav\",\n", + " output_file=os.path.join(temp_dir.name, str(idx) + \".png\")\n", + " ),\n", + " config.seed\n", + " ]\n", + " if input_audio is not None:\n", + " wandb_table_row.insert(2, wandb_input_audio)\n", + " if config.enable_multi_band_diffusion:\n", + " wandb_table_row.insert(4, wandb_diffusion_audio)\n", + " wandb_table_row.insert(\n", + " 5,\n", + " get_spectrogram(\n", + " audio_file=file_name_diffusion + \".wav\",\n", + " output_file=os.path.join(\n", + " temp_dir.name, str(idx) + \"_diffusion.png\"\n", + " )\n", + " )\n", + " )\n", + " wandb_table.add_data(*wandb_table_row)\n", + "\n", + "wandb.log({\"Generated-Audio-Table\": wandb_table})\n", + "\n", + "wandb.finish()\n", + "temp_dir.cleanup()" + ] + }, + { + "cell_type": "markdown", + "metadata": {}, + "source": [ + "This is how the W&B Table looks like with the interactive audio player, waveform visualizer and spectrogram visualization along with the prompts and other configs. Note that the notebook automatically sets the seed if you leave it blank, so your experiments are always reproducible.\n", + "\n", + "![](https://github.com/wandb/examples/blob/example/audiocraft/colabs/audiocraft/assets/music_gen.png?raw=1)" + ] + }, + { + "cell_type": "markdown", + "metadata": {}, + "source": [ + "If you want to know more about the underlying architectures for MusicGen and AudioGen and explore some cool audio samples generated by these models, you can check out [this W&B report](http://wandb.me/audiocraft_2mp)." 
+ ] + } + ], + "metadata": { + "accelerator": "GPU", + "colab": { + "gpuType": "V100", + "provenance": [] + }, + "kernelspec": { + "display_name": "Python 3", + "name": "python3" }, - "nbformat": 4, - "nbformat_minor": 0 + "language_info": { + "name": "python" + } + }, + "nbformat": 4, + "nbformat_minor": 0 } diff --git a/colabs/boosting/Credit_Scorecards_with_XGBoost_and_W&B.ipynb b/colabs/boosting/Credit_Scorecards_with_XGBoost_and_W&B.ipynb index 6bd674f2..14966715 100644 --- a/colabs/boosting/Credit_Scorecards_with_XGBoost_and_W&B.ipynb +++ b/colabs/boosting/Credit_Scorecards_with_XGBoost_and_W&B.ipynb @@ -211,11 +211,19 @@ "outputs": [], "source": [ "import wandb\n", - "wandb.login()\n", "\n", "WANDB_PROJECT ='vehicle_loan_default'" ] }, + { + "cell_type": "code", + "execution_count": null, + "metadata": {}, + "outputs": [], + "source": [ + "!wandb login" + ] + }, { "attachments": {}, "cell_type": "markdown", diff --git a/colabs/boosting/Simple_LightGBM_Integration.ipynb b/colabs/boosting/Simple_LightGBM_Integration.ipynb index 35ba65ec..e3a6e637 100644 --- a/colabs/boosting/Simple_LightGBM_Integration.ipynb +++ b/colabs/boosting/Simple_LightGBM_Integration.ipynb @@ -111,9 +111,16 @@ "outputs": [], "source": [ "import wandb\n", - "from wandb.lightgbm import wandb_callback, log_summary\n", - "\n", - "wandb.login()" + "from wandb.lightgbm import wandb_callback, log_summary\n" + ] + }, + { + "cell_type": "code", + "execution_count": null, + "metadata": {}, + "outputs": [], + "source": [ + "!wandb login" ] }, { diff --git a/colabs/boosting/Using_W&B_Sweeps_with_XGBoost.ipynb b/colabs/boosting/Using_W&B_Sweeps_with_XGBoost.ipynb index e2649961..310ace6b 100644 --- a/colabs/boosting/Using_W&B_Sweeps_with_XGBoost.ipynb +++ b/colabs/boosting/Using_W&B_Sweeps_with_XGBoost.ipynb @@ -97,8 +97,16 @@ "outputs": [], "source": [ "\n", - "import wandb\n", - "wandb.login()" + "import wandb" + ] + }, + { + "cell_type": "code", + "execution_count": null, + "metadata": {}, + 
"outputs": [], + "source": [ + "!wandb login" ] }, { diff --git a/colabs/datasets-predictions/Image_Classification_with_Tables.ipynb b/colabs/datasets-predictions/Image_Classification_with_Tables.ipynb index f8252a03..2b81b3f0 100644 --- a/colabs/datasets-predictions/Image_Classification_with_Tables.ipynb +++ b/colabs/datasets-predictions/Image_Classification_with_Tables.ipynb @@ -140,8 +140,16 @@ "outputs": [], "source": [ "\n", - "import wandb\n", - "wandb.login()" + "import wandb" + ] + }, + { + "cell_type": "code", + "execution_count": null, + "metadata": {}, + "outputs": [], + "source": [ + "!wandb login" ] }, { @@ -613,20 +621,6 @@ "3. [Articles](https://www.wandb.com/articles) - blog posts and tutorials\n", "4. [Community](wandb.me/slack) - join our Slack community forum" ] - }, - { - "cell_type": "code", - "execution_count": null, - "metadata": {}, - "outputs": [], - "source": [] - }, - { - "cell_type": "code", - "execution_count": null, - "metadata": {}, - "outputs": [], - "source": [] } ], "metadata": { diff --git a/colabs/datasets-predictions/Logging_Timbre_Transfer_with_W&B.ipynb b/colabs/datasets-predictions/Logging_Timbre_Transfer_with_W&B.ipynb index 5c761177..23add602 100644 --- a/colabs/datasets-predictions/Logging_Timbre_Transfer_with_W&B.ipynb +++ b/colabs/datasets-predictions/Logging_Timbre_Transfer_with_W&B.ipynb @@ -193,8 +193,16 @@ "metadata": {}, "outputs": [], "source": [ - "WANDB_PROJECT = \"timbre_demo\"\n", - "wandb.login()" + "WANDB_PROJECT = \"timbre_demo\"" + ] + }, + { + "cell_type": "code", + "execution_count": null, + "metadata": {}, + "outputs": [], + "source": [ + "!wandb login" ] }, { diff --git a/colabs/datasets-predictions/W&B_Dataset_Visualization.ipynb b/colabs/datasets-predictions/W&B_Dataset_Visualization.ipynb index 244dce8d..4f548d63 100644 --- a/colabs/datasets-predictions/W&B_Dataset_Visualization.ipynb +++ b/colabs/datasets-predictions/W&B_Dataset_Visualization.ipynb @@ -93,8 +93,16 @@ "source": [ "# default project 
name where results will be logged\n", "WANDB_PROJECT = \"dsviz-demo-colab\"\n", - "NUM_EXAMPLES = 50\n", - "wandb.login()" + "NUM_EXAMPLES = 50" + ] + }, + { + "cell_type": "code", + "execution_count": null, + "metadata": {}, + "outputs": [], + "source": [ + "!wandb login" ] }, { diff --git a/colabs/datasets-predictions/W&B_Tables_Quickstart.ipynb b/colabs/datasets-predictions/W&B_Tables_Quickstart.ipynb index 592878ae..8e847a4d 100644 --- a/colabs/datasets-predictions/W&B_Tables_Quickstart.ipynb +++ b/colabs/datasets-predictions/W&B_Tables_Quickstart.ipynb @@ -82,11 +82,19 @@ "source": [ "\n", "import wandb\n", - "wandb.login()\n", "\n", "WANDB_PROJECT = \"mnist-viz\"" ] }, + { + "cell_type": "code", + "execution_count": null, + "metadata": {}, + "outputs": [], + "source": [ + "!wandb login" + ] + }, { "cell_type": "markdown", "metadata": {}, diff --git a/colabs/deepchem/W&B_x_DeepChem.ipynb b/colabs/deepchem/W&B_x_DeepChem.ipynb index 81687e46..a6412e2b 100644 --- a/colabs/deepchem/W&B_x_DeepChem.ipynb +++ b/colabs/deepchem/W&B_x_DeepChem.ipynb @@ -58,12 +58,20 @@ "outputs": [], "source": [ "import wandb\n", - "wandb.login()\n", "\n", "import warnings\n", "warnings.filterwarnings('ignore')" ] }, + { + "cell_type": "code", + "execution_count": null, + "metadata": {}, + "outputs": [], + "source": [ + "!wandb login" + ] + }, { "cell_type": "markdown", "metadata": {}, diff --git a/colabs/diffusers/diffusers-image-generation.ipynb b/colabs/diffusers/diffusers-image-generation.ipynb index 00ac78c0..f3c603c5 100644 --- a/colabs/diffusers/diffusers-image-generation.ipynb +++ b/colabs/diffusers/diffusers-image-generation.ipynb @@ -512,13 +512,6 @@ "source": [ "notebook_launcher(training_loop, num_processes=config.num_processes)" ] - }, - { - "cell_type": "code", - "execution_count": null, - "metadata": {}, - "outputs": [], - "source": [] } ], "metadata": { diff --git a/colabs/dsviz/W&B_Dataset_and_Predictions_Viz_Demo.ipynb 
b/colabs/dsviz/W&B_Dataset_and_Predictions_Viz_Demo.ipynb index b7447302..c17221a5 100644 --- a/colabs/dsviz/W&B_Dataset_and_Predictions_Viz_Demo.ipynb +++ b/colabs/dsviz/W&B_Dataset_and_Predictions_Viz_Demo.ipynb @@ -112,8 +112,16 @@ "outputs": [], "source": [ "# default project name where results will be logged\n", - "WANDB_PROJECT = \"dsviz-demo\"\n", - "wandb.login()" + "WANDB_PROJECT = \"dsviz-demo\"" + ] + }, + { + "cell_type": "code", + "execution_count": null, + "metadata": {}, + "outputs": [], + "source": [ + "!wandb login" ] }, { diff --git a/colabs/fastai/Semantic_Segmentation_Demo_with_W&B.ipynb b/colabs/fastai/Semantic_Segmentation_Demo_with_W&B.ipynb index a6d95eb1..4dd645a0 100644 --- a/colabs/fastai/Semantic_Segmentation_Demo_with_W&B.ipynb +++ b/colabs/fastai/Semantic_Segmentation_Demo_with_W&B.ipynb @@ -91,7 +91,7 @@ "metadata": {}, "outputs": [], "source": [ - "wandb.login()" + "!wandb login" ] }, { diff --git a/colabs/fastai/Weights_&_Biases_with_fastai.ipynb b/colabs/fastai/Weights_&_Biases_with_fastai.ipynb index 7327755b..9dc02117 100644 --- a/colabs/fastai/Weights_&_Biases_with_fastai.ipynb +++ b/colabs/fastai/Weights_&_Biases_with_fastai.ipynb @@ -89,7 +89,7 @@ "metadata": {}, "outputs": [], "source": [ - "wandb.login()" + "!wandb login" ] }, { diff --git a/colabs/huggingface/Huggingface_wandb.ipynb b/colabs/huggingface/Huggingface_wandb.ipynb index 4ff85657..5f17ac0f 100644 --- a/colabs/huggingface/Huggingface_wandb.ipynb +++ b/colabs/huggingface/Huggingface_wandb.ipynb @@ -126,8 +126,16 @@ "metadata": {}, "outputs": [], "source": [ - "import wandb\n", - "wandb.login()" + "import wandb" + ] + }, + { + "cell_type": "code", + "execution_count": null, + "metadata": {}, + "outputs": [], + "source": [ + "!wandb login" ] }, { diff --git a/colabs/huggingface/Optimize_Hugging_Face_models_with_Weights_&_Biases.ipynb b/colabs/huggingface/Optimize_Hugging_Face_models_with_Weights_&_Biases.ipynb index e950b3d4..55b2a1d1 100644 --- 
a/colabs/huggingface/Optimize_Hugging_Face_models_with_Weights_&_Biases.ipynb +++ b/colabs/huggingface/Optimize_Hugging_Face_models_with_Weights_&_Biases.ipynb @@ -109,9 +109,16 @@ "metadata": {}, "outputs": [], "source": [ - "import wandb\n", - "\n", - "wandb.login()" + "import wandb\n" + ] + }, + { + "cell_type": "code", + "execution_count": null, + "metadata": {}, + "outputs": [], + "source": [ + "!wandb login" ] }, { diff --git a/colabs/huggingface/Simple_accelerate_integration_wandb.ipynb b/colabs/huggingface/Simple_accelerate_integration_wandb.ipynb index b371a500..bf2b5093 100644 --- a/colabs/huggingface/Simple_accelerate_integration_wandb.ipynb +++ b/colabs/huggingface/Simple_accelerate_integration_wandb.ipynb @@ -2,6 +2,7 @@ "cells": [ { "cell_type": "markdown", + "id": "a7b2cb7b", "metadata": {}, "source": [ "\"Open\n", @@ -11,7 +12,7 @@ { "attachments": {}, "cell_type": "markdown", - "id": "cadda531-5f7b-4793-876b-dceedec26cc5", + "id": "457a3515", "metadata": {}, "source": [ "# Using Huggingface Accelerate with Weights and Biases\n", @@ -20,7 +21,7 @@ }, { "cell_type": "markdown", - "id": "c5ccfcf6-5d99-474c-a58e-77783906efb4", + "id": "434e4e75", "metadata": {}, "source": [ "[Accelerate](https://github.com/huggingface/accelerate) is this amazing little framework that simplifies your PyTorch training scripts enabling you to train with all the tricks out there!\n", @@ -68,7 +69,7 @@ }, { "cell_type": "markdown", - "id": "02aa793a-f2d3-468c-823e-492d2bebe4b7", + "id": "0a62d793", "metadata": {}, "source": [ "## Training and Image Classifier" @@ -77,7 +78,7 @@ { "cell_type": "code", "execution_count": null, - "id": "6c83e24e-ad59-4038-8a4b-67f6dcce94cb", + "id": "8446561f", "metadata": {}, "outputs": [], "source": [ @@ -87,7 +88,7 @@ { "cell_type": "code", "execution_count": null, - "id": "90dc4bba-0181-4a8b-bfc5-6ae87a3c07a4", + "id": "7b308699", "metadata": {}, "outputs": [], "source": [ @@ -111,7 +112,7 @@ }, { "cell_type": "markdown", - "id": 
"8760c536-636b-4cd9-b244-3bb61254ebd6", + "id": "f9042940", "metadata": {}, "source": [ "Store your configuration parameters" @@ -120,7 +121,7 @@ { "cell_type": "code", "execution_count": null, - "id": "0afbcd78-f3f8-4cfc-a89c-5e3767d16e94", + "id": "08cae1f3", "metadata": {}, "outputs": [], "source": [ @@ -137,7 +138,7 @@ }, { "cell_type": "markdown", - "id": "0370911f-7e69-43d3-93ec-50ed271c9ae6", + "id": "9d513fa3", "metadata": {}, "source": [ "setup transforms" @@ -146,7 +147,7 @@ { "cell_type": "code", "execution_count": null, - "id": "8a68787b-43a7-459c-97b8-6b483291a821", + "id": "3dfeb520", "metadata": {}, "outputs": [], "source": [ @@ -159,7 +160,7 @@ }, { "cell_type": "markdown", - "id": "fed9579d-244c-47ee-9131-51d9ef7aa069", + "id": "534a5f7e", "metadata": {}, "source": [ "Create a simple CNN" @@ -168,7 +169,7 @@ { "cell_type": "code", "execution_count": null, - "id": "aead4006-6a4e-4009-b9b8-776d1ca1bf62", + "id": "f0cab9be", "metadata": {}, "outputs": [], "source": [ @@ -188,7 +189,7 @@ }, { "cell_type": "markdown", - "id": "ac8f89dd-4e18-4644-9d6a-b7ae7a5447a9", + "id": "3d6f2cf8", "metadata": {}, "source": [ "Wrap everything into a training functions (this is necessary to run on multiple GPUS, if it is only one, you can skip the wrapping)" @@ -197,7 +198,7 @@ { "cell_type": "code", "execution_count": null, - "id": "26cbaf18-655c-4a35-8060-8c25eb7f27fc", + "id": "0fd6d359", "metadata": {}, "outputs": [], "source": [ @@ -250,7 +251,7 @@ }, { "cell_type": "markdown", - "id": "e6227c06-2365-4386-9250-e1c22ac84bec", + "id": "a8e34c91", "metadata": {}, "source": [ "Let's train on 2 GPUs! This is really nice, as accelerate will take care of only calling `log` on the main process, so only one run get's created, so no need to manually check the rank of the process when using multiple GPUs." 
@@ -259,7 +260,7 @@ { "cell_type": "code", "execution_count": null, - "id": "028f7217-9a0f-4881-9219-38f1a65dbf53", + "id": "b0922368", "metadata": {}, "outputs": [], "source": [ diff --git a/colabs/huggingface/Visualize_your_Hugging_Face_data_with_Weights_&_Biases.ipynb b/colabs/huggingface/Visualize_your_Hugging_Face_data_with_Weights_&_Biases.ipynb index 6be6becf..c386b743 100644 --- a/colabs/huggingface/Visualize_your_Hugging_Face_data_with_Weights_&_Biases.ipynb +++ b/colabs/huggingface/Visualize_your_Hugging_Face_data_with_Weights_&_Biases.ipynb @@ -62,8 +62,16 @@ "metadata": {}, "outputs": [], "source": [ - "import wandb\n", - "wandb.login()" + "import wandb" + ] + }, + { + "cell_type": "code", + "execution_count": null, + "metadata": {}, + "outputs": [], + "source": [ + "!wandb login" ] }, { diff --git a/colabs/huggingface/wandb_hf_example.ipynb b/colabs/huggingface/wandb_hf_example.ipynb index 2c94bdce..51436229 100644 --- a/colabs/huggingface/wandb_hf_example.ipynb +++ b/colabs/huggingface/wandb_hf_example.ipynb @@ -89,8 +89,16 @@ "outputs": [], "source": [ "# Login and authenticate Weights & Biases\n", - "import wandb\n", - "wandb.login()" + "import wandb" + ] + }, + { + "cell_type": "code", + "execution_count": null, + "metadata": {}, + "outputs": [], + "source": [ + "!wandb login" ] }, { diff --git a/colabs/intro/3_in_1_Intro_to_Weights_&_Biases_CV,_NLP_and_RL.ipynb b/colabs/intro/3_in_1_Intro_to_Weights_&_Biases_CV,_NLP_and_RL.ipynb index c1fd9d6b..24b56715 100644 --- a/colabs/intro/3_in_1_Intro_to_Weights_&_Biases_CV,_NLP_and_RL.ipynb +++ b/colabs/intro/3_in_1_Intro_to_Weights_&_Biases_CV,_NLP_and_RL.ipynb @@ -126,8 +126,16 @@ "metadata": {}, "outputs": [], "source": [ - "import wandb\n", - "wandb.login()" + "import wandb" + ] + }, + { + "cell_type": "code", + "execution_count": null, + "metadata": {}, + "outputs": [], + "source": [ + "!wandb login" ] }, { diff --git a/colabs/intro/Intro_to_Weights_&_Biases.ipynb 
b/colabs/intro/Intro_to_Weights_&_Biases.ipynb index 23aa4b2f..ad267b72 100644 --- a/colabs/intro/Intro_to_Weights_&_Biases.ipynb +++ b/colabs/intro/Intro_to_Weights_&_Biases.ipynb @@ -76,8 +76,16 @@ "outputs": [], "source": [ "# Log in to your W&B account\n", - "import wandb\n", - "wandb.login()" + "import wandb" + ] + }, + { + "cell_type": "code", + "execution_count": null, + "metadata": {}, + "outputs": [], + "source": [ + "!wandb login" ] }, { diff --git a/colabs/intro/Intro_to_Weights_&_Biases_keras.ipynb b/colabs/intro/Intro_to_Weights_&_Biases_keras.ipynb index df3f2a22..566a2747 100644 --- a/colabs/intro/Intro_to_Weights_&_Biases_keras.ipynb +++ b/colabs/intro/Intro_to_Weights_&_Biases_keras.ipynb @@ -73,8 +73,16 @@ "outputs": [], "source": [ "# Log in to your W&B account\n", - "import wandb\n", - "wandb.login()" + "import wandb" + ] + }, + { + "cell_type": "code", + "execution_count": null, + "metadata": {}, + "outputs": [], + "source": [ + "!wandb login" ] }, { diff --git a/colabs/jax/training_with_tfrecords_in_jax_imagenette.ipynb b/colabs/jax/training_with_tfrecords_in_jax_imagenette.ipynb index b4191be8..04d7418c 100644 --- a/colabs/jax/training_with_tfrecords_in_jax_imagenette.ipynb +++ b/colabs/jax/training_with_tfrecords_in_jax_imagenette.ipynb @@ -504,13 +504,6 @@ "source": [ "wandb.finish()" ] - }, - { - "cell_type": "code", - "execution_count": null, - "metadata": {}, - "outputs": [], - "source": [] } ], "metadata": { diff --git a/colabs/jupyter/Interactive_W&B_Charts_Inside_Jupyter.ipynb b/colabs/jupyter/Interactive_W&B_Charts_Inside_Jupyter.ipynb index 3918bb68..1870d3dd 100644 --- a/colabs/jupyter/Interactive_W&B_Charts_Inside_Jupyter.ipynb +++ b/colabs/jupyter/Interactive_W&B_Charts_Inside_Jupyter.ipynb @@ -95,8 +95,16 @@ "metadata": {}, "outputs": [], "source": [ - "import wandb\n", - "wandb.login()" + "import wandb" + ] + }, + { + "cell_type": "code", + "execution_count": null, + "metadata": {}, + "outputs": [], + "source": [ + "!wandb 
login" ] }, { diff --git a/colabs/keras/Fine_tune_Vision_Transformer_using_KerasCV.ipynb b/colabs/keras/Fine_tune_Vision_Transformer_using_KerasCV.ipynb index 8554cbef..84d07626 100644 --- a/colabs/keras/Fine_tune_Vision_Transformer_using_KerasCV.ipynb +++ b/colabs/keras/Fine_tune_Vision_Transformer_using_KerasCV.ipynb @@ -47,9 +47,16 @@ "\n", "import wandb\n", "from wandb.keras import WandbMetricsLogger\n", - "from wandb.keras import WandbEvalCallback\n", - "\n", - "wandb.login()" + "from wandb.keras import WandbEvalCallback\n" + ] + }, + { + "cell_type": "code", + "execution_count": null, + "metadata": {}, + "outputs": [], + "source": [ + "!wandb login" ] }, { @@ -321,13 +328,6 @@ "# Close the W&B run\n", "wandb.finish()" ] - }, - { - "cell_type": "code", - "execution_count": null, - "metadata": {}, - "outputs": [], - "source": [] } ], "metadata": { diff --git a/colabs/keras/Image_Segmentation_with_Keras.ipynb b/colabs/keras/Image_Segmentation_with_Keras.ipynb index 85cf9cae..3ab79047 100644 --- a/colabs/keras/Image_Segmentation_with_Keras.ipynb +++ b/colabs/keras/Image_Segmentation_with_Keras.ipynb @@ -34,9 +34,16 @@ "source": [ "import wandb\n", "from wandb.keras import WandbMetricsLogger\n", - "from wandb.keras import WandbEvalCallback\n", - "\n", - "wandb.login()" + "from wandb.keras import WandbEvalCallback\n" + ] + }, + { + "cell_type": "code", + "execution_count": null, + "metadata": {}, + "outputs": [], + "source": [ + "!wandb login" ] }, { diff --git a/colabs/keras/Keras_param_opti_using_sweeps.ipynb b/colabs/keras/Keras_param_opti_using_sweeps.ipynb index b6175ce6..2e05fb1b 100644 --- a/colabs/keras/Keras_param_opti_using_sweeps.ipynb +++ b/colabs/keras/Keras_param_opti_using_sweeps.ipynb @@ -102,9 +102,16 @@ "outputs": [], "source": [ "import wandb\n", - "from wandb.keras import WandbMetricsLogger, WandbModelCheckpoint\n", - "\n", - "wandb.login()" + "from wandb.keras import WandbMetricsLogger, WandbModelCheckpoint\n" + ] + }, + { + "cell_type": 
"code", + "execution_count": null, + "metadata": {}, + "outputs": [], + "source": [ + "!wandb login" ] }, { diff --git a/colabs/keras/Legacy_Keras_pipeline_with_Weights_and_Biases.ipynb b/colabs/keras/Legacy_Keras_pipeline_with_Weights_and_Biases.ipynb index 3008b3f8..ec3d3a0a 100644 --- a/colabs/keras/Legacy_Keras_pipeline_with_Weights_and_Biases.ipynb +++ b/colabs/keras/Legacy_Keras_pipeline_with_Weights_and_Biases.ipynb @@ -112,8 +112,16 @@ "metadata": {}, "outputs": [], "source": [ - "# Login to W&B\n", - "wandb.login()" + "# Login to W&B" + ] + }, + { + "cell_type": "code", + "execution_count": null, + "metadata": {}, + "outputs": [], + "source": [ + "!wandb login" ] }, { diff --git a/colabs/keras/Simple_Keras_Integration.ipynb b/colabs/keras/Simple_Keras_Integration.ipynb index f5e51f51..bd1e97a2 100644 --- a/colabs/keras/Simple_Keras_Integration.ipynb +++ b/colabs/keras/Simple_Keras_Integration.ipynb @@ -120,9 +120,16 @@ "outputs": [], "source": [ "import wandb\n", - "from wandb.keras import WandbMetricsLogger, WandbModelCheckpoint, WandbEvalCallback\n", - "\n", - "wandb.login()" + "from wandb.keras import WandbMetricsLogger, WandbModelCheckpoint, WandbEvalCallback\n" + ] + }, + { + "cell_type": "code", + "execution_count": null, + "metadata": {}, + "outputs": [], + "source": [ + "!wandb login" ] }, { diff --git a/colabs/keras/Use_WandbEvalCallback_in_your_Keras_workflow.ipynb b/colabs/keras/Use_WandbEvalCallback_in_your_Keras_workflow.ipynb index 4cbce176..58092c99 100644 --- a/colabs/keras/Use_WandbEvalCallback_in_your_Keras_workflow.ipynb +++ b/colabs/keras/Use_WandbEvalCallback_in_your_Keras_workflow.ipynb @@ -100,7 +100,7 @@ "metadata": {}, "outputs": [], "source": [ - "wandb.login()" + "!wandb login" ] }, { diff --git a/colabs/keras/Use_WandbMetricLogger_in_your_Keras_workflow.ipynb b/colabs/keras/Use_WandbMetricLogger_in_your_Keras_workflow.ipynb index 3c3ddec2..8bef5ab4 100644 --- a/colabs/keras/Use_WandbMetricLogger_in_your_Keras_workflow.ipynb +++ 
b/colabs/keras/Use_WandbMetricLogger_in_your_Keras_workflow.ipynb @@ -98,7 +98,7 @@ "metadata": {}, "outputs": [], "source": [ - "wandb.login()" + "!wandb login" ] }, { diff --git a/colabs/keras/Use_WandbModelCheckpoint_in_your_Keras_workflow.ipynb b/colabs/keras/Use_WandbModelCheckpoint_in_your_Keras_workflow.ipynb index 3ca4c90c..fd5258b4 100644 --- a/colabs/keras/Use_WandbModelCheckpoint_in_your_Keras_workflow.ipynb +++ b/colabs/keras/Use_WandbModelCheckpoint_in_your_Keras_workflow.ipynb @@ -98,7 +98,7 @@ "metadata": {}, "outputs": [], "source": [ - "wandb.login()" + "!wandb login" ] }, { diff --git a/colabs/keras/cosine_decay_using_keras.ipynb b/colabs/keras/cosine_decay_using_keras.ipynb index 9128a2c2..ea9cbcf7 100644 --- a/colabs/keras/cosine_decay_using_keras.ipynb +++ b/colabs/keras/cosine_decay_using_keras.ipynb @@ -40,9 +40,16 @@ "\n", "# Weights and Biases related imports\n", "import wandb\n", - "from wandb.keras import WandbMetricsLogger\n", - "\n", - "wandb.login()" + "from wandb.keras import WandbMetricsLogger\n" + ] + }, + { + "cell_type": "code", + "execution_count": null, + "metadata": {}, + "outputs": [], + "source": [ + "!wandb login" ] }, { diff --git a/colabs/keras/keras_nsynth_instrument_prediction.ipynb b/colabs/keras/keras_nsynth_instrument_prediction.ipynb index a7bcc152..7f9153e7 100644 --- a/colabs/keras/keras_nsynth_instrument_prediction.ipynb +++ b/colabs/keras/keras_nsynth_instrument_prediction.ipynb @@ -45,7 +45,7 @@ "metadata": {}, "outputs": [], "source": [ - "wandb.login()" + "!wandb login" ] }, { @@ -649,13 +649,6 @@ "source": [ "run.finish()" ] - }, - { - "cell_type": "code", - "execution_count": null, - "metadata": {}, - "outputs": [], - "source": [] } ], "metadata": { diff --git a/colabs/mmdetection/Train_Object_Detector_with_MMDetection_and_W&B.ipynb b/colabs/mmdetection/Train_Object_Detector_with_MMDetection_and_W&B.ipynb index eb3ab018..13127841 100644 --- 
a/colabs/mmdetection/Train_Object_Detector_with_MMDetection_and_W&B.ipynb +++ b/colabs/mmdetection/Train_Object_Detector_with_MMDetection_and_W&B.ipynb @@ -155,7 +155,7 @@ "metadata": {}, "outputs": [], "source": [ - "wandb.login()" + "!wandb login" ] }, { diff --git a/colabs/mmdetection/Train_an_Object_Detection+Semantic_Segmentation_Model_with_MMDetection_and_W&B.ipynb b/colabs/mmdetection/Train_an_Object_Detection+Semantic_Segmentation_Model_with_MMDetection_and_W&B.ipynb index 62b6e301..db94670d 100644 --- a/colabs/mmdetection/Train_an_Object_Detection+Semantic_Segmentation_Model_with_MMDetection_and_W&B.ipynb +++ b/colabs/mmdetection/Train_an_Object_Detection+Semantic_Segmentation_Model_with_MMDetection_and_W&B.ipynb @@ -153,7 +153,7 @@ "metadata": {}, "outputs": [], "source": [ - "wandb.login()" + "!wandb login" ] }, { diff --git a/colabs/mosaicml/MosaicML_Composer_and_wandb.ipynb b/colabs/mosaicml/MosaicML_Composer_and_wandb.ipynb index e9b2b3d2..dc2e0f46 100644 --- a/colabs/mosaicml/MosaicML_Composer_and_wandb.ipynb +++ b/colabs/mosaicml/MosaicML_Composer_and_wandb.ipynb @@ -2,6 +2,7 @@ "cells": [ { "cell_type": "markdown", + "id": "5e6559e8", "metadata": {}, "source": [ "\"Open\n", @@ -10,7 +11,7 @@ }, { "cell_type": "markdown", - "id": "910a2eb0-65b5-4b6c-971a-19bf8121ab13", + "id": "acbebc67", "metadata": {}, "source": [ "\"Weights\n", @@ -23,7 +24,7 @@ }, { "cell_type": "markdown", - "id": "4961c393-0154-4937-98d7-16e472b9a0d3", + "id": "57b84004", "metadata": {}, "source": [ "[MosaicML Composer](https://docs.mosaicml.com) is a library for training neural networks better, faster, and cheaper. 
It contains many state-of-the-art methods for accelerating neural network training and improving generalization, along with an optional Trainer API that makes composing many different enhancements easy.\n", @@ -38,7 +39,7 @@ }, { "cell_type": "markdown", - "id": "4ec877f6-57aa-4423-ae4b-f85769c59dd6", + "id": "eec2c407", "metadata": {}, "source": [ "W&B integration with Composer can automatically:\n", @@ -52,7 +53,7 @@ }, { "cell_type": "markdown", - "id": "092b0104-530a-438d-bd68-08f627cc8920", + "id": "66988495", "metadata": {}, "source": [ "### 🛠️ Installation and set-up\n", @@ -65,7 +66,7 @@ { "cell_type": "code", "execution_count": null, - "id": "56bbcb21-babd-488b-a20d-080f43f09897", + "id": "135bc088", "metadata": {}, "outputs": [], "source": [ @@ -74,7 +75,7 @@ }, { "cell_type": "markdown", - "id": "39e54ec9-51b6-4f49-9ea1-2ed82f03add3", + "id": "8574cf66", "metadata": {}, "source": [ "## Getting Started with Composer 🔥" @@ -82,7 +83,7 @@ }, { "cell_type": "markdown", - "id": "a2e0f570-d323-4ecf-bdb3-1469730f562b", + "id": "620b12c7", "metadata": {}, "source": [ "Composer gives you access to a set of functions to speedup your models and infuse them with state of the art methods. For instance, you can insert [BlurPool](https://docs.mosaicml.com/en/latest/method_cards/blurpool.html) into your CNN by calling `CF.apply_blurpool(model)` into your PyTorch model. Take a look at all the [functional](https://docs.mosaicml.com/en/latest/functional_api.html) methods available." @@ -91,7 +92,7 @@ { "cell_type": "code", "execution_count": null, - "id": "55d71a76-7354-43ee-87c1-a83aa88b0b69", + "id": "5b42d839", "metadata": {}, "outputs": [], "source": [ @@ -110,7 +111,7 @@ }, { "cell_type": "markdown", - "id": "15ad4bdf-9d1f-4bf6-8e30-ff72e06c6718", + "id": "64177537", "metadata": {}, "source": [ "> 💡 you can use this upgraded model with your favourite PyTorch training or... 
" @@ -118,7 +119,7 @@ }, { "cell_type": "markdown", - "id": "642eb0d5-b4f6-4add-9d59-235222bc2236", + "id": "887ed891", "metadata": {}, "source": [ "## Use the `Trainer` class with Weights and Biases 🏋️‍♀️\n", @@ -134,7 +135,7 @@ { "cell_type": "code", "execution_count": null, - "id": "caecb6eb-25b9-4daf-a1d2-465fecf98ef7", + "id": "5e156cae", "metadata": {}, "outputs": [], "source": [ @@ -145,7 +146,7 @@ { "cell_type": "code", "execution_count": null, - "id": "93fbba13-dbb9-436e-8237-fa60f0e54675", + "id": "023ddcaa", "metadata": {}, "outputs": [], "source": [ @@ -164,7 +165,7 @@ }, { "cell_type": "markdown", - "id": "b8f42d23-1e98-46b7-872d-17e326a30ef1", + "id": "6bed58ce", "metadata": {}, "source": [ "let's grab a copy of MNIST from `torchvision`" @@ -173,7 +174,7 @@ { "cell_type": "code", "execution_count": null, - "id": "5df9ec80-22ec-4cad-91a3-685a1392dcc3", + "id": "1dd6466a", "metadata": {}, "outputs": [], "source": [ @@ -186,7 +187,7 @@ }, { "cell_type": "markdown", - "id": "e13640e3-7ff3-4fb2-a795-6443568cbf3e", + "id": "74834fa6", "metadata": {}, "source": [ "we can import a simple ConvNet model to try" @@ -195,7 +196,7 @@ { "cell_type": "code", "execution_count": null, - "id": "eb8f7103-cad2-4f40-9d48-5ac2b394b4d6", + "id": "53023ea7", "metadata": {}, "outputs": [], "source": [ @@ -204,7 +205,7 @@ }, { "cell_type": "markdown", - "id": "baa820b1-12bb-43c8-ae7c-5cea24b69c35", + "id": "602b9245", "metadata": {}, "source": [ "### 📊 Tracking the experiment\n", @@ -214,7 +215,7 @@ { "cell_type": "code", "execution_count": null, - "id": "c4424591-cd28-4af2-987b-be0588a99a8e", + "id": "bf267683", "metadata": {}, "outputs": [], "source": [ @@ -239,7 +240,7 @@ }, { "cell_type": "markdown", - "id": "2f2b5740-7736-4f6d-88f1-c89a669691fe", + "id": "40dd887d", "metadata": {}, "source": [ "we are able to tweak what are we logging using `Callbacks` into the `Trainer` class." 
@@ -248,7 +249,7 @@ { "cell_type": "code", "execution_count": null, - "id": "e60c66bc-f046-4a57-9ea7-a6e9fe5cfc3f", + "id": "a2ae3b59", "metadata": {}, "outputs": [], "source": [ @@ -259,7 +260,7 @@ }, { "cell_type": "markdown", - "id": "0e647bcc-abe5-45cd-bb7d-fa91e6c2006e", + "id": "0ab089d5", "metadata": {}, "source": [ "we include callbacks that measure the model throughput (and the learning rate) and logs them to Weights & Biases. [Callbacks](https://docs.mosaicml.com/en/latest/trainer/callbacks.html) control what is being logged, whereas loggers specify where the information is being saved. For more information on loggers, see [Logging](https://docs.mosaicml.com/en/latest/trainer/logging.html)." @@ -268,7 +269,7 @@ { "cell_type": "code", "execution_count": null, - "id": "fb0ad37f-943a-4341-8a16-32bf88e98ac1", + "id": "592ad057", "metadata": {}, "outputs": [], "source": [ @@ -287,7 +288,7 @@ }, { "cell_type": "markdown", - "id": "1ae01513-21a8-49a5-bec3-83bebf079710", + "id": "06e6218a", "metadata": {}, "source": [ "once we are ready to train we call `fit`" @@ -296,7 +297,7 @@ { "cell_type": "code", "execution_count": null, - "id": "99044ec7-3e07-4c21-8ba2-9eed79ecb801", + "id": "01b772f1", "metadata": {}, "outputs": [], "source": [ @@ -305,7 +306,7 @@ }, { "cell_type": "markdown", - "id": "22563587-a1bc-4885-95a7-81d47858efe4", + "id": "ad73682a", "metadata": {}, "source": [ "## ⚙️ Advanced: Using callbacks to log sample predictions\n", @@ -318,7 +319,7 @@ { "cell_type": "code", "execution_count": null, - "id": "b4c0116c-70e0-4f34-828a-1c911c709346", + "id": "1a4e5088", "metadata": {}, "outputs": [], "source": [ @@ -347,7 +348,7 @@ }, { "cell_type": "markdown", - "id": "77e816d1-a114-4bca-8a7b-a70b13969366", + "id": "5a9f37e4", "metadata": {}, "source": [ "we add `LogPredictions` to the other callbacks" @@ -356,7 +357,7 @@ { "cell_type": "code", "execution_count": null, - "id": "682b52b0-5055-4cfd-a6c1-1a114390e965", + "id": "085d175d", "metadata": {}, 
"outputs": [], "source": [ @@ -366,7 +367,7 @@ { "cell_type": "code", "execution_count": null, - "id": "92cd7e36-e0c9-4235-980f-f590500478a1", + "id": "23c76cbc", "metadata": {}, "outputs": [], "source": [ @@ -385,7 +386,7 @@ }, { "cell_type": "markdown", - "id": "2ea27598-8d08-4ce0-b3ef-ed34fd161e48", + "id": "277b0e09", "metadata": {}, "source": [ "Once we're ready to train, we just call the `fit` method." @@ -394,7 +395,7 @@ { "cell_type": "code", "execution_count": null, - "id": "6ccc77ed-bfd3-4698-a9a4-431b8a46079d", + "id": "7ff55a74", "metadata": {}, "outputs": [], "source": [ @@ -403,7 +404,7 @@ }, { "cell_type": "markdown", - "id": "ebfd5172-59b2-4895-97e9-8e8906ee7147", + "id": "8afd1f8f", "metadata": {}, "source": [ "We can monitor losses, metrics, gradients, parameters and sample predictions as the model trains." @@ -411,7 +412,7 @@ }, { "cell_type": "markdown", - "id": "e74c0eba-58ce-435a-8bee-9c55663e17de", + "id": "1d84a154", "metadata": {}, "source": [ "![composer.png](https://i.imgur.com/VFZLOB3.png?1)" @@ -419,7 +420,7 @@ }, { "cell_type": "markdown", - "id": "8f0ddffd-2ed4-4be6-848e-d95e373cc84d", + "id": "83f37a08", "metadata": {}, "source": [ "## 📚 Resources\n", @@ -429,7 +430,7 @@ }, { "cell_type": "markdown", - "id": "0c60f734-8eef-4b95-b4e1-d4d33ff48399", + "id": "8abc3031", "metadata": {}, "source": [ "## ❓ Questions about W&B\n", diff --git a/colabs/paddlepaddle/paddledetection/PaddleDetection_and_W&B_Your_one_stop_for_everything_object_detection.ipynb b/colabs/paddlepaddle/paddledetection/PaddleDetection_and_W&B_Your_one_stop_for_everything_object_detection.ipynb index 05fa664d..ef757a1a 100644 --- a/colabs/paddlepaddle/paddledetection/PaddleDetection_and_W&B_Your_one_stop_for_everything_object_detection.ipynb +++ b/colabs/paddlepaddle/paddledetection/PaddleDetection_and_W&B_Your_one_stop_for_everything_object_detection.ipynb @@ -110,8 +110,16 @@ "outputs": [], "source": [ "\n", - "import wandb\n", - "wandb.login()" + "import wandb" + ] + 
}, + { + "cell_type": "code", + "execution_count": null, + "metadata": {}, + "outputs": [], + "source": [ + "!wandb login" ] }, { @@ -376,13 +384,6 @@ "\n", "If you have any questions about using W&B to track your model performance and predictions, please contact support@wandb.com" ] - }, - { - "cell_type": "code", - "execution_count": null, - "metadata": {}, - "outputs": [], - "source": [] } ], "metadata": { diff --git a/colabs/paddlepaddle/paddleocr/Train_and_Debug_Your_OCR_Models_with_PaddleOCR_and_W&B.ipynb b/colabs/paddlepaddle/paddleocr/Train_and_Debug_Your_OCR_Models_with_PaddleOCR_and_W&B.ipynb index 12ee49fa..484f7cc8 100644 --- a/colabs/paddlepaddle/paddleocr/Train_and_Debug_Your_OCR_Models_with_PaddleOCR_and_W&B.ipynb +++ b/colabs/paddlepaddle/paddleocr/Train_and_Debug_Your_OCR_Models_with_PaddleOCR_and_W&B.ipynb @@ -94,8 +94,16 @@ "outputs": [], "source": [ "\n", - "import wandb\n", - "wandb.login()" + "import wandb" + ] + }, + { + "cell_type": "code", + "execution_count": null, + "metadata": {}, + "outputs": [], + "source": [ + "!wandb login" ] }, { @@ -425,13 +433,6 @@ "\n", "If you have any questions about using W&B to track your model performance and predictions, please contact support@wandb.com" ] - }, - { - "cell_type": "code", - "execution_count": null, - "metadata": {}, - "outputs": [], - "source": [] } ], "metadata": { diff --git a/colabs/paella/Image-Variations.ipynb b/colabs/paella/Image-Variations.ipynb index d3556d64..2f3ee438 100644 --- a/colabs/paella/Image-Variations.ipynb +++ b/colabs/paella/Image-Variations.ipynb @@ -2,6 +2,7 @@ "cells": [ { "cell_type": "markdown", + "id": "4d6fcb46", "metadata": {}, "source": [ "\"Open\n", @@ -10,7 +11,7 @@ }, { "cell_type": "markdown", - "id": "f25c1396", + "id": "00f3f799", "metadata": {}, "source": [ "# 🔥🔥 Image Variations with Paella + WandB Playground 🪄🐝\n", @@ -23,7 +24,7 @@ { "cell_type": "code", "execution_count": null, - "id": "dde43958-b0ed-4bb2-99b7-08cf5a66bf53", + "id": "903d4fb4", 
"metadata": {}, "outputs": [], "source": [ @@ -52,7 +53,7 @@ { "cell_type": "code", "execution_count": null, - "id": "674111ae-ff8e-41b4-81b4-4961eb625f2b", + "id": "14d37dc7", "metadata": {}, "outputs": [], "source": [ @@ -88,7 +89,7 @@ { "cell_type": "code", "execution_count": null, - "id": "0b15f5b6-9641-45c8-8fbe-709cb26343c2", + "id": "fcf96a10", "metadata": {}, "outputs": [], "source": [ @@ -99,7 +100,7 @@ { "cell_type": "code", "execution_count": null, - "id": "7e4d1f19-74dc-44c9-8977-99cb573a44ae", + "id": "22f0c5ca", "metadata": {}, "outputs": [], "source": [ @@ -124,7 +125,7 @@ { "cell_type": "code", "execution_count": null, - "id": "12e1580f-6808-4309-b8f1-39b5709e920b", + "id": "5043abfe", "metadata": {}, "outputs": [], "source": [ @@ -197,7 +198,7 @@ { "cell_type": "code", "execution_count": null, - "id": "10af9b57-3028-4c26-af7e-73a58df648ab", + "id": "20019eac", "metadata": {}, "outputs": [], "source": [ @@ -231,7 +232,7 @@ { "cell_type": "code", "execution_count": null, - "id": "e6b81f38-ddba-4b0b-8eb0-1f0c49b0e3ef", + "id": "39485bc8", "metadata": {}, "outputs": [], "source": [ @@ -261,7 +262,7 @@ { "cell_type": "code", "execution_count": null, - "id": "f6822644-c7bc-422b-84b3-31ea3923fce7", + "id": "a51835d4", "metadata": {}, "outputs": [], "source": [ @@ -281,21 +282,13 @@ { "cell_type": "code", "execution_count": null, - "id": "5979feb4-2524-498a-9da6-477927d57316", + "id": "bccc3c86", "metadata": {}, "outputs": [], "source": [ "log_image_variations_results(original_image, sampled)\n", "wandb.finish()" ] - }, - { - "cell_type": "code", - "execution_count": null, - "id": "3e85d29a-caf0-4331-ab68-bb33112d4776", - "metadata": {}, - "outputs": [], - "source": [] } ], "metadata": { diff --git a/colabs/paella/Inpainting.ipynb b/colabs/paella/Inpainting.ipynb index ad17db88..b7f0fc96 100644 --- a/colabs/paella/Inpainting.ipynb +++ b/colabs/paella/Inpainting.ipynb @@ -2,6 +2,7 @@ "cells": [ { "cell_type": "markdown", + "id": "f9c62921", "metadata": {}, 
"source": [ "\"Open\n", @@ -11,7 +12,7 @@ { "cell_type": "code", "execution_count": null, - "id": "bd4a4bd6", + "id": "dbb0aa27", "metadata": {}, "outputs": [], "source": [ @@ -25,7 +26,7 @@ { "cell_type": "code", "execution_count": null, - "id": "735c22d8-d4b4-483f-a1c5-3e57b0dd430f", + "id": "e22c1740", "metadata": {}, "outputs": [], "source": [ @@ -54,7 +55,7 @@ { "cell_type": "code", "execution_count": null, - "id": "2318c203-2cce-4f2d-9f51-1d094c95c5ee", + "id": "1a821410", "metadata": {}, "outputs": [], "source": [ @@ -89,7 +90,7 @@ { "cell_type": "code", "execution_count": null, - "id": "066cad54-dc91-42e1-9671-58715daf4c2d", + "id": "88c43a1a", "metadata": {}, "outputs": [], "source": [ @@ -100,7 +101,7 @@ { "cell_type": "code", "execution_count": null, - "id": "db412629-cdaf-492b-b65e-184ebcd0ee62", + "id": "521c9242", "metadata": {}, "outputs": [], "source": [ @@ -139,7 +140,7 @@ { "cell_type": "code", "execution_count": null, - "id": "ea8632b6-fdd0-459a-bded-e987a3d14610", + "id": "3fcd0c06", "metadata": {}, "outputs": [], "source": [ @@ -212,7 +213,7 @@ { "cell_type": "code", "execution_count": null, - "id": "f2ad44fc-ce5d-42ab-aea5-b985f8336379", + "id": "d8a07be6", "metadata": {}, "outputs": [], "source": [ @@ -246,7 +247,7 @@ { "cell_type": "code", "execution_count": null, - "id": "4e0424fd-1561-437a-9bbd-8e648f18df47", + "id": "45c4c80c", "metadata": {}, "outputs": [], "source": [ @@ -264,7 +265,7 @@ { "cell_type": "code", "execution_count": null, - "id": "3598db26-c261-4bde-a30f-5927581e0554", + "id": "e93c67bb", "metadata": {}, "outputs": [], "source": [ @@ -296,21 +297,13 @@ { "cell_type": "code", "execution_count": null, - "id": "643d9e25-a083-4554-9640-ef681a305a08", + "id": "a39f1b2c", "metadata": {}, "outputs": [], "source": [ "log_inoutpainting_results(original_image, sampled, encoded_tokens, mask)\n", "wandb.finish()" ] - }, - { - "cell_type": "code", - "execution_count": null, - "id": "b17cd0e2-a354-4a8b-a68d-11f7d7d48345", - "metadata": 
{}, - "outputs": [], - "source": [] } ], "metadata": { diff --git a/colabs/paella/Latent-Space-Interpolation.ipynb b/colabs/paella/Latent-Space-Interpolation.ipynb index c63c287b..64ef0a24 100644 --- a/colabs/paella/Latent-Space-Interpolation.ipynb +++ b/colabs/paella/Latent-Space-Interpolation.ipynb @@ -2,6 +2,7 @@ "cells": [ { "cell_type": "markdown", + "id": "631fa4cf", "metadata": {}, "source": [ "\"Open\n", @@ -11,7 +12,7 @@ { "cell_type": "code", "execution_count": null, - "id": "f37e1062", + "id": "0723d1d7", "metadata": {}, "outputs": [], "source": [ @@ -25,7 +26,7 @@ { "cell_type": "code", "execution_count": null, - "id": "833f912f-98d0-4055-933c-9803ab1a34ed", + "id": "593e5b91", "metadata": {}, "outputs": [], "source": [ @@ -54,7 +55,7 @@ { "cell_type": "code", "execution_count": null, - "id": "1b41825a-63f3-4551-9c4d-a809297f7e0d", + "id": "1c49c27a", "metadata": {}, "outputs": [], "source": [ @@ -87,7 +88,7 @@ { "cell_type": "code", "execution_count": null, - "id": "7b03d40b-65b6-4939-95ef-4bf2ce3b0133", + "id": "6b34c686", "metadata": {}, "outputs": [], "source": [ @@ -98,7 +99,7 @@ { "cell_type": "code", "execution_count": null, - "id": "9ffdcff9-c5a4-4158-a1ba-52beff090801", + "id": "c4807b36", "metadata": {}, "outputs": [], "source": [ @@ -112,7 +113,7 @@ { "cell_type": "code", "execution_count": null, - "id": "7eb4b944-fbe6-431a-adf8-6ef3e72a4c82", + "id": "d9cf7b37", "metadata": {}, "outputs": [], "source": [ @@ -186,7 +187,7 @@ { "cell_type": "code", "execution_count": null, - "id": "eeaf0fdd-95fb-4154-91bb-806f9aed6c5a", + "id": "21ffec76", "metadata": {}, "outputs": [], "source": [ @@ -220,7 +221,7 @@ { "cell_type": "code", "execution_count": null, - "id": "00b740f5-0b4e-465a-bf90-a5b5860cc785", + "id": "d89b136c", "metadata": {}, "outputs": [], "source": [ @@ -277,21 +278,13 @@ { "cell_type": "code", "execution_count": null, - "id": "de5f608e-f9c2-419b-b9d1-3a1722d055be", + "id": "f8492571", "metadata": {}, "outputs": [], "source": [ 
"log_interpolation_results(sampled)\n", "wandb.finish()" ] - }, - { - "cell_type": "code", - "execution_count": null, - "id": "ffa7516b-bd77-41a3-b9d3-95b110f57106", - "metadata": {}, - "outputs": [], - "source": [] } ], "metadata": { diff --git a/colabs/paella/Multi-Conditioning.ipynb b/colabs/paella/Multi-Conditioning.ipynb index e42e9807..8a24f6ab 100644 --- a/colabs/paella/Multi-Conditioning.ipynb +++ b/colabs/paella/Multi-Conditioning.ipynb @@ -2,6 +2,7 @@ "cells": [ { "cell_type": "markdown", + "id": "361d4376", "metadata": {}, "source": [ "\"Open\n", @@ -10,7 +11,7 @@ }, { "cell_type": "markdown", - "id": "c883955a", + "id": "cc74bb07", "metadata": {}, "source": [ "# 🔥🔥 Multi-Conditional Image Generation with Paella + WandB Playground 🪄🐝\n", @@ -23,7 +24,7 @@ { "cell_type": "code", "execution_count": null, - "id": "fb35d29d-ecec-4917-b876-f65b7e8def5f", + "id": "e7a9ba4f", "metadata": {}, "outputs": [], "source": [ @@ -52,7 +53,7 @@ { "cell_type": "code", "execution_count": null, - "id": "e23328f6-12f2-498c-9322-803c67f2a00f", + "id": "2d3bef06", "metadata": {}, "outputs": [], "source": [ @@ -88,7 +89,7 @@ { "cell_type": "code", "execution_count": null, - "id": "4693a9b6-950f-45bc-b08c-1075bf8abe62", + "id": "79d039cd", "metadata": {}, "outputs": [], "source": [ @@ -99,7 +100,7 @@ { "cell_type": "code", "execution_count": null, - "id": "172af2c6-3440-4cbb-8dc2-88f95d0ea9a0", + "id": "1b4882d7", "metadata": {}, "outputs": [], "source": [ @@ -119,7 +120,7 @@ { "cell_type": "code", "execution_count": null, - "id": "cff68bc9-8fc2-449d-994b-9642669f8d63", + "id": "5c46b95e", "metadata": {}, "outputs": [], "source": [ @@ -193,7 +194,7 @@ { "cell_type": "code", "execution_count": null, - "id": "27874a53-1e32-4206-b64f-05e58fba45ab", + "id": "1a931f01", "metadata": {}, "outputs": [], "source": [ @@ -227,7 +228,7 @@ { "cell_type": "code", "execution_count": null, - "id": "3753c2be-cbce-4292-b73f-2701a8ad99c0", + "id": "4e177084", "metadata": {}, "outputs": [], 
"source": [ @@ -258,21 +259,13 @@ { "cell_type": "code", "execution_count": null, - "id": "fc9c8431-6068-4961-bc01-f128b1d88fb6", + "id": "6c50f803", "metadata": {}, "outputs": [], "source": [ "log_multi_conditioning_results(sampled)\n", "wandb.finish()" ] - }, - { - "cell_type": "code", - "execution_count": null, - "id": "17a90d54-7717-402f-9086-ea40c1866adf", - "metadata": {}, - "outputs": [], - "source": [] } ], "metadata": { diff --git a/colabs/paella/Orientation-Guided-Multi-Conditioning.ipynb b/colabs/paella/Orientation-Guided-Multi-Conditioning.ipynb index fc5e35a7..f37d3592 100644 --- a/colabs/paella/Orientation-Guided-Multi-Conditioning.ipynb +++ b/colabs/paella/Orientation-Guided-Multi-Conditioning.ipynb @@ -2,6 +2,7 @@ "cells": [ { "cell_type": "markdown", + "id": "9c1c12f0", "metadata": {}, "source": [ "\"Open\n", @@ -10,7 +11,7 @@ }, { "cell_type": "markdown", - "id": "d10bd631", + "id": "0876094f", "metadata": {}, "source": [ "# 🔥🔥 Orientation Guided Multi-Conditional Image Generation with Paella + WandB Playground 🪄🐝\n", @@ -23,7 +24,7 @@ { "cell_type": "code", "execution_count": null, - "id": "c1019eb6-1b29-4ce5-95fa-6358ce0dc584", + "id": "5c73aeff", "metadata": {}, "outputs": [], "source": [ @@ -52,7 +53,7 @@ { "cell_type": "code", "execution_count": null, - "id": "94f4d23e-ff7e-4de0-a69c-ca13751b96a5", + "id": "d98ac2a7", "metadata": {}, "outputs": [], "source": [ @@ -86,7 +87,7 @@ { "cell_type": "code", "execution_count": null, - "id": "6ca68fca-20b2-4ef5-ba7d-fb12bc906a06", + "id": "620506d5", "metadata": {}, "outputs": [], "source": [ @@ -97,7 +98,7 @@ { "cell_type": "code", "execution_count": null, - "id": "412fe0e2-032c-4154-80f6-22f4f0d53315", + "id": "2f317b6e", "metadata": {}, "outputs": [], "source": [ @@ -123,7 +124,7 @@ { "cell_type": "code", "execution_count": null, - "id": "b30c6757-c9cf-4acc-b909-de9ce990bb29", + "id": "1ab1b0a6", "metadata": {}, "outputs": [], "source": [ @@ -196,7 +197,7 @@ { "cell_type": "code", 
"execution_count": null, - "id": "4784d7a7-d0c0-4314-922a-6db000642b77", + "id": "3b79d1c9", "metadata": {}, "outputs": [], "source": [ @@ -230,7 +231,7 @@ { "cell_type": "code", "execution_count": null, - "id": "9574e4b9-050f-47ee-8ca3-951268539d3f", + "id": "5865c4c9", "metadata": {}, "outputs": [], "source": [ @@ -277,7 +278,7 @@ { "cell_type": "code", "execution_count": null, - "id": "a8e626a6-6b86-4208-8767-101a152401cc", + "id": "8f3a239b", "metadata": {}, "outputs": [], "source": [ diff --git a/colabs/paella/Outpainting.ipynb b/colabs/paella/Outpainting.ipynb index afda36bd..253b79db 100644 --- a/colabs/paella/Outpainting.ipynb +++ b/colabs/paella/Outpainting.ipynb @@ -2,6 +2,7 @@ "cells": [ { "cell_type": "markdown", + "id": "3f469752", "metadata": {}, "source": [ "\"Open\n", @@ -10,7 +11,7 @@ }, { "cell_type": "markdown", - "id": "b2903db1", + "id": "4c7732a8", "metadata": {}, "source": [ "# 🔥🔥 Image Outpainting with Paella + WandB Playground 🪄🐝\n", @@ -23,7 +24,7 @@ { "cell_type": "code", "execution_count": null, - "id": "25439e07-57df-4b4a-a095-f02fd5b82a31", + "id": "0500c760", "metadata": {}, "outputs": [], "source": [ @@ -52,7 +53,7 @@ { "cell_type": "code", "execution_count": null, - "id": "e9f71025-a469-415e-ad7e-656080e8ef08", + "id": "086082b2", "metadata": {}, "outputs": [], "source": [ @@ -88,7 +89,7 @@ { "cell_type": "code", "execution_count": null, - "id": "a38d18fc-075f-4a05-9a66-97c8a9ae9d93", + "id": "717de70c", "metadata": {}, "outputs": [], "source": [ @@ -99,7 +100,7 @@ { "cell_type": "code", "execution_count": null, - "id": "2c956e5b-df8b-4d4d-93d6-fbbd23523241", + "id": "7691dcc3", "metadata": {}, "outputs": [], "source": [ @@ -138,7 +139,7 @@ { "cell_type": "code", "execution_count": null, - "id": "871f433c-5c86-472d-8313-cfa74eec7a0f", + "id": "e1caa2c4", "metadata": {}, "outputs": [], "source": [ @@ -211,7 +212,7 @@ { "cell_type": "code", "execution_count": null, - "id": "536ad909-0764-44d1-bf79-b8e905a760b0", + "id": "fae7b4d1", 
"metadata": {}, "outputs": [], "source": [ @@ -245,7 +246,7 @@ { "cell_type": "code", "execution_count": null, - "id": "edf4cc02-c1bf-4b6b-aeef-8c121322bd9f", + "id": "ec739af3", "metadata": {}, "outputs": [], "source": [ @@ -263,7 +264,7 @@ { "cell_type": "code", "execution_count": null, - "id": "07446a5c-ac79-46cd-898b-fb29048d30a7", + "id": "b727ff64", "metadata": {}, "outputs": [], "source": [ @@ -300,21 +301,13 @@ { "cell_type": "code", "execution_count": null, - "id": "de73e4a4-8e9e-41de-9138-be7b3209db18", + "id": "14aeafb1", "metadata": {}, "outputs": [], "source": [ "log_inoutpainting_results(original_image, sampled, encoded_tokens, mask)\n", "wandb.finish()" ] - }, - { - "cell_type": "code", - "execution_count": null, - "id": "cecd9119-ad2f-4968-9395-e89626fbb032", - "metadata": {}, - "outputs": [], - "source": [] } ], "metadata": { diff --git a/colabs/paella/Structural-Morphing.ipynb b/colabs/paella/Structural-Morphing.ipynb index 26a7c7a1..19ab1ef9 100644 --- a/colabs/paella/Structural-Morphing.ipynb +++ b/colabs/paella/Structural-Morphing.ipynb @@ -2,6 +2,7 @@ "cells": [ { "cell_type": "markdown", + "id": "65899be7", "metadata": {}, "source": [ "\"Open\n", @@ -10,7 +11,7 @@ }, { "cell_type": "markdown", - "id": "aea68e9b", + "id": "f8776d95", "metadata": {}, "source": [ "# 🔥🔥 Structural Morphing of Images with Paella + WandB Playground 🪄🐝\n", @@ -23,7 +24,7 @@ { "cell_type": "code", "execution_count": null, - "id": "864b20f3-0e1f-4631-a3b5-b01ca229b9df", + "id": "904fb436", "metadata": {}, "outputs": [], "source": [ @@ -52,7 +53,7 @@ { "cell_type": "code", "execution_count": null, - "id": "8441ea28-1555-4d59-b1de-56e85f48fdc5", + "id": "9f0bd618", "metadata": {}, "outputs": [], "source": [ @@ -87,7 +88,7 @@ { "cell_type": "code", "execution_count": null, - "id": "f0814312-cca0-4486-a222-4b90c6eecfd9", + "id": "9a7f2514", "metadata": {}, "outputs": [], "source": [ @@ -98,7 +99,7 @@ { "cell_type": "code", "execution_count": null, - "id": 
"bcee6f91-56bd-4ff5-b042-95465e0949f5", + "id": "9031290b", "metadata": {}, "outputs": [], "source": [ @@ -123,7 +124,7 @@ { "cell_type": "code", "execution_count": null, - "id": "37526b5e-14d3-4057-99a3-9b12e1baab6a", + "id": "349f8255", "metadata": {}, "outputs": [], "source": [ @@ -196,7 +197,7 @@ { "cell_type": "code", "execution_count": null, - "id": "15138eea-95cd-45c7-af76-d62309d266c3", + "id": "08b7c8b4", "metadata": {}, "outputs": [], "source": [ @@ -230,7 +231,7 @@ { "cell_type": "code", "execution_count": null, - "id": "58743cf3-30c0-4ddc-b008-ee847742c96b", + "id": "c10f09cc", "metadata": {}, "outputs": [], "source": [ @@ -248,7 +249,7 @@ { "cell_type": "code", "execution_count": null, - "id": "0ee870ef-70b3-4728-8dcd-1bfdd7117246", + "id": "982332f3", "metadata": {}, "outputs": [], "source": [ @@ -277,7 +278,7 @@ { "cell_type": "code", "execution_count": null, - "id": "f6ccceac-8531-41b0-8b73-9953f1eff480", + "id": "8a647254", "metadata": {}, "outputs": [], "source": [ diff --git a/colabs/paella/Text-Conditional.ipynb b/colabs/paella/Text-Conditional.ipynb index edf4049a..48dcf23b 100644 --- a/colabs/paella/Text-Conditional.ipynb +++ b/colabs/paella/Text-Conditional.ipynb @@ -2,6 +2,7 @@ "cells": [ { "cell_type": "markdown", + "id": "ca9fdb97", "metadata": {}, "source": [ "\"Open\n", @@ -10,7 +11,7 @@ }, { "cell_type": "markdown", - "id": "d4a618d5", + "id": "8efa3520", "metadata": {}, "source": [ "# 🔥🔥 Text-Conditional Image Generation with Paella + WandB Playground 🪄🐝\n", @@ -23,7 +24,7 @@ { "cell_type": "code", "execution_count": null, - "id": "78f75e79-7ed1-4282-b6dd-b6dcb1d1e408", + "id": "756c31f4", "metadata": {}, "outputs": [], "source": [ @@ -52,7 +53,7 @@ { "cell_type": "code", "execution_count": null, - "id": "2c3fc5a7-72ed-4a3b-8d29-ff1519ec88ba", + "id": "1b3402e6", "metadata": {}, "outputs": [], "source": [ @@ -82,7 +83,7 @@ { "cell_type": "code", "execution_count": null, - "id": "4813a37e-31d4-403e-bfcb-d17ea7cfa995", + "id": 
"89d05080", "metadata": {}, "outputs": [], "source": [ @@ -93,7 +94,7 @@ { "cell_type": "code", "execution_count": null, - "id": "127b8965-3def-4d7e-9e7c-c9875bbe946f", + "id": "4b4b2e95", "metadata": {}, "outputs": [], "source": [ @@ -107,7 +108,7 @@ { "cell_type": "code", "execution_count": null, - "id": "7d62edb2-fa7a-447c-8ecd-1f65f73adf41", + "id": "848691aa", "metadata": {}, "outputs": [], "source": [ @@ -181,7 +182,7 @@ { "cell_type": "code", "execution_count": null, - "id": "d18f9ead-88ce-4919-950e-7f99624f9344", + "id": "b65d3b6d", "metadata": {}, "outputs": [], "source": [ @@ -215,7 +216,7 @@ { "cell_type": "code", "execution_count": null, - "id": "0de7052a-ac91-4c57-8da2-ddbd2c1cb837", + "id": "8826240b", "metadata": {}, "outputs": [], "source": [ @@ -237,7 +238,7 @@ { "cell_type": "code", "execution_count": null, - "id": "625bb261-f76f-4a4c-b7e4-b3eee2d6f5d4", + "id": "c30cda12", "metadata": {}, "outputs": [], "source": [ diff --git a/colabs/prompts/WandB_LLM_QA_bot.ipynb b/colabs/prompts/WandB_LLM_QA_bot.ipynb index 6cf57f5b..f1924ff7 100644 --- a/colabs/prompts/WandB_LLM_QA_bot.ipynb +++ b/colabs/prompts/WandB_LLM_QA_bot.ipynb @@ -114,8 +114,16 @@ "metadata": {}, "outputs": [], "source": [ - "import wandb\n", - "wandb.login()" + "import wandb" + ] + }, + { + "cell_type": "code", + "execution_count": null, + "metadata": {}, + "outputs": [], + "source": [ + "!wandb login" ] }, { diff --git a/colabs/pycaret/Default_Credit_Prediction_Using_W&B_Pycaret_FastAPI.ipynb b/colabs/pycaret/Default_Credit_Prediction_Using_W&B_Pycaret_FastAPI.ipynb index 2de82091..8b1fe03e 100644 --- a/colabs/pycaret/Default_Credit_Prediction_Using_W&B_Pycaret_FastAPI.ipynb +++ b/colabs/pycaret/Default_Credit_Prediction_Using_W&B_Pycaret_FastAPI.ipynb @@ -117,8 +117,16 @@ "metadata": {}, "outputs": [], "source": [ - "import wandb\n", - "wandb.login()" + "import wandb" + ] + }, + { + "cell_type": "code", + "execution_count": null, + "metadata": {}, + "outputs": [], + "source": [ + 
"!wandb login" ] }, { diff --git a/colabs/pyg/8_Node_Classification_(with_W&B).ipynb b/colabs/pyg/8_Node_Classification_(with_W&B).ipynb index c05f8d88..7583e56b 100644 --- a/colabs/pyg/8_Node_Classification_(with_W&B).ipynb +++ b/colabs/pyg/8_Node_Classification_(with_W&B).ipynb @@ -45,8 +45,16 @@ "source": [ "enable_wandb = True\n", "if enable_wandb:\n", - " import wandb\n", - " wandb.login()" + " import wandb" + ] + }, + { + "cell_type": "code", + "execution_count": null, + "metadata": {}, + "outputs": [], + "source": [ + "!wandb login" ] }, { diff --git a/colabs/pyg/Graph_Classification_with_PyG_and_W&B.ipynb b/colabs/pyg/Graph_Classification_with_PyG_and_W&B.ipynb index 3614737c..acf4a2e5 100644 --- a/colabs/pyg/Graph_Classification_with_PyG_and_W&B.ipynb +++ b/colabs/pyg/Graph_Classification_with_PyG_and_W&B.ipynb @@ -580,13 +580,6 @@ "\n", "If you have any questions about using W&B to track your model performance and predictions, please contact support@wandb.com" ] - }, - { - "cell_type": "code", - "execution_count": null, - "metadata": {}, - "outputs": [], - "source": [] } ], "metadata": { diff --git a/colabs/pyg/point-cloud-segmentation/00_eda.ipynb b/colabs/pyg/point-cloud-segmentation/00_eda.ipynb index f4abc616..89a81516 100644 --- a/colabs/pyg/point-cloud-segmentation/00_eda.ipynb +++ b/colabs/pyg/point-cloud-segmentation/00_eda.ipynb @@ -2,6 +2,7 @@ "cells": [ { "cell_type": "markdown", + "id": "5ef81b15", "metadata": {}, "source": [ "\"Open\n", @@ -11,7 +12,7 @@ { "attachments": {}, "cell_type": "markdown", - "id": "57500bde-8f7c-4e37-aff7-3aea89793d2f", + "id": "e0e9e05d", "metadata": {}, "source": [ "# 🔥🔥 Explore ShapeNet Dataset using PyTorch Geometric and Weights & Biases 🪄🐝\n", @@ -31,7 +32,7 @@ }, { "cell_type": "markdown", - "id": "3fffd9cb-f1c6-4b1b-9de9-dc66f7d09f1f", + "id": "281c2001", "metadata": {}, "source": [ "# Install Required Packages" @@ -40,7 +41,7 @@ { "cell_type": "code", "execution_count": null, - "id": 
"f7fc4b41-2b6f-4624-bdd7-dfa2decae773", + "id": "d0ee5b86", "metadata": {}, "outputs": [], "source": [ @@ -53,7 +54,7 @@ { "cell_type": "code", "execution_count": null, - "id": "45e8441e-3b6d-4b24-abb9-deccbcc6c64a", + "id": "f5a3bf42", "metadata": {}, "outputs": [], "source": [ @@ -66,7 +67,7 @@ }, { "cell_type": "markdown", - "id": "8040f1cd-cf19-4028-a1f6-0009c8dda7aa", + "id": "b4907ca7", "metadata": {}, "source": [ "## Import Libraries" @@ -75,7 +76,7 @@ { "cell_type": "code", "execution_count": null, - "id": "f110ae50-84a9-4bb0-83d5-7f45f21a2b61", + "id": "740d3dca", "metadata": {}, "outputs": [], "source": [ @@ -99,7 +100,7 @@ { "cell_type": "code", "execution_count": null, - "id": "2a83ea19-e50e-410c-a539-6eccdb68ae71", + "id": "61fa78fc", "metadata": {}, "outputs": [], "source": [ @@ -119,7 +120,7 @@ }, { "cell_type": "markdown", - "id": "35c96004-0dad-420d-b5f3-e01795d563ee", + "id": "21bdbd6e", "metadata": {}, "source": [ "## Visualize Train-Val Dataset" @@ -128,7 +129,7 @@ { "cell_type": "code", "execution_count": null, - "id": "010dba04-fa80-4a2b-9176-dadeeb23ca65", + "id": "728f4672", "metadata": {}, "outputs": [], "source": [ @@ -145,7 +146,7 @@ { "cell_type": "code", "execution_count": null, - "id": "f29f2964-31eb-4375-a1f4-c5bb120b8232", + "id": "f0660a21", "metadata": {}, "outputs": [], "source": [ @@ -171,7 +172,7 @@ { "cell_type": "code", "execution_count": null, - "id": "33adc481-bc6f-4773-adc9-1d4fb6a82e2d", + "id": "c11cd1ea", "metadata": {}, "outputs": [], "source": [ @@ -187,7 +188,7 @@ }, { "cell_type": "markdown", - "id": "2e71c385-1b0f-428c-8559-e2fac1816261", + "id": "6704bb3a", "metadata": {}, "source": [ "## Visualize Test Dataset" @@ -196,7 +197,7 @@ { "cell_type": "code", "execution_count": null, - "id": "f893cc65-779b-426b-bc9c-397c7894957a", + "id": "49b04315", "metadata": {}, "outputs": [], "source": [ @@ -211,7 +212,7 @@ { "cell_type": "code", "execution_count": null, - "id": "c8001b95-57d0-43b5-9092-d874ddbca536", + "id": 
"714a6358", "metadata": {}, "outputs": [], "source": [ @@ -236,7 +237,7 @@ { "cell_type": "code", "execution_count": null, - "id": "e8cbb460-32ca-4814-9890-11cee3f3e22a", + "id": "bbf6c666", "metadata": {}, "outputs": [], "source": [ @@ -253,20 +254,12 @@ { "cell_type": "code", "execution_count": null, - "id": "d592a642-4ca7-40f8-af69-0a8b3b2693a8", + "id": "4e179ef9", "metadata": {}, "outputs": [], "source": [ "wandb.finish()" ] - }, - { - "cell_type": "code", - "execution_count": null, - "id": "e7364297-0751-4dc5-9661-4043d243018c", - "metadata": {}, - "outputs": [], - "source": [] } ], "metadata": { diff --git a/colabs/pyg/point-cloud-segmentation/01_dgcnn_train.ipynb b/colabs/pyg/point-cloud-segmentation/01_dgcnn_train.ipynb index fd725f09..be383d14 100644 --- a/colabs/pyg/point-cloud-segmentation/01_dgcnn_train.ipynb +++ b/colabs/pyg/point-cloud-segmentation/01_dgcnn_train.ipynb @@ -2,6 +2,7 @@ "cells": [ { "cell_type": "markdown", + "id": "dc42e2f0", "metadata": {}, "source": [ "\"Open\n", @@ -11,7 +12,7 @@ { "attachments": {}, "cell_type": "markdown", - "id": "cdbbe526-23ee-4a90-af8c-9f7092f50192", + "id": "ed3fb916", "metadata": {}, "source": [ "# 🔥🔥 Train DGCNN Model using PyTorch Geometric and Weights & Biases 🪄🐝\n", @@ -29,7 +30,7 @@ }, { "cell_type": "markdown", - "id": "ed5c4e28-f40c-4c6a-827a-e28ff6db0501", + "id": "abb4ac7d", "metadata": {}, "source": [ "# Install Required Packages" @@ -38,7 +39,7 @@ { "cell_type": "code", "execution_count": null, - "id": "726d3107-dd0a-440a-a164-43b708af858f", + "id": "b3b74df1", "metadata": {}, "outputs": [], "source": [ @@ -51,7 +52,7 @@ { "cell_type": "code", "execution_count": null, - "id": "4ebab99e-383f-410d-93a0-c8ff29dad2de", + "id": "7b0e8997", "metadata": {}, "outputs": [], "source": [ @@ -64,7 +65,7 @@ }, { "cell_type": "markdown", - "id": "dec996ce-98b9-465d-b530-e0f88ac7080d", + "id": "6a6c41d3", "metadata": {}, "source": [ "## Import Libraries" @@ -73,7 +74,7 @@ { "cell_type": "code", 
"execution_count": null, - "id": "c7f2a4cd-24e6-457d-b7f6-7fc94912fd12", + "id": "8256d0a4", "metadata": {}, "outputs": [], "source": [ @@ -97,7 +98,7 @@ }, { "cell_type": "markdown", - "id": "c71deb4e-f171-4a31-9801-a0e8f9cc9c04", + "id": "bd995109", "metadata": {}, "source": [ "# Initialize Weights & Biases\n", @@ -108,7 +109,7 @@ { "cell_type": "code", "execution_count": null, - "id": "a1053dec-59c9-4e9d-84e2-a1297dbf715f", + "id": "598d0d2f", "metadata": {}, "outputs": [], "source": [ @@ -147,7 +148,7 @@ }, { "cell_type": "markdown", - "id": "57a72e20-ff1b-4cdf-b87f-10baff51bac7", + "id": "21b2e1dc", "metadata": {}, "source": [ "# Load ShapeNet Dataset using PyTorch Geometric\n", @@ -158,7 +159,7 @@ { "cell_type": "code", "execution_count": null, - "id": "42955441-03d8-4e9f-b856-7d7edc708981", + "id": "901b65e4", "metadata": {}, "outputs": [], "source": [ @@ -174,7 +175,7 @@ { "cell_type": "code", "execution_count": null, - "id": "da27bf0f-1e0f-438d-b602-2124606e7138", + "id": "593299c7", "metadata": {}, "outputs": [], "source": [ @@ -188,7 +189,7 @@ }, { "cell_type": "markdown", - "id": "3e335648-933d-4865-aaaf-0f24c0358d16", + "id": "ff838a22", "metadata": {}, "source": [ "Now, we need to offset the segmentation labels" @@ -197,7 +198,7 @@ { "cell_type": "code", "execution_count": null, - "id": "9e14c4af-b5af-4805-a479-f0725d6ef1de", + "id": "bc1bfc6c", "metadata": {}, "outputs": [], "source": [ @@ -217,7 +218,7 @@ { "cell_type": "code", "execution_count": null, - "id": "10e09ff7-e64b-4912-a30b-461d5eb72cb2", + "id": "d9589c7a", "metadata": {}, "outputs": [], "source": [ @@ -229,7 +230,7 @@ { "cell_type": "code", "execution_count": null, - "id": "8bee5029-3bdd-4fa8-9a37-4d024c0cf6b5", + "id": "b3aa8265", "metadata": {}, "outputs": [], "source": [ @@ -249,7 +250,7 @@ }, { "cell_type": "markdown", - "id": "6d1bc429-7d54-411a-b477-b7bbc7bfc3d6", + "id": "f5e0dc65", "metadata": {}, "source": [ "# Implementing the DGCNN Model using PyTorch Geometric" @@ -258,7 
+259,7 @@ { "cell_type": "code", "execution_count": null, - "id": "ac1bd7a8-585f-45fc-bc59-a8feec3512db", + "id": "fd50b563", "metadata": {}, "outputs": [], "source": [ @@ -290,7 +291,7 @@ { "cell_type": "code", "execution_count": null, - "id": "5a677e7b-9179-4c71-b1dc-af636dcf95d5", + "id": "f69f1f9a", "metadata": {}, "outputs": [], "source": [ @@ -309,7 +310,7 @@ }, { "cell_type": "markdown", - "id": "3d94603b-51df-4299-ae69-f43617827d1c", + "id": "b9ec4b0b", "metadata": {}, "source": [ "# Training DGCNN and Logging Metrics on Weights & Biases" @@ -318,7 +319,7 @@ { "cell_type": "code", "execution_count": null, - "id": "1a9fbe02-b095-4df7-ab6b-af0dd6a411bf", + "id": "6e8d5486", "metadata": {}, "outputs": [], "source": [ @@ -381,7 +382,7 @@ { "cell_type": "code", "execution_count": null, - "id": "94d7e550-19b1-40b8-9e01-2997a694a4fa", + "id": "28a8c910", "metadata": {}, "outputs": [], "source": [ @@ -441,7 +442,7 @@ { "cell_type": "code", "execution_count": null, - "id": "da4fad76-f961-4a0a-9f37-c72c1422e885", + "id": "04abc40e", "metadata": {}, "outputs": [], "source": [ @@ -507,7 +508,7 @@ { "cell_type": "code", "execution_count": null, - "id": "e8c31a63-96a4-409f-bb0d-2b56b034adf0", + "id": "23998504", "metadata": {}, "outputs": [], "source": [ @@ -533,7 +534,7 @@ { "cell_type": "code", "execution_count": null, - "id": "6f013430-fd25-4541-9dd3-3c67eef9074a", + "id": "43338109", "metadata": {}, "outputs": [], "source": [ @@ -558,7 +559,7 @@ { "cell_type": "code", "execution_count": null, - "id": "000c8944-6821-43a3-94d2-548f8b174334", + "id": "30145cd7", "metadata": {}, "outputs": [], "source": [ @@ -567,7 +568,7 @@ }, { "cell_type": "markdown", - "id": "b093dee2-e902-4ad8-af3c-d7126ef985c1", + "id": "87f7fe22", "metadata": {}, "source": [ "Next, you can check out the following notebook to learn how to evaluate the model on the ShapeNetCore dataset using Weights & Biases, you can check out the following notebook:\n", diff --git 
a/colabs/pyg/point-cloud-segmentation/02_dgcnn_evaluate.ipynb b/colabs/pyg/point-cloud-segmentation/02_dgcnn_evaluate.ipynb index 86c581af..35b6f0c0 100644 --- a/colabs/pyg/point-cloud-segmentation/02_dgcnn_evaluate.ipynb +++ b/colabs/pyg/point-cloud-segmentation/02_dgcnn_evaluate.ipynb @@ -2,6 +2,7 @@ "cells": [ { "cell_type": "markdown", + "id": "025dae63", "metadata": {}, "source": [ "\"Open\n", @@ -11,7 +12,7 @@ { "attachments": {}, "cell_type": "markdown", - "id": "9385afd2-4e70-4fc3-820b-576bdf035e42", + "id": "1b4e7314", "metadata": {}, "source": [ "# 🔥🔥 Evaluate DGCNN Model Weights & Biases 🪄🐝\n", @@ -27,7 +28,7 @@ }, { "cell_type": "markdown", - "id": "1e79b763-e197-4a1d-9f3f-f89d873c12ed", + "id": "89db1834", "metadata": {}, "source": [ "# Install Required Packages" @@ -36,7 +37,7 @@ { "cell_type": "code", "execution_count": null, - "id": "4e46eac0-9bf8-4c3b-906a-ca56343810ea", + "id": "a77aebd4", "metadata": {}, "outputs": [], "source": [ @@ -49,7 +50,7 @@ { "cell_type": "code", "execution_count": null, - "id": "e6a87ef2-abf9-4d71-a4ae-d58e3ebba706", + "id": "be70eea7", "metadata": {}, "outputs": [], "source": [ @@ -62,7 +63,7 @@ }, { "cell_type": "markdown", - "id": "267f5a6e-fedd-48ef-9861-66e8ffb9fbbd", + "id": "7f81a149", "metadata": {}, "source": [ "## Import Libraries" @@ -71,7 +72,7 @@ { "cell_type": "code", "execution_count": null, - "id": "1d8cf4d8-6839-47fb-b789-857997e15107", + "id": "4b0853b8", "metadata": {}, "outputs": [], "source": [ @@ -95,7 +96,7 @@ }, { "cell_type": "markdown", - "id": "0643258d-84be-4a10-a67e-0cf0331e07aa", + "id": "ff18b9d1", "metadata": {}, "source": [ "# Initialize Weights & Biases\n", @@ -106,7 +107,7 @@ { "cell_type": "code", "execution_count": null, - "id": "308fec6f-7397-4c9d-a83d-c7b5632053b6", + "id": "f6e08809", "metadata": {}, "outputs": [], "source": [ @@ -145,7 +146,7 @@ }, { "cell_type": "markdown", - "id": "c6f068eb-5b69-4c73-9e21-4f0d5eab0d9e", + "id": "d5c4fb1f", "metadata": {}, "source": [ "# Load 
ShapeNet Dataset using PyTorch Geometric\n", @@ -156,7 +157,7 @@ { "cell_type": "code", "execution_count": null, - "id": "208dc676-c0bf-4a3d-bc5a-5b4251664de9", + "id": "9aa02f55", "metadata": {}, "outputs": [], "source": [ @@ -172,7 +173,7 @@ { "cell_type": "code", "execution_count": null, - "id": "27014b34-cfff-47be-b761-db8c484dad97", + "id": "46d88e72", "metadata": {}, "outputs": [], "source": [ @@ -191,7 +192,7 @@ { "cell_type": "code", "execution_count": null, - "id": "e8d0e1df-7522-485b-b697-3dd066d85f8c", + "id": "65a3b8f1", "metadata": {}, "outputs": [], "source": [ @@ -213,7 +214,7 @@ { "cell_type": "code", "execution_count": null, - "id": "2c67c4fa-92b4-495a-a247-c4e8a6c56d79", + "id": "ca65ff1d", "metadata": {}, "outputs": [], "source": [ @@ -226,7 +227,7 @@ { "cell_type": "code", "execution_count": null, - "id": "5147d1c3-9772-4be9-8932-c9238a9270ef", + "id": "10bb8aeb", "metadata": {}, "outputs": [], "source": [ @@ -242,7 +243,7 @@ }, { "cell_type": "markdown", - "id": "1ce1b72e-cd7b-452f-ac08-dc2155561077", + "id": "29385a29", "metadata": {}, "source": [ "# Load Checkpoint" @@ -251,7 +252,7 @@ { "cell_type": "code", "execution_count": null, - "id": "d8a02d75-be84-436a-871c-ccadb863dc95", + "id": "aad21a2d", "metadata": {}, "outputs": [], "source": [ @@ -282,7 +283,7 @@ }, { "cell_type": "markdown", - "id": "73c1f670-2a87-4083-9ec7-ed07785a5d66", + "id": "d86f033f", "metadata": {}, "source": [ "Since we saved the checkpoints as artifacts on our Weights & Biases workspace, we can now fetch and load them." 
@@ -291,7 +292,7 @@ { "cell_type": "code", "execution_count": null, - "id": "22cb064c-58b4-4887-b31b-d16622405b94", + "id": "c733c60c", "metadata": {}, "outputs": [], "source": [ @@ -312,7 +313,7 @@ }, { "cell_type": "markdown", - "id": "a676bbf9-50be-4a99-9222-427c927bbe57", + "id": "dbfc3726", "metadata": {}, "source": [ "# Evaluation" @@ -321,7 +322,7 @@ { "cell_type": "code", "execution_count": null, - "id": "98e79961-e71c-4a2f-9040-72c6a5b32f09", + "id": "25bf63ea", "metadata": {}, "outputs": [], "source": [ @@ -392,7 +393,7 @@ }, { "cell_type": "markdown", - "id": "e0c5f0bb-01b1-48a3-980d-c22434f2be8a", + "id": "d19e4e20", "metadata": {}, "source": [ "We evaluate the results and store them in a Weights & Biases Table." @@ -401,7 +402,7 @@ { "cell_type": "code", "execution_count": null, - "id": "9427209c-216a-4887-9b3b-a73c474680c4", + "id": "46f08bc2", "metadata": {}, "outputs": [], "source": [ @@ -413,7 +414,7 @@ { "cell_type": "code", "execution_count": null, - "id": "e1688f54-ef52-4f42-a6f6-b5c3364da87c", + "id": "21b13d76", "metadata": {}, "outputs": [], "source": [ @@ -423,20 +424,12 @@ { "cell_type": "code", "execution_count": null, - "id": "3dfd8610-bda8-4e7b-9939-b1e7b7b4e603", + "id": "45a26573", "metadata": {}, "outputs": [], "source": [ "wandb.finish()" ] - }, - { - "cell_type": "code", - "execution_count": null, - "id": "87c44316-de14-4031-b6d2-ba8c9621a66d", - "metadata": {}, - "outputs": [], - "source": [] } ], "metadata": { diff --git a/colabs/pyg/pointnet-classification/00_eda.ipynb b/colabs/pyg/pointnet-classification/00_eda.ipynb index 5bf56f5e..03e4fe77 100644 --- a/colabs/pyg/pointnet-classification/00_eda.ipynb +++ b/colabs/pyg/pointnet-classification/00_eda.ipynb @@ -2,6 +2,7 @@ "cells": [ { "cell_type": "markdown", + "id": "e0dda925", "metadata": {}, "source": [ "\"Open\n", @@ -11,7 +12,7 @@ { "attachments": {}, "cell_type": "markdown", - "id": "927cc500", + "id": "411e99e0", "metadata": {}, "source": [ "# 🔥🔥 Explore ModelNet Datasets 
using PyTorch Geometric and Weights & Biases 🪄🐝\n", @@ -22,7 +23,7 @@ { "attachments": {}, "cell_type": "markdown", - "id": "73b8e385", + "id": "ea78427f", "metadata": {}, "source": [ "## Install Required Libraries" @@ -31,7 +32,7 @@ { "cell_type": "code", "execution_count": null, - "id": "c0e582cd-7d1e-4223-8352-a0b1a05f5adf", + "id": "4e0fd350", "metadata": {}, "outputs": [], "source": [ @@ -44,7 +45,7 @@ { "attachments": {}, "cell_type": "markdown", - "id": "3441841e", + "id": "46b62412", "metadata": {}, "source": [ "We now install PyTorch Geometric according to our PyTorch Version. We also install Weights & Biases." @@ -53,7 +54,7 @@ { "cell_type": "code", "execution_count": null, - "id": "cb6c2bcc", + "id": "64b6b879", "metadata": {}, "outputs": [], "source": [ @@ -67,7 +68,7 @@ { "attachments": {}, "cell_type": "markdown", - "id": "8f2cdd46", + "id": "448482ba", "metadata": {}, "source": [ "### Import Libraries" @@ -76,7 +77,7 @@ { "cell_type": "code", "execution_count": null, - "id": "bac559ed-172f-4fd3-b558-b0a659764fd0", + "id": "dae389f0", "metadata": {}, "outputs": [], "source": [ @@ -105,7 +106,7 @@ { "attachments": {}, "cell_type": "markdown", - "id": "1bd2a644", + "id": "09a1bfdd", "metadata": {}, "source": [ "## Initialize Weights & Biases\n", @@ -116,7 +117,7 @@ { "cell_type": "code", "execution_count": null, - "id": "8aa7a7d3-e763-4d9e-a05b-1ffde74ef569", + "id": "faf958d9", "metadata": {}, "outputs": [], "source": [ @@ -146,7 +147,7 @@ { "attachments": {}, "cell_type": "markdown", - "id": "5ce9b4f7", + "id": "7d3f67fd", "metadata": {}, "source": [ "## Load ModelNet Dataset using PyTorch Geometric" @@ -155,7 +156,7 @@ { "cell_type": "code", "execution_count": null, - "id": "1573fe1d-c5d5-437f-a743-33432c45a1ff", + "id": "2812970c", "metadata": {}, "outputs": [], "source": [ @@ -180,7 +181,7 @@ { "attachments": {}, "cell_type": "markdown", - "id": "e76ffe8d", + "id": "b372188e", "metadata": {}, "source": [ "## Log Data to 
[`wandb.Table`](https://docs.wandb.ai/ref/python/data-types/table)\n", @@ -191,7 +192,7 @@ { "cell_type": "code", "execution_count": null, - "id": "274115e0-d682-48e0-a701-f4e8d3c276cc", + "id": "df976f6b", "metadata": {}, "outputs": [], "source": [ @@ -220,7 +221,7 @@ { "cell_type": "code", "execution_count": null, - "id": "7c7b7c12-3e1e-4f7b-b917-a213a488a910", + "id": "880285e4", "metadata": {}, "outputs": [], "source": [ @@ -250,7 +251,7 @@ { "cell_type": "code", "execution_count": null, - "id": "f6f51d44-ed23-4dc0-996e-6efc3c4f7fa6", + "id": "23e19ecb", "metadata": {}, "outputs": [], "source": [ @@ -260,19 +261,13 @@ { "attachments": {}, "cell_type": "markdown", - "id": "b992544b", + "id": "45f51adf", "metadata": {}, "source": [ "Next, you can check out the following notebook to learn how to compare different sampling strategies in PyTorch Geometric using Weights & Biases\n", "\n", "[![](https://colab.research.google.com/assets/colab-badge.svg)](http://wandb.me/pyg-sampling)" ] - }, - { - "cell_type": "markdown", - "id": "5ad6bd7f", - "metadata": {}, - "source": [] } ], "metadata": { diff --git a/colabs/pyg/pointnet-classification/01_compare_sampling.ipynb b/colabs/pyg/pointnet-classification/01_compare_sampling.ipynb index 866be074..41c86d30 100644 --- a/colabs/pyg/pointnet-classification/01_compare_sampling.ipynb +++ b/colabs/pyg/pointnet-classification/01_compare_sampling.ipynb @@ -2,6 +2,7 @@ "cells": [ { "cell_type": "markdown", + "id": "540308b6", "metadata": {}, "source": [ "\"Open\n", @@ -11,7 +12,7 @@ { "attachments": {}, "cell_type": "markdown", - "id": "75ae3439", + "id": "25d4dadf", "metadata": {}, "source": [ "# 🔥🔥 Explore Graph Sampling Techniques using PyTorch Geometric and Weights & Biases 🪄🐝\n", @@ -26,7 +27,7 @@ { "attachments": {}, "cell_type": "markdown", - "id": "24bc9d53", + "id": "5cfeb685", "metadata": {}, "source": [ "## Install Required Libraries" @@ -35,7 +36,7 @@ { "cell_type": "code", "execution_count": null, - "id": "9fbbb85e", + 
"id": "afcd8d80", "metadata": {}, "outputs": [], "source": [ @@ -48,7 +49,7 @@ { "attachments": {}, "cell_type": "markdown", - "id": "0387e2f3", + "id": "7636011f", "metadata": {}, "source": [ "We now install PyTorch Geometric according to our PyTorch Version. We also install Weights & Biases." @@ -57,7 +58,7 @@ { "attachments": {}, "cell_type": "markdown", - "id": "54e2c502", + "id": "0f6e59e4", "metadata": {}, "source": [ "!pip install -q torch-scatter -f https://data.pyg.org/whl/torch-${TORCH}.html\n", @@ -70,7 +71,7 @@ { "attachments": {}, "cell_type": "markdown", - "id": "e5e35477", + "id": "3bcb92b9", "metadata": {}, "source": [ "### Import Libraries" @@ -79,7 +80,7 @@ { "cell_type": "code", "execution_count": null, - "id": "9899f13f-7d7b-43c2-873a-07d23b75a1f2", + "id": "5dc0e1de", "metadata": {}, "outputs": [], "source": [ @@ -106,7 +107,7 @@ { "cell_type": "code", "execution_count": null, - "id": "581da785-44d2-4618-b84e-aaed217f0da6", + "id": "80da5344", "metadata": {}, "outputs": [], "source": [ @@ -134,7 +135,7 @@ { "attachments": {}, "cell_type": "markdown", - "id": "e19aa6d1", + "id": "3ebaa86f", "metadata": {}, "source": [ "We take a single point cloud from the dataset and compare the KNN-sampled subgraph and radius-sampled subgraph by visualizing the subgraphs as [`wandb.Html`](https://docs.wandb.ai/ref/python/data-types/html) on a [Weights & Biases Table](https://docs.wandb.ai/guides/data-vis)." 
@@ -143,7 +144,7 @@ { "cell_type": "code", "execution_count": null, - "id": "e550b0c2-7b97-469b-b8da-dfb234ddfd65", + "id": "2a51314d", "metadata": {}, "outputs": [], "source": [ @@ -183,7 +184,7 @@ { "attachments": {}, "cell_type": "markdown", - "id": "1441daab", + "id": "6d2ade6e", "metadata": {}, "source": [ "Next, you can check out the following notebook to learn how to train the PointNet++ architecture using PyTorch Geometric and Weights & Biases\n", diff --git a/colabs/pyg/pointnet-classification/02_pointnet_plus_plus.ipynb b/colabs/pyg/pointnet-classification/02_pointnet_plus_plus.ipynb index 26b36c74..f3affb99 100644 --- a/colabs/pyg/pointnet-classification/02_pointnet_plus_plus.ipynb +++ b/colabs/pyg/pointnet-classification/02_pointnet_plus_plus.ipynb @@ -2,6 +2,7 @@ "cells": [ { "cell_type": "markdown", + "id": "b93b0d32", "metadata": {}, "source": [ "\"Open\n", @@ -11,7 +12,7 @@ { "attachments": {}, "cell_type": "markdown", - "id": "8e6f5935", + "id": "3391c082", "metadata": {}, "source": [ "# 🔥🔥 Train PointNet++ Model using PyTorch Geometric and Weights & Biases 🪄🐝\n", @@ -28,7 +29,7 @@ { "attachments": {}, "cell_type": "markdown", - "id": "183d8da6", + "id": "ee4f787d", "metadata": {}, "source": [ "## Install Required Libraries" @@ -37,7 +38,7 @@ { "cell_type": "code", "execution_count": null, - "id": "5349d817", + "id": "ecf9a745", "metadata": {}, "outputs": [], "source": [ @@ -51,7 +52,7 @@ { "attachments": {}, "cell_type": "markdown", - "id": "c2d4ce37", + "id": "48bb8716", "metadata": {}, "source": [ "We now install PyTorch Geometric according to our PyTorch Version. We also install Weights & Biases." 
@@ -60,7 +61,7 @@ { "cell_type": "code", "execution_count": null, - "id": "5ed00f24", + "id": "f039fce2", "metadata": {}, "outputs": [], "source": [ @@ -74,7 +75,7 @@ { "attachments": {}, "cell_type": "markdown", - "id": "3fcc8028", + "id": "e1eadafb", "metadata": {}, "source": [ "### Import Libraries" @@ -83,7 +84,7 @@ { "cell_type": "code", "execution_count": null, - "id": "c38e68ed-e9b6-452b-913a-42af29cf3b43", + "id": "a89ab10a", "metadata": {}, "outputs": [], "source": [ @@ -105,7 +106,7 @@ { "attachments": {}, "cell_type": "markdown", - "id": "0393be9e", + "id": "014483ac", "metadata": {}, "source": [ "## Initialize Weights & Biases\n", @@ -116,7 +117,7 @@ { "cell_type": "code", "execution_count": null, - "id": "f345aa58-8ed3-4d0b-9925-8d776d6576d6", + "id": "cd601943", "metadata": {}, "outputs": [], "source": [ @@ -162,7 +163,7 @@ { "attachments": {}, "cell_type": "markdown", - "id": "3c06b3dc", + "id": "1e3b7a37", "metadata": {}, "source": [ "## Load ModelNet Dataset using PyTorch Geometric\n", @@ -173,7 +174,7 @@ { "cell_type": "code", "execution_count": null, - "id": "0a7c5bb1-30e1-4a6e-be2a-a375172d5571", + "id": "6aaa6cb3", "metadata": {}, "outputs": [], "source": [ @@ -224,7 +225,7 @@ { "attachments": {}, "cell_type": "markdown", - "id": "f8b2e6e4", + "id": "2896d8b7", "metadata": {}, "source": [ "## Implementing the PointNet++ Model using PyTorch Geometric" @@ -233,7 +234,7 @@ { "cell_type": "code", "execution_count": null, - "id": "1724081c-235a-49ff-9d1f-0ec2e06e1a4e", + "id": "93ad63ad", "metadata": {}, "outputs": [], "source": [ @@ -258,7 +259,7 @@ { "cell_type": "code", "execution_count": null, - "id": "17957287-c51c-469f-9669-302d6b9903e1", + "id": "63c5d023", "metadata": {}, "outputs": [], "source": [ @@ -278,7 +279,7 @@ { "cell_type": "code", "execution_count": null, - "id": "27720171-ca69-49a2-9c7f-66cbc4527e67", + "id": "95a8a971", "metadata": {}, "outputs": [], "source": [ @@ -318,7 +319,7 @@ { "attachments": {}, "cell_type": "markdown", - 
"id": "f9b7e440", + "id": "5c4ec58d", "metadata": {}, "source": [ "## Training PointNet++ and Logging Metrics on Weights & Biases" @@ -327,7 +328,7 @@ { "cell_type": "code", "execution_count": null, - "id": "33cf5e53-6cdc-4037-b126-1201ed711205", + "id": "12aa9785", "metadata": {}, "outputs": [], "source": [ @@ -349,7 +350,7 @@ { "cell_type": "code", "execution_count": null, - "id": "f86c06ce-d212-48b0-984f-bc14582cf716", + "id": "d7d4ab03", "metadata": {}, "outputs": [], "source": [ @@ -463,7 +464,7 @@ { "cell_type": "code", "execution_count": null, - "id": "3d7e93ee-7346-4119-91d5-9871880e2507", + "id": "a7dbb213", "metadata": {}, "outputs": [], "source": [ @@ -488,7 +489,7 @@ { "cell_type": "code", "execution_count": null, - "id": "3b035bac-8438-4f96-a545-3f0e071490c4", + "id": "40ad9e4a", "metadata": {}, "outputs": [], "source": [ @@ -498,7 +499,7 @@ { "attachments": {}, "cell_type": "markdown", - "id": "05441179", + "id": "31a8a8ac", "metadata": {}, "source": [ "Next, you can check out the following notebook to learn how to run a hyperparameter sweep on our PointNet++ trainig loop using Weights & Biases:\n", diff --git a/colabs/pyg/pointnet-classification/03_sweep.ipynb b/colabs/pyg/pointnet-classification/03_sweep.ipynb index 09845a97..3a8b58cf 100644 --- a/colabs/pyg/pointnet-classification/03_sweep.ipynb +++ b/colabs/pyg/pointnet-classification/03_sweep.ipynb @@ -2,6 +2,7 @@ "cells": [ { "cell_type": "markdown", + "id": "15297b92", "metadata": {}, "source": [ "\"Open\n", @@ -11,7 +12,7 @@ { "attachments": {}, "cell_type": "markdown", - "id": "d767398b", + "id": "f529ef25", "metadata": {}, "source": [ "# 🔥🔥 Run a Hyperparamter Sweep on PointNet++ 🪄🐝\n", @@ -26,7 +27,7 @@ { "attachments": {}, "cell_type": "markdown", - "id": "9803ffe3", + "id": "f036a8ae", "metadata": {}, "source": [ "## Install Required Libraries" @@ -35,7 +36,7 @@ { "cell_type": "code", "execution_count": null, - "id": "f9004c21-0b42-4935-837c-9f4197ded5ec", + "id": "3ae1ec82", "metadata": 
{}, "outputs": [], "source": [ @@ -49,7 +50,7 @@ { "attachments": {}, "cell_type": "markdown", - "id": "5dac741c", + "id": "5b7d66c7", "metadata": {}, "source": [ "We now install PyTorch Geometric according to our PyTorch Version. We also install Weights & Biases." @@ -58,7 +59,7 @@ { "cell_type": "code", "execution_count": null, - "id": "28080574", + "id": "19b5b8bf", "metadata": {}, "outputs": [], "source": [ @@ -72,7 +73,7 @@ { "attachments": {}, "cell_type": "markdown", - "id": "749b65e3", + "id": "22b0c260", "metadata": {}, "source": [ "### Import Libraries" @@ -81,7 +82,7 @@ { "cell_type": "code", "execution_count": null, - "id": "ffe08f49-0e83-42b7-b404-54955ca98620", + "id": "c3f71cab", "metadata": {}, "outputs": [], "source": [ @@ -103,7 +104,7 @@ { "attachments": {}, "cell_type": "markdown", - "id": "ae6f606d", + "id": "df67a2eb", "metadata": {}, "source": [ "## Function to Build Data Loaders" @@ -112,7 +113,7 @@ { "cell_type": "code", "execution_count": null, - "id": "8eaf84b3-9588-4b6b-82b6-b7e276021ab0", + "id": "1a450c72", "metadata": {}, "outputs": [], "source": [ @@ -154,7 +155,7 @@ { "attachments": {}, "cell_type": "markdown", - "id": "c7527796", + "id": "cd89a7b6", "metadata": {}, "source": [ "## Implementing the PointNet++ Model using PyTorch Geometric" @@ -163,7 +164,7 @@ { "cell_type": "code", "execution_count": null, - "id": "101cf26d-76ed-4174-8061-c06d7ea480be", + "id": "4efd3a6c", "metadata": {}, "outputs": [], "source": [ @@ -225,7 +226,7 @@ { "attachments": {}, "cell_type": "markdown", - "id": "25e67ec4", + "id": "b990617a", "metadata": {}, "source": [ "## Define a Training Function Instrumented with WandB" @@ -234,7 +235,7 @@ { "cell_type": "code", "execution_count": null, - "id": "ba5b572b-5041-4fa4-af37-028e5ff2c49d", + "id": "d790cb58", "metadata": {}, "outputs": [], "source": [ @@ -358,7 +359,7 @@ { "attachments": {}, "cell_type": "markdown", - "id": "f2767675", + "id": "e6671e92", "metadata": {}, "source": [ "## Start the 
Hyperparameter Sweep" @@ -367,7 +368,7 @@ { "cell_type": "code", "execution_count": null, - "id": "cec4ce63-92bb-4fa0-a04e-81ba9b7337f2", + "id": "d31702ae", "metadata": {}, "outputs": [], "source": [ @@ -393,14 +394,6 @@ "# Run Sweep\n", "wandb.agent(sweep_id, function=train, count=30)" ] - }, - { - "cell_type": "code", - "execution_count": null, - "id": "5680dad3-f42c-4b6a-b3e7-4dedeedfc88c", - "metadata": {}, - "outputs": [], - "source": [] } ], "metadata": { diff --git a/colabs/pytorch-lightning/Fine_tuning_a_Transformer_with_Pytorch_Lightning.ipynb b/colabs/pytorch-lightning/Fine_tuning_a_Transformer_with_Pytorch_Lightning.ipynb index ac7348d7..08f30055 100644 --- a/colabs/pytorch-lightning/Fine_tuning_a_Transformer_with_Pytorch_Lightning.ipynb +++ b/colabs/pytorch-lightning/Fine_tuning_a_Transformer_with_Pytorch_Lightning.ipynb @@ -102,12 +102,18 @@ "source": [ "\"\"\"\n", "Note that if you are using W&B local you will need to pass the url of your W&B \n", - "deployment to wandb.login through the host keyword argument.\n", "\n", "For example:\n", - "wandb.login(host=\"api.wandb.ai\")\n", - "\"\"\"\n", - "wandb.login()" + "\"\"\"" + ] + }, + { + "cell_type": "code", + "execution_count": null, + "metadata": {}, + "outputs": [], + "source": [ + "!wandb login" ] }, { @@ -456,11 +462,6 @@ "\"sweeps-diagram\"\n" ] }, - { - "cell_type": "markdown", - "metadata": {}, - "source": [] - }, { "cell_type": "markdown", "metadata": {}, diff --git a/colabs/pytorch-lightning/Image_Classification_using_PyTorch_Lightning.ipynb b/colabs/pytorch-lightning/Image_Classification_using_PyTorch_Lightning.ipynb index ecd1ffa8..53c8e7d1 100644 --- a/colabs/pytorch-lightning/Image_Classification_using_PyTorch_Lightning.ipynb +++ b/colabs/pytorch-lightning/Image_Classification_using_PyTorch_Lightning.ipynb @@ -96,7 +96,7 @@ "metadata": {}, "outputs": [], "source": [ - "wandb.login()" + "!wandb login" ] }, { @@ -440,13 +440,6 @@ "- [Step-by-step 
walk-through](https://pytorch-lightning.readthedocs.io/en/latest/starter/introduction.html) - This is one of the official tutorials. Their documentation is really well written and I highly encourage it as a good learning resource.\n", "- [Use Pytorch Lightning with Weights & Biases](https://wandb.me/lightning) - This is a quick colab that you can run through to learn more about how to use W&B with PyTorch Lightning." ] - }, - { - "cell_type": "code", - "execution_count": null, - "metadata": {}, - "outputs": [], - "source": [] } ], "metadata": { diff --git a/colabs/pytorch-lightning/Optimize_Pytorch_Lightning_models_with_Weights_&_Biases.ipynb b/colabs/pytorch-lightning/Optimize_Pytorch_Lightning_models_with_Weights_&_Biases.ipynb index 268e5dbb..364a36df 100644 --- a/colabs/pytorch-lightning/Optimize_Pytorch_Lightning_models_with_Weights_&_Biases.ipynb +++ b/colabs/pytorch-lightning/Optimize_Pytorch_Lightning_models_with_Weights_&_Biases.ipynb @@ -80,8 +80,16 @@ "metadata": {}, "outputs": [], "source": [ - "import wandb\n", - "wandb.login()" + "import wandb" + ] + }, + { + "cell_type": "code", + "execution_count": null, + "metadata": {}, + "outputs": [], + "source": [ + "!wandb login" ] }, { diff --git a/colabs/pytorch-lightning/Profile_PyTorch_Code.ipynb b/colabs/pytorch-lightning/Profile_PyTorch_Code.ipynb index 573e035a..82694b32 100644 --- a/colabs/pytorch-lightning/Profile_PyTorch_Code.ipynb +++ b/colabs/pytorch-lightning/Profile_PyTorch_Code.ipynb @@ -113,9 +113,16 @@ "# drop slow mirror from list of MNIST mirrors\n", "torchvision.datasets.MNIST.mirrors = [mirror for mirror in torchvision.datasets.MNIST.mirrors\n", " if not mirror.startswith(\"http://yann.lecun.com\")]\n", - " \n", - "# login to W&B\n", - "wandb.login()" + " " + ] + }, + { + "cell_type": "code", + "execution_count": null, + "metadata": {}, + "outputs": [], + "source": [ + "!wandb login" ] }, { diff --git 
a/colabs/pytorch-lightning/Supercharge_your_Training_with_Pytorch_Lightning_and_Weights_and_Biases.ipynb b/colabs/pytorch-lightning/Supercharge_your_Training_with_Pytorch_Lightning_and_Weights_and_Biases.ipynb index 36b8af7a..e5a6e36a 100644 --- a/colabs/pytorch-lightning/Supercharge_your_Training_with_Pytorch_Lightning_and_Weights_and_Biases.ipynb +++ b/colabs/pytorch-lightning/Supercharge_your_Training_with_Pytorch_Lightning_and_Weights_and_Biases.ipynb @@ -19,11 +19,6 @@ "# ⚡ 💘 🏋️‍♀️ Supercharge your Training with PyTorch Lightning + Weights & Biases" ] }, - { - "cell_type": "markdown", - "metadata": {}, - "source": [] - }, { "cell_type": "markdown", "metadata": {}, @@ -155,9 +150,16 @@ "import wandb\n", "\n", "# ⚡ 🤝 🏋️‍♀️\n", - "from pytorch_lightning.loggers import WandbLogger\n", - "\n", - "wandb.login()" + "from pytorch_lightning.loggers import WandbLogger\n" + ] + }, + { + "cell_type": "code", + "execution_count": null, + "metadata": {}, + "outputs": [], + "source": [ + "!wandb login" ] }, { diff --git a/colabs/pytorch-lightning/Transfer_Learning_Using_PyTorch_Lightning.ipynb b/colabs/pytorch-lightning/Transfer_Learning_Using_PyTorch_Lightning.ipynb index 3a4a744b..d7338d03 100644 --- a/colabs/pytorch-lightning/Transfer_Learning_Using_PyTorch_Lightning.ipynb +++ b/colabs/pytorch-lightning/Transfer_Learning_Using_PyTorch_Lightning.ipynb @@ -89,7 +89,7 @@ "metadata": {}, "outputs": [], "source": [ - "wandb.login()" + "!wandb login" ] }, { diff --git a/colabs/pytorch-lightning/Wandb_End_to_End_with_PyTorch_Lightning.ipynb b/colabs/pytorch-lightning/Wandb_End_to_End_with_PyTorch_Lightning.ipynb index fdd961eb..8e06d5f8 100644 --- a/colabs/pytorch-lightning/Wandb_End_to_End_with_PyTorch_Lightning.ipynb +++ b/colabs/pytorch-lightning/Wandb_End_to_End_with_PyTorch_Lightning.ipynb @@ -62,8 +62,16 @@ "metadata": {}, "outputs": [], "source": [ - "import wandb\n", - "wandb.login()" + "import wandb" + ] + }, + { + "cell_type": "code", + "execution_count": null, + 
"metadata": {}, + "outputs": [], + "source": [ + "!wandb login" ] }, { diff --git a/colabs/pytorch/How_does_adding_dropout_affect_model_performance.ipynb b/colabs/pytorch/How_does_adding_dropout_affect_model_performance.ipynb index 41b8b3fa..b3e47ed0 100644 --- a/colabs/pytorch/How_does_adding_dropout_affect_model_performance.ipynb +++ b/colabs/pytorch/How_does_adding_dropout_affect_model_performance.ipynb @@ -48,9 +48,16 @@ "metadata": {}, "outputs": [], "source": [ - "import wandb\n", - "\n", - "wandb.login()" + "import wandb\n" + ] + }, + { + "cell_type": "code", + "execution_count": null, + "metadata": {}, + "outputs": [], + "source": [ + "!wandb login" ] }, { diff --git a/colabs/pytorch/Organizing_Hyperparameter_Sweeps_in_PyTorch_with_W&B.ipynb b/colabs/pytorch/Organizing_Hyperparameter_Sweeps_in_PyTorch_with_W&B.ipynb index 00a27712..249fb14a 100644 --- a/colabs/pytorch/Organizing_Hyperparameter_Sweeps_in_PyTorch_with_W&B.ipynb +++ b/colabs/pytorch/Organizing_Hyperparameter_Sweeps_in_PyTorch_with_W&B.ipynb @@ -98,9 +98,16 @@ "metadata": {}, "outputs": [], "source": [ - "import wandb\n", - "\n", - "wandb.login()" + "import wandb\n" + ] + }, + { + "cell_type": "code", + "execution_count": null, + "metadata": {}, + "outputs": [], + "source": [ + "!wandb login" ] }, { diff --git a/colabs/pytorch/Simple_PyTorch_Integration.ipynb b/colabs/pytorch/Simple_PyTorch_Integration.ipynb index f261fd3a..65da1292 100644 --- a/colabs/pytorch/Simple_PyTorch_Integration.ipynb +++ b/colabs/pytorch/Simple_PyTorch_Integration.ipynb @@ -186,9 +186,16 @@ "metadata": {}, "outputs": [], "source": [ - "import wandb\n", - "\n", - "wandb.login()" + "import wandb\n" + ] + }, + { + "cell_type": "code", + "execution_count": null, + "metadata": {}, + "outputs": [], + "source": [ + "!wandb login" ] }, { diff --git a/colabs/raytune/RayTune_with_wandb.ipynb b/colabs/raytune/RayTune_with_wandb.ipynb index 42b6e272..b5489dd2 100644 --- a/colabs/raytune/RayTune_with_wandb.ipynb +++ 
b/colabs/raytune/RayTune_with_wandb.ipynb @@ -135,7 +135,7 @@ "metadata": {}, "outputs": [], "source": [ - "wandb.login()" + "!wandb login" ] }, { diff --git a/colabs/raytune/tune-wandb.ipynb b/colabs/raytune/tune-wandb.ipynb index dd141d2c..6c176a77 100644 --- a/colabs/raytune/tune-wandb.ipynb +++ b/colabs/raytune/tune-wandb.ipynb @@ -2,6 +2,7 @@ "cells": [ { "cell_type": "markdown", + "id": "76091766", "metadata": {}, "source": [ "\"Open\n" @@ -9,7 +10,7 @@ }, { "cell_type": "markdown", - "id": "ecad719c", + "id": "c280549a", "metadata": {}, "source": [ "# Using Weights & Biases with Tune\n", @@ -40,7 +41,7 @@ { "cell_type": "code", "execution_count": null, - "id": "3db798d0", + "id": "dcdda930", "metadata": {}, "outputs": [], "source": [ @@ -50,7 +51,7 @@ { "cell_type": "code", "execution_count": null, - "id": "100bcf8a", + "id": "15e30d7a", "metadata": {}, "outputs": [], "source": [ @@ -70,16 +71,16 @@ { "cell_type": "code", "execution_count": null, - "id": "1b4dcae4", + "id": "5c6020e1", "metadata": {}, "outputs": [], "source": [ - "wandb.login()" + "!wandb login" ] }, { "cell_type": "markdown", - "id": "9346c0f6", + "id": "51d2fa38", "metadata": {}, "source": [ "Next, let's define an easy `objective` function (a Tune `Trainable`) that reports a random loss to Tune.\n", @@ -90,7 +91,7 @@ { "cell_type": "code", "execution_count": null, - "id": "e8b4fc4d", + "id": "19c570d4", "metadata": {}, "outputs": [], "source": [ @@ -102,7 +103,7 @@ }, { "cell_type": "markdown", - "id": "831eed42", + "id": "845c1f78", "metadata": {}, "source": [ "Given that you provide an `api_key_file` pointing to your Weights & Biases API key, you cna define a\n", @@ -112,7 +113,7 @@ { "cell_type": "code", "execution_count": null, - "id": "52988599", + "id": "47f08927", "metadata": {}, "outputs": [], "source": [ @@ -141,7 +142,7 @@ }, { "cell_type": "markdown", - "id": "e24c05fa", + "id": "f1956489", "metadata": {}, "source": [ "To use the `wandb_mixin` decorator, you can simply decorate 
the objective function from earlier.\n", @@ -152,7 +153,7 @@ { "cell_type": "code", "execution_count": null, - "id": "5e30d5e7", + "id": "828aad02", "metadata": {}, "outputs": [], "source": [ @@ -166,7 +167,7 @@ }, { "cell_type": "markdown", - "id": "04040bcb", + "id": "ed339068", "metadata": {}, "source": [ "With the `decorated_objective` defined, running a Tune experiment is as simple as providing this objective and\n", @@ -176,7 +177,7 @@ { "cell_type": "code", "execution_count": null, - "id": "d4fbd368", + "id": "ee3e03f9", "metadata": {}, "outputs": [], "source": [ @@ -201,7 +202,7 @@ }, { "cell_type": "markdown", - "id": "f9521481", + "id": "d86cc3aa", "metadata": {}, "source": [ "Finally, you can also define a class-based Tune `Trainable` by using the `WandbTrainableMixin` to define your objective:" @@ -210,7 +211,7 @@ { "cell_type": "code", "execution_count": null, - "id": "d27a7a35", + "id": "a33fad87", "metadata": {}, "outputs": [], "source": [ @@ -224,7 +225,7 @@ }, { "cell_type": "markdown", - "id": "fa189bb2", + "id": "aa889a96", "metadata": {}, "source": [ "Running Tune with this `WandbTrainable` works exactly the same as with the function API.\n", @@ -235,7 +236,7 @@ { "cell_type": "code", "execution_count": null, - "id": "6e546cc2", + "id": "5d568289", "metadata": {}, "outputs": [], "source": [ @@ -260,7 +261,7 @@ }, { "cell_type": "markdown", - "id": "0b736172", + "id": "03a7d821", "metadata": {}, "source": [ "Since you may not have an API key for Wandb, we can _mock_ the Wandb logger and test all three of our training\n", @@ -271,7 +272,7 @@ { "cell_type": "code", "execution_count": null, - "id": "e0e7f481", + "id": "1ab08008", "metadata": {}, "outputs": [], "source": [ @@ -302,7 +303,7 @@ }, { "cell_type": "markdown", - "id": "2f6e9138", + "id": "9960a29b", "metadata": {}, "source": [ "This completes our Tune and Wandb walk-through.\n", diff --git a/colabs/rdkit/wb_rdkit.ipynb b/colabs/rdkit/wb_rdkit.ipynb index d23c2b03..09c6c0b6 100644 --- 
a/colabs/rdkit/wb_rdkit.ipynb +++ b/colabs/rdkit/wb_rdkit.ipynb @@ -41,8 +41,16 @@ "outputs": [], "source": [ "\n", - "import wandb\n", - "wandb.login()" + "import wandb" + ] + }, + { + "cell_type": "code", + "execution_count": null, + "metadata": {}, + "outputs": [], + "source": [ + "!wandb login" ] }, { diff --git a/colabs/scikit/Simple_Scikit_Integration.ipynb b/colabs/scikit/Simple_Scikit_Integration.ipynb index c5a35d54..62fc5cde 100644 --- a/colabs/scikit/Simple_Scikit_Integration.ipynb +++ b/colabs/scikit/Simple_Scikit_Integration.ipynb @@ -91,9 +91,16 @@ "metadata": {}, "outputs": [], "source": [ - "import wandb\n", - "\n", - "wandb.login()" + "import wandb\n" + ] + }, + { + "cell_type": "code", + "execution_count": null, + "metadata": {}, + "outputs": [], + "source": [ + "!wandb login" ] }, { diff --git a/colabs/scikit/w-b-k-means-clustering.ipynb b/colabs/scikit/w-b-k-means-clustering.ipynb index 8dc86436..fb4f6474 100644 --- a/colabs/scikit/w-b-k-means-clustering.ipynb +++ b/colabs/scikit/w-b-k-means-clustering.ipynb @@ -114,7 +114,7 @@ "metadata": {}, "outputs": [], "source": [ - "wandb.login()" + "!wandb login" ] }, { diff --git a/colabs/simpletransformers/SimpleTransformersQA.ipynb b/colabs/simpletransformers/SimpleTransformersQA.ipynb index f5e22c82..b6926525 100644 --- a/colabs/simpletransformers/SimpleTransformersQA.ipynb +++ b/colabs/simpletransformers/SimpleTransformersQA.ipynb @@ -115,10 +115,18 @@ "source": [ "import wandb\n", "\n", - "wandb.login()\n", "wandb_project = \"SimpleTransformers-QA\"" ] }, + { + "cell_type": "code", + "execution_count": null, + "metadata": {}, + "outputs": [], + "source": [ + "!wandb login" + ] + }, { "cell_type": "markdown", "metadata": {}, diff --git a/colabs/spacy/SpaCy_v3_and_W&B.ipynb b/colabs/spacy/SpaCy_v3_and_W&B.ipynb index 9ffe78a2..02f1d522 100644 --- a/colabs/spacy/SpaCy_v3_and_W&B.ipynb +++ b/colabs/spacy/SpaCy_v3_and_W&B.ipynb @@ -103,8 +103,16 @@ "metadata": {}, "outputs": [], "source": [ - "import 
wandb\n", - "wandb.login()" + "import wandb" + ] + }, + { + "cell_type": "code", + "execution_count": null, + "metadata": {}, + "outputs": [], + "source": [ + "!wandb login" ] }, { diff --git a/colabs/spacy/spaCy_v3_and_W&B_Sweeps.ipynb b/colabs/spacy/spaCy_v3_and_W&B_Sweeps.ipynb index d4b3e561..8ae69d20 100644 --- a/colabs/spacy/spaCy_v3_and_W&B_Sweeps.ipynb +++ b/colabs/spacy/spaCy_v3_and_W&B_Sweeps.ipynb @@ -84,9 +84,16 @@ "metadata": {}, "outputs": [], "source": [ - "import wandb\n", - "\n", - "wandb.login()" + "import wandb\n" + ] + }, + { + "cell_type": "code", + "execution_count": null, + "metadata": {}, + "outputs": [], + "source": [ + "!wandb login" ] }, { diff --git a/colabs/stable_baselines3/Stable_Baselines3_wandb_experiment_tracking.ipynb b/colabs/stable_baselines3/Stable_Baselines3_wandb_experiment_tracking.ipynb index 8d0103e9..943a8471 100644 --- a/colabs/stable_baselines3/Stable_Baselines3_wandb_experiment_tracking.ipynb +++ b/colabs/stable_baselines3/Stable_Baselines3_wandb_experiment_tracking.ipynb @@ -123,11 +123,6 @@ "\n", "![](https://user-images.githubusercontent.com/5555347/122989248-97b5bd00-d370-11eb-95d6-52d56cfbce19.gif)" ] - }, - { - "cell_type": "markdown", - "metadata": {}, - "source": [] } ], "metadata": { diff --git a/colabs/stylegan_nada/StyleGAN-NADA.ipynb b/colabs/stylegan_nada/StyleGAN-NADA.ipynb index 3dcb606b..6bfe879d 100644 --- a/colabs/stylegan_nada/StyleGAN-NADA.ipynb +++ b/colabs/stylegan_nada/StyleGAN-NADA.ipynb @@ -81,8 +81,16 @@ "metadata": {}, "outputs": [], "source": [ - "import wandb\n", - "wandb.login()" + "import wandb" + ] + }, + { + "cell_type": "code", + "execution_count": null, + "metadata": {}, + "outputs": [], + "source": [ + "!wandb login" ] }, { diff --git a/colabs/super-gradients/yolo_nas.ipynb b/colabs/super-gradients/yolo_nas.ipynb index a79ad316..c360af91 100644 --- a/colabs/super-gradients/yolo_nas.ipynb +++ b/colabs/super-gradients/yolo_nas.ipynb @@ -362,8 +362,7 @@ "metadata": { "language_info": { 
"name": "python" - }, - "orig_nbformat": 4 + } }, "nbformat": 4, "nbformat_minor": 2 diff --git a/colabs/tables/AlphaFold_with_W&B_Align,_Fold,_Log.ipynb b/colabs/tables/AlphaFold_with_W&B_Align,_Fold,_Log.ipynb index fce4e1f3..ad454a6a 100644 --- a/colabs/tables/AlphaFold_with_W&B_Align,_Fold,_Log.ipynb +++ b/colabs/tables/AlphaFold_with_W&B_Align,_Fold,_Log.ipynb @@ -89,9 +89,16 @@ "outputs": [], "source": [ "# now run this cell (again by pressing the play button)\n", - "# to log in to Weights & Biases (by following any prompts)\n", - "\n", - "wandb.login()" + "# to log in to Weights & Biases (by following any prompts)\n" + ] + }, + { + "cell_type": "code", + "execution_count": null, + "metadata": {}, + "outputs": [], + "source": [ + "!wandb login" ] }, { diff --git a/colabs/tables/Log_Tables_Incrementally.ipynb b/colabs/tables/Log_Tables_Incrementally.ipynb index fdb44e7b..3be00f69 100644 --- a/colabs/tables/Log_Tables_Incrementally.ipynb +++ b/colabs/tables/Log_Tables_Incrementally.ipynb @@ -38,9 +38,16 @@ "metadata": {}, "outputs": [], "source": [ - "import wandb\n", - "\n", - "wandb.login()" + "import wandb\n" + ] + }, + { + "cell_type": "code", + "execution_count": null, + "metadata": {}, + "outputs": [], + "source": [ + "!wandb login" ] }, { diff --git a/colabs/tables/W&B_Tables_Quickstart.ipynb b/colabs/tables/W&B_Tables_Quickstart.ipynb index 8814ca0a..95a578ab 100644 --- a/colabs/tables/W&B_Tables_Quickstart.ipynb +++ b/colabs/tables/W&B_Tables_Quickstart.ipynb @@ -46,8 +46,16 @@ "metadata": {}, "outputs": [], "source": [ - "import wandb\n", - "wandb.login()" + "import wandb" + ] + }, + { + "cell_type": "code", + "execution_count": null, + "metadata": {}, + "outputs": [], + "source": [ + "!wandb login" ] }, { diff --git a/colabs/tensorboard/Accelerator_W&B_Tensorboard.ipynb b/colabs/tensorboard/Accelerator_W&B_Tensorboard.ipynb index 5f8806be..aa3e3869 100644 --- a/colabs/tensorboard/Accelerator_W&B_Tensorboard.ipynb +++ 
b/colabs/tensorboard/Accelerator_W&B_Tensorboard.ipynb @@ -88,8 +88,16 @@ "source": [ "import wandb\n", "\n", - "# log to Weights and biases\n", - "wandb.login()" + "# log to Weights and biases" + ] + }, + { + "cell_type": "code", + "execution_count": null, + "metadata": {}, + "outputs": [], + "source": [ + "!wandb login" ] }, { diff --git a/colabs/tensorboard/TensorBoard_and_Weights_and_Biases.ipynb b/colabs/tensorboard/TensorBoard_and_Weights_and_Biases.ipynb index 0bb947f4..c3f953b5 100644 --- a/colabs/tensorboard/TensorBoard_and_Weights_and_Biases.ipynb +++ b/colabs/tensorboard/TensorBoard_and_Weights_and_Biases.ipynb @@ -152,13 +152,6 @@ "\n", "You can find additional documentation of how to use [Weights & Biases with Tensorboard here](https://docs.wandb.ai/guides/integrations/tensorboard)" ] - }, - { - "cell_type": "code", - "execution_count": null, - "metadata": {}, - "outputs": [], - "source": [] } ], "metadata": { diff --git a/colabs/tensorflow/Hyperparameter_Optimization_in_TensorFlow_using_W&B_Sweeps.ipynb b/colabs/tensorflow/Hyperparameter_Optimization_in_TensorFlow_using_W&B_Sweeps.ipynb index bec47201..eb7e4e8d 100644 --- a/colabs/tensorflow/Hyperparameter_Optimization_in_TensorFlow_using_W&B_Sweeps.ipynb +++ b/colabs/tensorflow/Hyperparameter_Optimization_in_TensorFlow_using_W&B_Sweeps.ipynb @@ -141,9 +141,16 @@ "outputs": [], "source": [ "import wandb\n", - "from wandb.keras import WandbCallback\n", - "\n", - "wandb.login()" + "from wandb.keras import WandbCallback\n" + ] + }, + { + "cell_type": "code", + "execution_count": null, + "metadata": {}, + "outputs": [], + "source": [ + "!wandb login" ] }, { diff --git a/colabs/tensorflow/Simple_TensorFlow_Integration.ipynb b/colabs/tensorflow/Simple_TensorFlow_Integration.ipynb index 70313d0d..5265cdeb 100644 --- a/colabs/tensorflow/Simple_TensorFlow_Integration.ipynb +++ b/colabs/tensorflow/Simple_TensorFlow_Integration.ipynb @@ -136,9 +136,16 @@ "outputs": [], "source": [ "import wandb\n", - "from 
wandb.keras import WandbCallback\n", - "\n", - "wandb.login()" + "from wandb.keras import WandbCallback\n" + ] + }, + { + "cell_type": "code", + "execution_count": null, + "metadata": {}, + "outputs": [], + "source": [ + "!wandb login" ] }, { diff --git a/colabs/tensorflow/convert_imagenette_tfrecord.ipynb b/colabs/tensorflow/convert_imagenette_tfrecord.ipynb index cb82bc7d..36503727 100644 --- a/colabs/tensorflow/convert_imagenette_tfrecord.ipynb +++ b/colabs/tensorflow/convert_imagenette_tfrecord.ipynb @@ -264,13 +264,6 @@ "source": [ "wandb.finish()" ] - }, - { - "cell_type": "code", - "execution_count": null, - "metadata": {}, - "outputs": [], - "source": [] } ], "metadata": { diff --git a/colabs/ultralytics/00_inference.ipynb b/colabs/ultralytics/00_inference.ipynb index 7814eed7..630daac8 100644 --- a/colabs/ultralytics/00_inference.ipynb +++ b/colabs/ultralytics/00_inference.ipynb @@ -153,8 +153,7 @@ "metadata": { "language_info": { "name": "python" - }, - "orig_nbformat": 4 + } }, "nbformat": 4, "nbformat_minor": 2 diff --git a/colabs/ultralytics/01_train_val.ipynb b/colabs/ultralytics/01_train_val.ipynb index 22021e16..ab5a1f26 100644 --- a/colabs/ultralytics/01_train_val.ipynb +++ b/colabs/ultralytics/01_train_val.ipynb @@ -142,8 +142,7 @@ "metadata": { "language_info": { "name": "python" - }, - "orig_nbformat": 4 + } }, "nbformat": 4, "nbformat_minor": 2 diff --git a/colabs/wandb-artifacts/Artifacts_Quickstart_with_W&B.ipynb b/colabs/wandb-artifacts/Artifacts_Quickstart_with_W&B.ipynb index a18817ac..fb1a3ba8 100644 --- a/colabs/wandb-artifacts/Artifacts_Quickstart_with_W&B.ipynb +++ b/colabs/wandb-artifacts/Artifacts_Quickstart_with_W&B.ipynb @@ -126,8 +126,16 @@ "outputs": [], "source": [ "!pip install wandb -qq\n", - "import wandb\n", - "wandb.login()" + "import wandb" + ] + }, + { + "cell_type": "code", + "execution_count": null, + "metadata": {}, + "outputs": [], + "source": [ + "!wandb login" ] }, { diff --git 
a/colabs/wandb-artifacts/Basic_Artifacts_with_W&B.ipynb b/colabs/wandb-artifacts/Basic_Artifacts_with_W&B.ipynb index a5a6acf5..43716302 100644 --- a/colabs/wandb-artifacts/Basic_Artifacts_with_W&B.ipynb +++ b/colabs/wandb-artifacts/Basic_Artifacts_with_W&B.ipynb @@ -82,8 +82,16 @@ "outputs": [], "source": [ "!pip install wandb -qq\n", - "import wandb\n", - "wandb.login()" + "import wandb" + ] + }, + { + "cell_type": "code", + "execution_count": null, + "metadata": {}, + "outputs": [], + "source": [ + "!wandb login" ] }, { diff --git a/colabs/wandb-artifacts/W&B_artifacts_for_auditing_purposes.ipynb b/colabs/wandb-artifacts/W&B_artifacts_for_auditing_purposes.ipynb index 5818a0a7..34a6c6d7 100644 --- a/colabs/wandb-artifacts/W&B_artifacts_for_auditing_purposes.ipynb +++ b/colabs/wandb-artifacts/W&B_artifacts_for_auditing_purposes.ipynb @@ -84,9 +84,16 @@ "metadata": {}, "outputs": [], "source": [ - "import wandb\n", - "\n", - "wandb.login()" + "import wandb\n" + ] + }, + { + "cell_type": "code", + "execution_count": null, + "metadata": {}, + "outputs": [], + "source": [ + "!wandb login" ] }, { diff --git a/colabs/wandb-artifacts/train_val_test_split_with_tabular_data.ipynb b/colabs/wandb-artifacts/train_val_test_split_with_tabular_data.ipynb index e7375e69..e0f553f1 100644 --- a/colabs/wandb-artifacts/train_val_test_split_with_tabular_data.ipynb +++ b/colabs/wandb-artifacts/train_val_test_split_with_tabular_data.ipynb @@ -60,7 +60,6 @@ "source": [ "!pip install --upgrade wandb -qqq\n", "import wandb\n", - "wandb.login()\n", "\n", "import random\n", "from collections import OrderedDict\n", @@ -90,6 +89,15 @@ "set_seeds(0)" ] }, + { + "cell_type": "code", + "execution_count": null, + "metadata": {}, + "outputs": [], + "source": [ + "!wandb login" + ] + }, { "cell_type": "code", "execution_count": null, diff --git a/colabs/wandb-log/Configs_in_W&B.ipynb b/colabs/wandb-log/Configs_in_W&B.ipynb index 819736e4..4b289da0 100644 --- a/colabs/wandb-log/Configs_in_W&B.ipynb 
+++ b/colabs/wandb-log/Configs_in_W&B.ipynb @@ -47,8 +47,16 @@ "metadata": {}, "outputs": [], "source": [ - "import wandb\n", - "wandb.login()" + "import wandb" + ] + }, + { + "cell_type": "code", + "execution_count": null, + "metadata": {}, + "outputs": [], + "source": [ + "!wandb login" ] }, { diff --git a/colabs/wandb-log/Customize_metric_logging_with_define_metric.ipynb b/colabs/wandb-log/Customize_metric_logging_with_define_metric.ipynb index d9d4a932..c9d7efe1 100644 --- a/colabs/wandb-log/Customize_metric_logging_with_define_metric.ipynb +++ b/colabs/wandb-log/Customize_metric_logging_with_define_metric.ipynb @@ -41,9 +41,16 @@ "source": [ "\n", "import wandb\n", - "import random\n", - "\n", - "wandb.login()" + "import random\n" + ] + }, + { + "cell_type": "code", + "execution_count": null, + "metadata": {}, + "outputs": [], + "source": [ + "!wandb login" ] }, { diff --git a/colabs/wandb-log/Generate_gifs_from_logged_images_on_wandb.ipynb b/colabs/wandb-log/Generate_gifs_from_logged_images_on_wandb.ipynb index 7c1c2f5e..203d70dd 100644 --- a/colabs/wandb-log/Generate_gifs_from_logged_images_on_wandb.ipynb +++ b/colabs/wandb-log/Generate_gifs_from_logged_images_on_wandb.ipynb @@ -46,8 +46,16 @@ "metadata": {}, "outputs": [], "source": [ - "import wandb\n", - "wandb.login()" + "import wandb" + ] + }, + { + "cell_type": "code", + "execution_count": null, + "metadata": {}, + "outputs": [], + "source": [ + "!wandb login" ] }, { @@ -224,13 +232,6 @@ "run = api.run(RUN_PATH)\n", "make_and_display_gifs(run)" ] - }, - { - "cell_type": "code", - "execution_count": null, - "metadata": {}, - "outputs": [], - "source": [] } ], "metadata": { diff --git a/colabs/wandb-log/Image_Logging_de_duplication.ipynb b/colabs/wandb-log/Image_Logging_de_duplication.ipynb index 25af66a9..77d3e89d 100644 --- a/colabs/wandb-log/Image_Logging_de_duplication.ipynb +++ b/colabs/wandb-log/Image_Logging_de_duplication.ipynb @@ -80,13 +80,6 @@ "wandb.log({\"image\": img_1})\n", 
"wandb.finish()" ] - }, - { - "cell_type": "code", - "execution_count": null, - "metadata": {}, - "outputs": [], - "source": [] } ], "metadata": { diff --git a/colabs/wandb-log/Log_(Almost)_Anything_with_W&B_Media.ipynb b/colabs/wandb-log/Log_(Almost)_Anything_with_W&B_Media.ipynb index b0df9a05..f77347a4 100644 --- a/colabs/wandb-log/Log_(Almost)_Anything_with_W&B_Media.ipynb +++ b/colabs/wandb-log/Log_(Almost)_Anything_with_W&B_Media.ipynb @@ -80,7 +80,7 @@ "metadata": {}, "outputs": [], "source": [ - "wandb.login()" + "!wandb login" ] }, { diff --git a/colabs/wandb-log/Log_a_Confusion_Matrix_with_W&B.ipynb b/colabs/wandb-log/Log_a_Confusion_Matrix_with_W&B.ipynb index 4bfc7979..49742e2e 100644 --- a/colabs/wandb-log/Log_a_Confusion_Matrix_with_W&B.ipynb +++ b/colabs/wandb-log/Log_a_Confusion_Matrix_with_W&B.ipynb @@ -86,8 +86,16 @@ "metadata": {}, "outputs": [], "source": [ - "import wandb\n", - "wandb.login()" + "import wandb" + ] + }, + { + "cell_type": "code", + "execution_count": null, + "metadata": {}, + "outputs": [], + "source": [ + "!wandb login" ] }, { diff --git a/colabs/wandb-log/Logging_Strategies_for_High_Frequency_Data.ipynb b/colabs/wandb-log/Logging_Strategies_for_High_Frequency_Data.ipynb index eb45d0cc..6c87bec8 100644 --- a/colabs/wandb-log/Logging_Strategies_for_High_Frequency_Data.ipynb +++ b/colabs/wandb-log/Logging_Strategies_for_High_Frequency_Data.ipynb @@ -86,9 +86,16 @@ "metadata": {}, "outputs": [], "source": [ - "# login\n", - "import wandb\n", - "wandb.login()" + "import wandb" + ] + }, + { + "cell_type": "code", + "execution_count": null, + "metadata": {}, + "outputs": [], + "source": [ + "!wandb login" ] }, { diff --git a/colabs/wandb-log/Plot_Precision_Recall_Curves_with_W&B.ipynb b/colabs/wandb-log/Plot_Precision_Recall_Curves_with_W&B.ipynb index 5ecd9eb5..c40dc8bc 100644 --- a/colabs/wandb-log/Plot_Precision_Recall_Curves_with_W&B.ipynb +++ b/colabs/wandb-log/Plot_Precision_Recall_Curves_with_W&B.ipynb @@ -83,8 +83,16 @@ 
"metadata": {}, "outputs": [], "source": [ - "import wandb\n", - "wandb.login()" + "import wandb" + ] + }, + { + "cell_type": "code", + "execution_count": null, + "metadata": {}, + "outputs": [], + "source": [ + "!wandb login" ] }, { diff --git a/colabs/wandb-log/Plot_ROC_Curves_with_W&B.ipynb b/colabs/wandb-log/Plot_ROC_Curves_with_W&B.ipynb index 87bd5971..5f7771b0 100644 --- a/colabs/wandb-log/Plot_ROC_Curves_with_W&B.ipynb +++ b/colabs/wandb-log/Plot_ROC_Curves_with_W&B.ipynb @@ -83,9 +83,16 @@ "metadata": {}, "outputs": [], "source": [ - "# login\n", - "import wandb\n", - "wandb.login()" + "import wandb" + ] + }, + { + "cell_type": "code", + "execution_count": null, + "metadata": {}, + "outputs": [], + "source": [ + "!wandb login" ] }, { diff --git a/colabs/wandb-log/Run_names_visualized_using_min_dalle.ipynb b/colabs/wandb-log/Run_names_visualized_using_min_dalle.ipynb index 18c9c64e..cde79b63 100644 --- a/colabs/wandb-log/Run_names_visualized_using_min_dalle.ipynb +++ b/colabs/wandb-log/Run_names_visualized_using_min_dalle.ipynb @@ -39,10 +39,18 @@ "! git lfs install\n", "! git clone https://huggingface.co/dalle-mini/vqgan_imagenet_f16_16384 /content/min-dalle/pretrained/vqgan\n", "!pip install torch flax==0.4.2 wandb\n", - "! wandb login\n", "! 
wandb artifact get --root=/content/min-dalle/pretrained/dalle_bart_mini dalle-mini/dalle-mini/mini-1:v0" ] }, + { + "cell_type": "code", + "execution_count": null, + "metadata": {}, + "outputs": [], + "source": [ + "!wandb login" + ] + }, { "cell_type": "markdown", "metadata": {}, diff --git a/colabs/wandb-log/Set_Alerts_with_W_&_B.ipynb b/colabs/wandb-log/Set_Alerts_with_W_&_B.ipynb index 19fedfe8..bbfff44d 100644 --- a/colabs/wandb-log/Set_Alerts_with_W_&_B.ipynb +++ b/colabs/wandb-log/Set_Alerts_with_W_&_B.ipynb @@ -53,8 +53,16 @@ "outputs": [], "source": [ "# Log in to your W&B account\n", - "import wandb\n", - "wandb.login()" + "import wandb" + ] + }, + { + "cell_type": "code", + "execution_count": null, + "metadata": {}, + "outputs": [], + "source": [ + "!wandb login" ] }, { diff --git a/colabs/wandb-model-registry/Model_Registry_E2E.ipynb b/colabs/wandb-model-registry/Model_Registry_E2E.ipynb index 5ecae223..207aa7a7 100644 --- a/colabs/wandb-model-registry/Model_Registry_E2E.ipynb +++ b/colabs/wandb-model-registry/Model_Registry_E2E.ipynb @@ -75,8 +75,16 @@ "source": [ "import wandb\n", "\n", - "# Login to W&B\n", - "wandb.login()" + "# Login to W&B" + ] + }, + { + "cell_type": "code", + "execution_count": null, + "metadata": {}, + "outputs": [], + "source": [ + "!wandb login" ] }, { @@ -594,13 +602,6 @@ "artifact_dir = artifact.download()\n", "wandb.finish()" ] - }, - { - "cell_type": "code", - "execution_count": null, - "metadata": {}, - "outputs": [], - "source": [] } ], "metadata": { diff --git a/colabs/yolox/Train_and_Debug_YOLOX_Models_with_Weights_&_Biases.ipynb b/colabs/yolox/Train_and_Debug_YOLOX_Models_with_Weights_&_Biases.ipynb index 8442213f..c87972fb 100644 --- a/colabs/yolox/Train_and_Debug_YOLOX_Models_with_Weights_&_Biases.ipynb +++ b/colabs/yolox/Train_and_Debug_YOLOX_Models_with_Weights_&_Biases.ipynb @@ -89,8 +89,16 @@ "metadata": {}, "outputs": [], "source": [ - "import wandb\n", - "wandb.login()" + "import wandb" + ] + }, + { + 
"cell_type": "code", + "execution_count": null, + "metadata": {}, + "outputs": [], + "source": [ + "!wandb login" ] }, { diff --git a/examples/jax/jax-llm/create_tokenizer.ipynb b/examples/jax/jax-llm/create_tokenizer.ipynb index d3c1fd83..0d5aeabc 100644 --- a/examples/jax/jax-llm/create_tokenizer.ipynb +++ b/examples/jax/jax-llm/create_tokenizer.ipynb @@ -3,7 +3,7 @@ { "cell_type": "code", "execution_count": null, - "id": "eedce872-2204-49f8-8a52-ef338f7a6654", + "id": "bbe946e5", "metadata": {}, "outputs": [], "source": [ @@ -22,7 +22,7 @@ { "cell_type": "code", "execution_count": null, - "id": "2f30468d-36f7-448c-8a09-6e2637dc5c3c", + "id": "98df9b13", "metadata": {}, "outputs": [], "source": [ @@ -32,7 +32,7 @@ { "cell_type": "code", "execution_count": null, - "id": "db2f7eeb-9178-4fa9-9156-1a86072add3e", + "id": "31a4368f", "metadata": {}, "outputs": [], "source": [ @@ -43,7 +43,7 @@ { "cell_type": "code", "execution_count": null, - "id": "80520842-7460-4f0b-bf6c-c586506f8bc8", + "id": "8e68a2b7", "metadata": {}, "outputs": [], "source": [ @@ -53,7 +53,7 @@ { "cell_type": "code", "execution_count": null, - "id": "7216dd5d-25bf-4b6d-b677-157eb7e85756", + "id": "0bb7b8b2", "metadata": {}, "outputs": [], "source": [ @@ -71,7 +71,7 @@ { "cell_type": "code", "execution_count": null, - "id": "49ff186a-ecfc-4492-b457-053734ed5970", + "id": "ba4d2d50", "metadata": {}, "outputs": [], "source": [ @@ -81,7 +81,7 @@ { "cell_type": "code", "execution_count": null, - "id": "2a2fffc0-8bd5-4558-b0db-655271ced251", + "id": "2dd6d371", "metadata": {}, "outputs": [], "source": [ @@ -103,7 +103,7 @@ { "cell_type": "code", "execution_count": null, - "id": "6784b4d7-22dc-4726-bedf-a655a7ae2982", + "id": "b0884429", "metadata": {}, "outputs": [], "source": [ @@ -116,7 +116,7 @@ { "cell_type": "code", "execution_count": null, - "id": "7d9804dd-3fb1-42f5-80cc-9d392c9a229a", + "id": "4dcade17", "metadata": {}, "outputs": [], "source": [ @@ -126,7 +126,7 @@ { "cell_type": "code", 
"execution_count": null, - "id": "e2298511-f2ba-4863-95dd-f592c78499ed", + "id": "bfc09cc1", "metadata": {}, "outputs": [], "source": [ @@ -136,7 +136,7 @@ { "cell_type": "code", "execution_count": null, - "id": "b5ed4e7f-3218-4f72-b407-4369dd9df3d2", + "id": "6f7a1c3a", "metadata": {}, "outputs": [], "source": [ @@ -146,7 +146,7 @@ { "cell_type": "code", "execution_count": null, - "id": "aa0fb355-304b-48be-8585-ba246a553ad4", + "id": "450f71bd", "metadata": {}, "outputs": [], "source": [ diff --git a/examples/keras/keras-tensorflow2/TensorFlow_2_0_+_Keras_Crash_Course_+_W&B.ipynb b/examples/keras/keras-tensorflow2/TensorFlow_2_0_+_Keras_Crash_Course_+_W&B.ipynb index 37560e3a..2a1cc50f 100644 --- a/examples/keras/keras-tensorflow2/TensorFlow_2_0_+_Keras_Crash_Course_+_W&B.ipynb +++ b/examples/keras/keras-tensorflow2/TensorFlow_2_0_+_Keras_Crash_Course_+_W&B.ipynb @@ -1679,11 +1679,6 @@ "model.compile(optimizer=optimizer, loss=loss, metrics=[accuracy])" ] }, - { - "cell_type": "markdown", - "metadata": {}, - "source": [] - }, { "cell_type": "markdown", "metadata": {}, diff --git a/examples/pytorch/pytorch-cifar10-sagemaker/train.ipynb b/examples/pytorch/pytorch-cifar10-sagemaker/train.ipynb index b9ec4e33..8ae13f31 100644 --- a/examples/pytorch/pytorch-cifar10-sagemaker/train.ipynb +++ b/examples/pytorch/pytorch-cifar10-sagemaker/train.ipynb @@ -32,8 +32,7 @@ "metadata": {}, "outputs": [], "source": [ - "# login\n", - "wandb.login()" + "!wandb login" ] }, { diff --git a/examples/pytorch/pytorch-intro/intro.ipynb b/examples/pytorch/pytorch-intro/intro.ipynb index 29e80825..a3ad850d 100755 --- a/examples/pytorch/pytorch-intro/intro.ipynb +++ b/examples/pytorch/pytorch-intro/intro.ipynb @@ -112,9 +112,16 @@ "metadata": {}, "outputs": [], "source": [ - "import wandb\n", - "\n", - "wandb.login()" + "import wandb\n" + ] + }, + { + "cell_type": "code", + "execution_count": null, + "metadata": {}, + "outputs": [], + "source": [ + "!wandb login" ] }, { diff --git 
a/examples/pytorch/pytorch-mnist-sagemaker/pytorch_mnist.ipynb b/examples/pytorch/pytorch-mnist-sagemaker/pytorch_mnist.ipynb index b6dca598..05fcd7ee 100644 --- a/examples/pytorch/pytorch-mnist-sagemaker/pytorch_mnist.ipynb +++ b/examples/pytorch/pytorch-mnist-sagemaker/pytorch_mnist.ipynb @@ -491,11 +491,19 @@ ], "source": [ "import wandb\n", - "wandb.login()\n", "settings = wandb.setup().settings\n", "current_api_key = wandb.wandb_lib.apikey.api_key(settings=settings)" ] }, + { + "cell_type": "code", + "execution_count": null, + "metadata": {}, + "outputs": [], + "source": [ + "!wandb login" + ] + }, { "cell_type": "code", "execution_count": null, diff --git a/examples/sagemaker/text_classification/text_classification.ipynb b/examples/sagemaker/text_classification/text_classification.ipynb index b9bf3487..8bc7c8fd 100644 --- a/examples/sagemaker/text_classification/text_classification.ipynb +++ b/examples/sagemaker/text_classification/text_classification.ipynb @@ -98,8 +98,16 @@ "metadata": {}, "outputs": [], "source": [ - "import wandb\n", - "wandb.login()" + "import wandb" + ] + }, + { + "cell_type": "code", + "execution_count": null, + "metadata": {}, + "outputs": [], + "source": [ + "!wandb login" ] }, {