From cb9a32d3dfb2fd46255c166c8f50eab1329607cb Mon Sep 17 00:00:00 2001 From: Thomas Capelle Date: Fri, 22 Sep 2023 12:50:16 +0200 Subject: [PATCH] revert login and clean up --- .github/nb_scripts/fix_login.ipynb | 140 +- colabs/audiocraft/AudioCraft.ipynb | 51 +- ...edit_Scorecards_with_XGBoost_and_W&B.ipynb | 2 +- .../Simple_LightGBM_Integration.ipynb | 2 +- .../Using_W&B_Sweeps_with_XGBoost.ipynb | 2 +- .../Image_Classification_with_Tables.ipynb | 2 +- .../Logging_Timbre_Transfer_with_W&B.ipynb | 2 +- .../W&B_Dataset_Visualization.ipynb | 2 +- .../W&B_Tables_Quickstart.ipynb | 2 +- colabs/deepchem/W&B_x_DeepChem.ipynb | 2 +- ...W&B_Dataset_and_Predictions_Viz_Demo.ipynb | 2 +- .../Semantic_Segmentation_Demo_with_W&B.ipynb | 2 +- .../fastai/Weights_&_Biases_with_fastai.ipynb | 2 +- colabs/huggingface/Huggingface_wandb.ipynb | 2 +- ...ng_Face_models_with_Weights_&_Biases.ipynb | 2 +- .../Simple_accelerate_integration_wandb.ipynb | 32 +- ...ging_Face_data_with_Weights_&_Biases.ipynb | 2 +- colabs/huggingface/wandb_hf_example.ipynb | 2 +- ...o_to_Weights_&_Biases_CV,_NLP_and_RL.ipynb | 2 +- colabs/intro/Intro_to_Weights_&_Biases.ipynb | 2 +- .../Intro_to_Weights_&_Biases_keras.ipynb | 2 +- ...nteractive_W&B_Charts_Inside_Jupyter.ipynb | 2 +- ...une_Vision_Transformer_using_KerasCV.ipynb | 2 +- .../keras/Image_Segmentation_with_Keras.ipynb | 2 +- .../keras/Keras_param_opti_using_sweeps.ipynb | 2 +- ...ras_pipeline_with_Weights_and_Biases.ipynb | 2 +- colabs/keras/Simple_Keras_Integration.ipynb | 2 +- ...bEvalCallback_in_your_Keras_workflow.ipynb | 2 +- ...bMetricLogger_in_your_Keras_workflow.ipynb | 2 +- ...delCheckpoint_in_your_Keras_workflow.ipynb | 2 +- colabs/keras/cosine_decay_using_keras.ipynb | 2 +- .../keras_nsynth_instrument_prediction.ipynb | 2 +- ...ct_Detector_with_MMDetection_and_W&B.ipynb | 2 +- ...ation_Model_with_MMDetection_and_W&B.ipynb | 2 +- .../MosaicML_Composer_and_wandb.ipynb | 72 +- ..._tune_OpenAI_with_Weights_and_Biases.ipynb | 14 +- .../OpenAI_API_Autologger_Quickstart.ipynb | 30 +- ...stop_for_everything_object_detection.ipynb | 2 +- ...ur_OCR_Models_with_PaddleOCR_and_W&B.ipynb | 2 +- colabs/paella/Image-Variations.ipynb | 22 +- colabs/paella/Inpainting.ipynb | 22 +- .../paella/Latent-Space-Interpolation.ipynb | 20 +- colabs/paella/Multi-Conditioning.ipynb | 20 +- ...rientation-Guided-Multi-Conditioning.ipynb | 20 +- colabs/paella/Outpainting.ipynb | 22 +- colabs/paella/Structural-Morphing.ipynb | 22 +- colabs/paella/Text-Conditional.ipynb | 20 +- .../W&B_Prompts_with_Custom_Columns.ipynb | 1141 ++++++++--------- colabs/prompts/WandB_LLM_QA_bot.ipynb | 2 +- ...Prediction_Using_W&B_Pycaret_FastAPI.ipynb | 2 +- .../8_Node_Classification_(with_W&B).ipynb | 2 +- .../pyg/point-cloud-segmentation/00_eda.ipynb | 34 +- .../01_dgcnn_train.ipynb | 54 +- .../02_dgcnn_evaluate.ipynb | 50 +- .../pyg/pointnet-classification/00_eda.ipynb | 34 +- .../01_compare_sampling.ipynb | 24 +- .../02_pointnet_plus_plus.ipynb | 44 +- .../pointnet-classification/03_sweep.ipynb | 32 +- ...a_Transformer_with_Pytorch_Lightning.ipynb | 2 +- ...assification_using_PyTorch_Lightning.ipynb | 2 +- ...ghtning_models_with_Weights_&_Biases.ipynb | 2 +- .../Profile_PyTorch_Code.ipynb | 2 +- ...rch_Lightning_and_Weights_and_Biases.ipynb | 2 +- ...fer_Learning_Using_PyTorch_Lightning.ipynb | 2 +- ...db_End_to_End_with_PyTorch_Lightning.ipynb | 2 +- ...ing_dropout_affect_model_performance.ipynb | 2 +- ...parameter_Sweeps_in_PyTorch_with_W&B.ipynb | 2 +- .../pytorch/Simple_PyTorch_Integration.ipynb | 2 +- 
colabs/raytune/RayTune_with_wandb.ipynb | 2 +- colabs/raytune/tune-wandb.ipynb | 42 +- colabs/rdkit/wb_rdkit.ipynb | 2 +- colabs/scikit/Simple_Scikit_Integration.ipynb | 2 +- colabs/scikit/w-b-k-means-clustering.ipynb | 2 +- .../SimpleTransformersQA.ipynb | 2 +- colabs/spacy/SpaCy_v3_and_W&B.ipynb | 2 +- colabs/spacy/spaCy_v3_and_W&B_Sweeps.ipynb | 2 +- colabs/stylegan_nada/StyleGAN-NADA.ipynb | 2 +- colabs/super-gradients/yolo_nas.ipynb | 6 +- .../AlphaFold_with_W&B_Align,_Fold,_Log.ipynb | 2 +- colabs/tables/Log_Tables_Incrementally.ipynb | 2 +- colabs/tables/W&B_Tables_Quickstart.ipynb | 2 +- .../Accelerator_W&B_Tensorboard.ipynb | 2 +- ...ation_in_TensorFlow_using_W&B_Sweeps.ipynb | 2 +- .../Simple_TensorFlow_Integration.ipynb | 2 +- colabs/ultralytics/00_inference.ipynb | 6 +- colabs/ultralytics/01_train_val.ipynb | 6 +- .../Artifacts_Quickstart_with_W&B.ipynb | 2 +- .../Basic_Artifacts_with_W&B.ipynb | 2 +- .../W&B_artifacts_for_auditing_purposes.ipynb | 2 +- ...ain_val_test_split_with_tabular_data.ipynb | 2 +- colabs/wandb-log/Configs_in_W&B.ipynb | 2 +- ...ze_metric_logging_with_define_metric.ipynb | 2 +- ...ate_gifs_from_logged_images_on_wandb.ipynb | 2 +- ...Log_(Almost)_Anything_with_W&B_Media.ipynb | 2 +- .../Log_a_Confusion_Matrix_with_W&B.ipynb | 2 +- ...g_Strategies_for_High_Frequency_Data.ipynb | 2 +- ...lot_Precision_Recall_Curves_with_W&B.ipynb | 2 +- .../wandb-log/Plot_ROC_Curves_with_W&B.ipynb | 2 +- ...Run_names_visualized_using_min_dalle.ipynb | 2 +- colabs/wandb-log/Set_Alerts_with_W_&_B.ipynb | 2 +- .../Model_Registry_E2E.ipynb | 2 +- ...g_YOLOX_Models_with_Weights_&_Biases.ipynb | 2 +- examples/jax/jax-llm/create_tokenizer.ipynb | 41 +- .../pytorch-cifar10-sagemaker/train.ipynb | 2 +- examples/pytorch/pytorch-intro/intro.ipynb | 2 +- .../pytorch_mnist.ipynb | 2 +- .../text_classification.ipynb | 2 +- 107 files changed, 976 insertions(+), 1205 deletions(-) diff --git a/.github/nb_scripts/fix_login.ipynb b/.github/nb_scripts/fix_login.ipynb index 613614a3..2559088f 100644 --- a/.github/nb_scripts/fix_login.ipynb +++ b/.github/nb_scripts/fix_login.ipynb @@ -2,7 +2,7 @@ "cells": [ { "cell_type": "code", - "execution_count": 51, + "execution_count": null, "metadata": {}, "outputs": [], "source": [ @@ -13,16 +13,28 @@ }, { "cell_type": "code", - "execution_count": 52, + "execution_count": null, "metadata": {}, - "outputs": [], + "outputs": [ + { + "data": { + "text/plain": [ + "152" + ] + }, + "execution_count": null, + "metadata": {}, + "output_type": "execute_result" + } + ], "source": [ - "nbs_paths = find_nbs(Path.cwd())" + "nbs_paths = find_nbs(\"../../\")\n", + "len(nbs_paths)" ] }, { "cell_type": "code", - "execution_count": 53, + "execution_count": null, "metadata": {}, "outputs": [ { @@ -31,36 +43,28 @@ "Path('/Users/tcapelle/work/examples/colabs/boosting/Credit_Scorecards_with_XGBoost_and_W&B.ipynb')" ] }, - "execution_count": 53, + "execution_count": null, "metadata": {}, "output_type": "execute_result" } ], "source": [ "nb_path = nbs_paths[3]\n", - "nb_path" + "nb_path\n" ] }, { "cell_type": "code", - "execution_count": 54, + "execution_count": null, "metadata": {}, "outputs": [], "source": [ - "def idx_login_cell(nb, delete_line=True):\n", - " for i, cell in enumerate(nb[\"cells\"]):\n", - " if cell[\"cell_type\"] == \"code\":\n", - " if \"login\" in cell[\"source\"]:\n", - " if delete_line:\n", - " cell_content = cell[\"source\"].split(\"\\n\")\n", - " cell_content = [line for line in cell_content if \"login\" not in line]\n", - " cell[\"source\"] = 
\"\\n\".join(cell_content) \n", - " return i" + "wandb.login()" ] }, { "cell_type": "code", - "execution_count": 55, + "execution_count": null, "metadata": {}, "outputs": [], "source": [ @@ -69,72 +73,34 @@ }, { "cell_type": "code", - "execution_count": 56, - "metadata": {}, - "outputs": [], - "source": [ - "cell = nb[\"cells\"][1]" - ] - }, - { - "cell_type": "code", - "execution_count": 57, - "metadata": {}, - "outputs": [ - { - "data": { - "text/plain": [ - "nbformat.notebooknode.NotebookNode" - ] - }, - "execution_count": 57, - "metadata": {}, - "output_type": "execute_result" - } - ], - "source": [ - "type(cell)" - ] - }, - { - "cell_type": "code", - "execution_count": 58, + "execution_count": null, "metadata": {}, "outputs": [ { "data": { "text/plain": [ - "{'attachments': {},\n", - " 'cell_type': 'markdown',\n", - " 'metadata': {},\n", - " 'source': '\"Weights
\\n\\n\\n\\n\"Weights'}" + "18" ] }, - "execution_count": 58, + "execution_count": null, "metadata": {}, "output_type": "execute_result" } ], "source": [ - "cell" + "login_idx = idx_login_cell(nb, delete_line=False)\n", + "login_idx" ] }, { "cell_type": "code", - "execution_count": 59, - "metadata": {}, - "outputs": [], - "source": [ - "login_idx = idx_login_cell(nb)" - ] - }, - { - "cell_type": "code", - "execution_count": 60, + "execution_count": null, "metadata": {}, "outputs": [], "source": [ - "def insert_login_cell(nb, idx, code=\"!wandb login\"):\n", + "login_code = \"wandb.login()\" # or !wandb login\n", + "\n", + "def insert_login_cell(nb, idx, code=login_code, replace=True):\n", " n_cells = len(nb[\"cells\"])\n", " login_cell = nbformat.v4.new_code_cell(source=code)\n", " nb[\"cells\"].insert(idx+1, login_cell)\n", @@ -143,7 +109,7 @@ }, { "cell_type": "code", - "execution_count": 61, + "execution_count": null, "metadata": {}, "outputs": [], "source": [ @@ -152,7 +118,7 @@ }, { "cell_type": "code", - "execution_count": 62, + "execution_count": null, "metadata": {}, "outputs": [ { @@ -162,10 +128,10 @@ " 'execution_count': None,\n", " 'metadata': {},\n", " 'outputs': [],\n", - " 'source': \"import wandb\\n\\nWANDB_PROJECT ='vehicle_loan_default'\"}" + " 'source': '!wandb login'}" ] }, - "execution_count": 62, + "execution_count": null, "metadata": {}, "output_type": "execute_result" } @@ -176,7 +142,7 @@ }, { "cell_type": "code", - "execution_count": 63, + "execution_count": null, "metadata": {}, "outputs": [ { @@ -190,7 +156,7 @@ " 'outputs': []}" ] }, - "execution_count": 63, + "execution_count": null, "metadata": {}, "output_type": "execute_result" } @@ -201,7 +167,7 @@ }, { "cell_type": "code", - "execution_count": 64, + "execution_count": null, "metadata": {}, "outputs": [ { @@ -213,7 +179,7 @@ " 'source': '## Vehicle Loan Dataset\\n\\nWe will be using a simplified version of the [Vehicle Loan Default Prediction dataset](https://www.kaggle.com/sneharshinde/ltfs-av-data) from L&T which has been stored in W&B Artifacts. 
'}" ] }, - "execution_count": 64, + "execution_count": null, "metadata": {}, "output_type": "execute_result" } @@ -224,7 +190,7 @@ }, { "cell_type": "code", - "execution_count": 65, + "execution_count": null, "metadata": {}, "outputs": [], "source": [ @@ -241,7 +207,7 @@ }, { "cell_type": "code", - "execution_count": 66, + "execution_count": null, "metadata": {}, "outputs": [], "source": [ @@ -251,7 +217,7 @@ }, { "cell_type": "code", - "execution_count": 67, + "execution_count": null, "metadata": {}, "outputs": [], "source": [ @@ -267,13 +233,6 @@ " nbformat.validate(nb)\n", " nbformat.write(nb, nb_path)" ] - }, - { - "cell_type": "code", - "execution_count": null, - "metadata": {}, - "outputs": [], - "source": [] } ], "metadata": { @@ -281,20 +240,7 @@ "display_name": "nbdev2", "language": "python", "name": "python3" - }, - "language_info": { - "codemirror_mode": { - "name": "ipython", - "version": 3 - }, - "file_extension": ".py", - "mimetype": "text/x-python", - "name": "python", - "nbconvert_exporter": "python", - "pygments_lexer": "ipython3", - "version": "3.10.6" - }, - "orig_nbformat": 4 + } }, "nbformat": 4, "nbformat_minor": 2 diff --git a/colabs/audiocraft/AudioCraft.ipynb b/colabs/audiocraft/AudioCraft.ipynb index 9bb30020..af385a28 100644 --- a/colabs/audiocraft/AudioCraft.ipynb +++ b/colabs/audiocraft/AudioCraft.ipynb @@ -3,9 +3,7 @@ { "attachments": {}, "cell_type": "markdown", - "metadata": { - "id": "W-26KlXuiXul" - }, + "metadata": {}, "source": [ "\"Weights\n", "\n", @@ -24,15 +22,7 @@ { "cell_type": "code", "execution_count": null, - "metadata": { - "cellView": "form", - "colab": { - "base_uri": "https://localhost:8080/", - "height": 1000 - }, - "id": "EZU3hg4B1om6", - "outputId": "3311a45d-35c3-49e8-cbd5-4618386fa2a1" - }, + "metadata": {}, "outputs": [], "source": [ "# @title Install AudioCraft + WandB\n", @@ -43,10 +33,7 @@ { "cell_type": "code", "execution_count": null, - "metadata": { - "cellView": "form", - "id": "RerQaiZt14r8" - }, + "metadata": {}, "outputs": [], "source": [ "# @title\n", @@ -71,15 +58,7 @@ { "cell_type": "code", "execution_count": null, - "metadata": { - "cellView": "form", - "colab": { - "base_uri": "https://localhost:8080/", - "height": 162 - }, - "id": "3MTX8GoE7AzN", - "outputId": "ebe733d0-3a21-47e5-d217-89a622cafc62" - }, + "metadata": {}, "outputs": [], "source": [ "# @title ## Audio Generation Configs\n", @@ -182,14 +161,7 @@ { "cell_type": "code", "execution_count": null, - "metadata": { - "cellView": "form", - "colab": { - "base_uri": "https://localhost:8080/" - }, - "id": "SfM8rhVX7ES9", - "outputId": "a935173b-382a-4514-97fc-eec12e188379" - }, + "metadata": {}, "outputs": [], "source": [ "# @title Generate Audio using MusicGen\n", @@ -266,15 +238,7 @@ { "cell_type": "code", "execution_count": null, - "metadata": { - "cellView": "form", - "colab": { - "base_uri": "https://localhost:8080/", - "height": 104 - }, - "id": "_n-1RthFVPYN", - "outputId": "ac7bbc70-8114-4ef7-ef64-ecae9ba898cf" - }, + "metadata": {}, "outputs": [], "source": [ "# @title Log Audio to Weights & Biases Dashboard\n", @@ -421,9 +385,6 @@ "kernelspec": { "display_name": "Python 3", "name": "python3" - }, - "language_info": { - "name": "python" } }, "nbformat": 4, diff --git a/colabs/boosting/Credit_Scorecards_with_XGBoost_and_W&B.ipynb b/colabs/boosting/Credit_Scorecards_with_XGBoost_and_W&B.ipynb index 14966715..57b392ef 100644 --- a/colabs/boosting/Credit_Scorecards_with_XGBoost_and_W&B.ipynb +++ b/colabs/boosting/Credit_Scorecards_with_XGBoost_and_W&B.ipynb @@ 
-221,7 +221,7 @@ "metadata": {}, "outputs": [], "source": [ - "!wandb login" + "wandb.login()" ] }, { diff --git a/colabs/boosting/Simple_LightGBM_Integration.ipynb b/colabs/boosting/Simple_LightGBM_Integration.ipynb index e3a6e637..263e64df 100644 --- a/colabs/boosting/Simple_LightGBM_Integration.ipynb +++ b/colabs/boosting/Simple_LightGBM_Integration.ipynb @@ -120,7 +120,7 @@ "metadata": {}, "outputs": [], "source": [ - "!wandb login" + "wandb.login()" ] }, { diff --git a/colabs/boosting/Using_W&B_Sweeps_with_XGBoost.ipynb b/colabs/boosting/Using_W&B_Sweeps_with_XGBoost.ipynb index 310ace6b..5e1a2a8d 100644 --- a/colabs/boosting/Using_W&B_Sweeps_with_XGBoost.ipynb +++ b/colabs/boosting/Using_W&B_Sweeps_with_XGBoost.ipynb @@ -106,7 +106,7 @@ "metadata": {}, "outputs": [], "source": [ - "!wandb login" + "wandb.login()" ] }, { diff --git a/colabs/datasets-predictions/Image_Classification_with_Tables.ipynb b/colabs/datasets-predictions/Image_Classification_with_Tables.ipynb index 2b81b3f0..8def8a28 100644 --- a/colabs/datasets-predictions/Image_Classification_with_Tables.ipynb +++ b/colabs/datasets-predictions/Image_Classification_with_Tables.ipynb @@ -149,7 +149,7 @@ "metadata": {}, "outputs": [], "source": [ - "!wandb login" + "wandb.login()" ] }, { diff --git a/colabs/datasets-predictions/Logging_Timbre_Transfer_with_W&B.ipynb b/colabs/datasets-predictions/Logging_Timbre_Transfer_with_W&B.ipynb index 23add602..85d83e12 100644 --- a/colabs/datasets-predictions/Logging_Timbre_Transfer_with_W&B.ipynb +++ b/colabs/datasets-predictions/Logging_Timbre_Transfer_with_W&B.ipynb @@ -202,7 +202,7 @@ "metadata": {}, "outputs": [], "source": [ - "!wandb login" + "wandb.login()" ] }, { diff --git a/colabs/datasets-predictions/W&B_Dataset_Visualization.ipynb b/colabs/datasets-predictions/W&B_Dataset_Visualization.ipynb index 4f548d63..a9818a49 100644 --- a/colabs/datasets-predictions/W&B_Dataset_Visualization.ipynb +++ b/colabs/datasets-predictions/W&B_Dataset_Visualization.ipynb @@ -102,7 +102,7 @@ "metadata": {}, "outputs": [], "source": [ - "!wandb login" + "wandb.login()" ] }, { diff --git a/colabs/datasets-predictions/W&B_Tables_Quickstart.ipynb b/colabs/datasets-predictions/W&B_Tables_Quickstart.ipynb index 8e847a4d..8b0cd943 100644 --- a/colabs/datasets-predictions/W&B_Tables_Quickstart.ipynb +++ b/colabs/datasets-predictions/W&B_Tables_Quickstart.ipynb @@ -92,7 +92,7 @@ "metadata": {}, "outputs": [], "source": [ - "!wandb login" + "wandb.login()" ] }, { diff --git a/colabs/deepchem/W&B_x_DeepChem.ipynb b/colabs/deepchem/W&B_x_DeepChem.ipynb index a6412e2b..84303756 100644 --- a/colabs/deepchem/W&B_x_DeepChem.ipynb +++ b/colabs/deepchem/W&B_x_DeepChem.ipynb @@ -69,7 +69,7 @@ "metadata": {}, "outputs": [], "source": [ - "!wandb login" + "wandb.login()" ] }, { diff --git a/colabs/dsviz/W&B_Dataset_and_Predictions_Viz_Demo.ipynb b/colabs/dsviz/W&B_Dataset_and_Predictions_Viz_Demo.ipynb index c17221a5..4c713086 100644 --- a/colabs/dsviz/W&B_Dataset_and_Predictions_Viz_Demo.ipynb +++ b/colabs/dsviz/W&B_Dataset_and_Predictions_Viz_Demo.ipynb @@ -121,7 +121,7 @@ "metadata": {}, "outputs": [], "source": [ - "!wandb login" + "wandb.login()" ] }, { diff --git a/colabs/fastai/Semantic_Segmentation_Demo_with_W&B.ipynb b/colabs/fastai/Semantic_Segmentation_Demo_with_W&B.ipynb index 4dd645a0..a6d95eb1 100644 --- a/colabs/fastai/Semantic_Segmentation_Demo_with_W&B.ipynb +++ b/colabs/fastai/Semantic_Segmentation_Demo_with_W&B.ipynb @@ -91,7 +91,7 @@ "metadata": {}, "outputs": [], "source": [ - "!wandb login" + 
"wandb.login()" ] }, { diff --git a/colabs/fastai/Weights_&_Biases_with_fastai.ipynb b/colabs/fastai/Weights_&_Biases_with_fastai.ipynb index 9dc02117..7327755b 100644 --- a/colabs/fastai/Weights_&_Biases_with_fastai.ipynb +++ b/colabs/fastai/Weights_&_Biases_with_fastai.ipynb @@ -89,7 +89,7 @@ "metadata": {}, "outputs": [], "source": [ - "!wandb login" + "wandb.login()" ] }, { diff --git a/colabs/huggingface/Huggingface_wandb.ipynb b/colabs/huggingface/Huggingface_wandb.ipynb index 5f17ac0f..bdf21058 100644 --- a/colabs/huggingface/Huggingface_wandb.ipynb +++ b/colabs/huggingface/Huggingface_wandb.ipynb @@ -135,7 +135,7 @@ "metadata": {}, "outputs": [], "source": [ - "!wandb login" + "wandb.login()" ] }, { diff --git a/colabs/huggingface/Optimize_Hugging_Face_models_with_Weights_&_Biases.ipynb b/colabs/huggingface/Optimize_Hugging_Face_models_with_Weights_&_Biases.ipynb index 55b2a1d1..4b186151 100644 --- a/colabs/huggingface/Optimize_Hugging_Face_models_with_Weights_&_Biases.ipynb +++ b/colabs/huggingface/Optimize_Hugging_Face_models_with_Weights_&_Biases.ipynb @@ -118,7 +118,7 @@ "metadata": {}, "outputs": [], "source": [ - "!wandb login" + "wandb.login()" ] }, { diff --git a/colabs/huggingface/Simple_accelerate_integration_wandb.ipynb b/colabs/huggingface/Simple_accelerate_integration_wandb.ipynb index bf2b5093..5898e68c 100644 --- a/colabs/huggingface/Simple_accelerate_integration_wandb.ipynb +++ b/colabs/huggingface/Simple_accelerate_integration_wandb.ipynb @@ -2,7 +2,7 @@ "cells": [ { "cell_type": "markdown", - "id": "a7b2cb7b", + "id": "205e453e", "metadata": {}, "source": [ "\"Open\n", @@ -12,7 +12,7 @@ { "attachments": {}, "cell_type": "markdown", - "id": "457a3515", + "id": "fe7c4702", "metadata": {}, "source": [ "# Using Huggingface Accelerate with Weights and Biases\n", @@ -21,7 +21,7 @@ }, { "cell_type": "markdown", - "id": "434e4e75", + "id": "04b2cd3c", "metadata": {}, "source": [ "[Accelerate](https://github.com/huggingface/accelerate) is this amazing little framework that simplifies your PyTorch training scripts enabling you to train with all the tricks out there!\n", @@ -69,7 +69,7 @@ }, { "cell_type": "markdown", - "id": "0a62d793", + "id": "179d31c1", "metadata": {}, "source": [ "## Training and Image Classifier" @@ -78,7 +78,7 @@ { "cell_type": "code", "execution_count": null, - "id": "8446561f", + "id": "1a080fa1", "metadata": {}, "outputs": [], "source": [ @@ -88,7 +88,7 @@ { "cell_type": "code", "execution_count": null, - "id": "7b308699", + "id": "a62d276f", "metadata": {}, "outputs": [], "source": [ @@ -112,7 +112,7 @@ }, { "cell_type": "markdown", - "id": "f9042940", + "id": "6f2923f8", "metadata": {}, "source": [ "Store your configuration parameters" @@ -121,7 +121,7 @@ { "cell_type": "code", "execution_count": null, - "id": "08cae1f3", + "id": "f2e5118b", "metadata": {}, "outputs": [], "source": [ @@ -138,7 +138,7 @@ }, { "cell_type": "markdown", - "id": "9d513fa3", + "id": "6e038818", "metadata": {}, "source": [ "setup transforms" @@ -147,7 +147,7 @@ { "cell_type": "code", "execution_count": null, - "id": "3dfeb520", + "id": "8424eb9a", "metadata": {}, "outputs": [], "source": [ @@ -160,7 +160,7 @@ }, { "cell_type": "markdown", - "id": "534a5f7e", + "id": "43a359aa", "metadata": {}, "source": [ "Create a simple CNN" @@ -169,7 +169,7 @@ { "cell_type": "code", "execution_count": null, - "id": "f0cab9be", + "id": "3e903bb8", "metadata": {}, "outputs": [], "source": [ @@ -189,7 +189,7 @@ }, { "cell_type": "markdown", - "id": "3d6f2cf8", + "id": "d07a7cf2", 
"metadata": {}, "source": [ "Wrap everything into a training functions (this is necessary to run on multiple GPUS, if it is only one, you can skip the wrapping)" @@ -198,7 +198,7 @@ { "cell_type": "code", "execution_count": null, - "id": "0fd6d359", + "id": "78346075", "metadata": {}, "outputs": [], "source": [ @@ -251,7 +251,7 @@ }, { "cell_type": "markdown", - "id": "a8e34c91", + "id": "2e162db1", "metadata": {}, "source": [ "Let's train on 2 GPUs! This is really nice, as accelerate will take care of only calling `log` on the main process, so only one run get's created, so no need to manually check the rank of the process when using multiple GPUs." @@ -260,7 +260,7 @@ { "cell_type": "code", "execution_count": null, - "id": "b0922368", + "id": "027b2d42", "metadata": {}, "outputs": [], "source": [ diff --git a/colabs/huggingface/Visualize_your_Hugging_Face_data_with_Weights_&_Biases.ipynb b/colabs/huggingface/Visualize_your_Hugging_Face_data_with_Weights_&_Biases.ipynb index c386b743..529ce653 100644 --- a/colabs/huggingface/Visualize_your_Hugging_Face_data_with_Weights_&_Biases.ipynb +++ b/colabs/huggingface/Visualize_your_Hugging_Face_data_with_Weights_&_Biases.ipynb @@ -71,7 +71,7 @@ "metadata": {}, "outputs": [], "source": [ - "!wandb login" + "wandb.login()" ] }, { diff --git a/colabs/huggingface/wandb_hf_example.ipynb b/colabs/huggingface/wandb_hf_example.ipynb index 51436229..b1bcf54f 100644 --- a/colabs/huggingface/wandb_hf_example.ipynb +++ b/colabs/huggingface/wandb_hf_example.ipynb @@ -98,7 +98,7 @@ "metadata": {}, "outputs": [], "source": [ - "!wandb login" + "wandb.login()" ] }, { diff --git a/colabs/intro/3_in_1_Intro_to_Weights_&_Biases_CV,_NLP_and_RL.ipynb b/colabs/intro/3_in_1_Intro_to_Weights_&_Biases_CV,_NLP_and_RL.ipynb index 24b56715..75becb55 100644 --- a/colabs/intro/3_in_1_Intro_to_Weights_&_Biases_CV,_NLP_and_RL.ipynb +++ b/colabs/intro/3_in_1_Intro_to_Weights_&_Biases_CV,_NLP_and_RL.ipynb @@ -135,7 +135,7 @@ "metadata": {}, "outputs": [], "source": [ - "!wandb login" + "wandb.login()" ] }, { diff --git a/colabs/intro/Intro_to_Weights_&_Biases.ipynb b/colabs/intro/Intro_to_Weights_&_Biases.ipynb index ad267b72..e2705b2c 100644 --- a/colabs/intro/Intro_to_Weights_&_Biases.ipynb +++ b/colabs/intro/Intro_to_Weights_&_Biases.ipynb @@ -85,7 +85,7 @@ "metadata": {}, "outputs": [], "source": [ - "!wandb login" + "wandb.login()" ] }, { diff --git a/colabs/intro/Intro_to_Weights_&_Biases_keras.ipynb b/colabs/intro/Intro_to_Weights_&_Biases_keras.ipynb index 566a2747..7c80dc77 100644 --- a/colabs/intro/Intro_to_Weights_&_Biases_keras.ipynb +++ b/colabs/intro/Intro_to_Weights_&_Biases_keras.ipynb @@ -82,7 +82,7 @@ "metadata": {}, "outputs": [], "source": [ - "!wandb login" + "wandb.login()" ] }, { diff --git a/colabs/jupyter/Interactive_W&B_Charts_Inside_Jupyter.ipynb b/colabs/jupyter/Interactive_W&B_Charts_Inside_Jupyter.ipynb index 1870d3dd..c7b29721 100644 --- a/colabs/jupyter/Interactive_W&B_Charts_Inside_Jupyter.ipynb +++ b/colabs/jupyter/Interactive_W&B_Charts_Inside_Jupyter.ipynb @@ -104,7 +104,7 @@ "metadata": {}, "outputs": [], "source": [ - "!wandb login" + "wandb.login()" ] }, { diff --git a/colabs/keras/Fine_tune_Vision_Transformer_using_KerasCV.ipynb b/colabs/keras/Fine_tune_Vision_Transformer_using_KerasCV.ipynb index 84d07626..68db1c28 100644 --- a/colabs/keras/Fine_tune_Vision_Transformer_using_KerasCV.ipynb +++ b/colabs/keras/Fine_tune_Vision_Transformer_using_KerasCV.ipynb @@ -56,7 +56,7 @@ "metadata": {}, "outputs": [], "source": [ - "!wandb login" + 
"wandb.login()" ] }, { diff --git a/colabs/keras/Image_Segmentation_with_Keras.ipynb b/colabs/keras/Image_Segmentation_with_Keras.ipynb index 3ab79047..7caba275 100644 --- a/colabs/keras/Image_Segmentation_with_Keras.ipynb +++ b/colabs/keras/Image_Segmentation_with_Keras.ipynb @@ -43,7 +43,7 @@ "metadata": {}, "outputs": [], "source": [ - "!wandb login" + "wandb.login()" ] }, { diff --git a/colabs/keras/Keras_param_opti_using_sweeps.ipynb b/colabs/keras/Keras_param_opti_using_sweeps.ipynb index 2e05fb1b..a8b755f3 100644 --- a/colabs/keras/Keras_param_opti_using_sweeps.ipynb +++ b/colabs/keras/Keras_param_opti_using_sweeps.ipynb @@ -111,7 +111,7 @@ "metadata": {}, "outputs": [], "source": [ - "!wandb login" + "wandb.login()" ] }, { diff --git a/colabs/keras/Legacy_Keras_pipeline_with_Weights_and_Biases.ipynb b/colabs/keras/Legacy_Keras_pipeline_with_Weights_and_Biases.ipynb index ec3d3a0a..97836a06 100644 --- a/colabs/keras/Legacy_Keras_pipeline_with_Weights_and_Biases.ipynb +++ b/colabs/keras/Legacy_Keras_pipeline_with_Weights_and_Biases.ipynb @@ -121,7 +121,7 @@ "metadata": {}, "outputs": [], "source": [ - "!wandb login" + "wandb.login()" ] }, { diff --git a/colabs/keras/Simple_Keras_Integration.ipynb b/colabs/keras/Simple_Keras_Integration.ipynb index bd1e97a2..bf5d8739 100644 --- a/colabs/keras/Simple_Keras_Integration.ipynb +++ b/colabs/keras/Simple_Keras_Integration.ipynb @@ -129,7 +129,7 @@ "metadata": {}, "outputs": [], "source": [ - "!wandb login" + "wandb.login()" ] }, { diff --git a/colabs/keras/Use_WandbEvalCallback_in_your_Keras_workflow.ipynb b/colabs/keras/Use_WandbEvalCallback_in_your_Keras_workflow.ipynb index 58092c99..4cbce176 100644 --- a/colabs/keras/Use_WandbEvalCallback_in_your_Keras_workflow.ipynb +++ b/colabs/keras/Use_WandbEvalCallback_in_your_Keras_workflow.ipynb @@ -100,7 +100,7 @@ "metadata": {}, "outputs": [], "source": [ - "!wandb login" + "wandb.login()" ] }, { diff --git a/colabs/keras/Use_WandbMetricLogger_in_your_Keras_workflow.ipynb b/colabs/keras/Use_WandbMetricLogger_in_your_Keras_workflow.ipynb index 8bef5ab4..3c3ddec2 100644 --- a/colabs/keras/Use_WandbMetricLogger_in_your_Keras_workflow.ipynb +++ b/colabs/keras/Use_WandbMetricLogger_in_your_Keras_workflow.ipynb @@ -98,7 +98,7 @@ "metadata": {}, "outputs": [], "source": [ - "!wandb login" + "wandb.login()" ] }, { diff --git a/colabs/keras/Use_WandbModelCheckpoint_in_your_Keras_workflow.ipynb b/colabs/keras/Use_WandbModelCheckpoint_in_your_Keras_workflow.ipynb index fd5258b4..3ca4c90c 100644 --- a/colabs/keras/Use_WandbModelCheckpoint_in_your_Keras_workflow.ipynb +++ b/colabs/keras/Use_WandbModelCheckpoint_in_your_Keras_workflow.ipynb @@ -98,7 +98,7 @@ "metadata": {}, "outputs": [], "source": [ - "!wandb login" + "wandb.login()" ] }, { diff --git a/colabs/keras/cosine_decay_using_keras.ipynb b/colabs/keras/cosine_decay_using_keras.ipynb index ea9cbcf7..17e2d329 100644 --- a/colabs/keras/cosine_decay_using_keras.ipynb +++ b/colabs/keras/cosine_decay_using_keras.ipynb @@ -49,7 +49,7 @@ "metadata": {}, "outputs": [], "source": [ - "!wandb login" + "wandb.login()" ] }, { diff --git a/colabs/keras/keras_nsynth_instrument_prediction.ipynb b/colabs/keras/keras_nsynth_instrument_prediction.ipynb index 7f9153e7..6a4ec1f8 100644 --- a/colabs/keras/keras_nsynth_instrument_prediction.ipynb +++ b/colabs/keras/keras_nsynth_instrument_prediction.ipynb @@ -45,7 +45,7 @@ "metadata": {}, "outputs": [], "source": [ - "!wandb login" + "wandb.login()" ] }, { diff --git 
a/colabs/mmdetection/Train_Object_Detector_with_MMDetection_and_W&B.ipynb b/colabs/mmdetection/Train_Object_Detector_with_MMDetection_and_W&B.ipynb index 13127841..eb3ab018 100644 --- a/colabs/mmdetection/Train_Object_Detector_with_MMDetection_and_W&B.ipynb +++ b/colabs/mmdetection/Train_Object_Detector_with_MMDetection_and_W&B.ipynb @@ -155,7 +155,7 @@ "metadata": {}, "outputs": [], "source": [ - "!wandb login" + "wandb.login()" ] }, { diff --git a/colabs/mmdetection/Train_an_Object_Detection+Semantic_Segmentation_Model_with_MMDetection_and_W&B.ipynb b/colabs/mmdetection/Train_an_Object_Detection+Semantic_Segmentation_Model_with_MMDetection_and_W&B.ipynb index db94670d..62b6e301 100644 --- a/colabs/mmdetection/Train_an_Object_Detection+Semantic_Segmentation_Model_with_MMDetection_and_W&B.ipynb +++ b/colabs/mmdetection/Train_an_Object_Detection+Semantic_Segmentation_Model_with_MMDetection_and_W&B.ipynb @@ -153,7 +153,7 @@ "metadata": {}, "outputs": [], "source": [ - "!wandb login" + "wandb.login()" ] }, { diff --git a/colabs/mosaicml/MosaicML_Composer_and_wandb.ipynb b/colabs/mosaicml/MosaicML_Composer_and_wandb.ipynb index dc2e0f46..6b427d89 100644 --- a/colabs/mosaicml/MosaicML_Composer_and_wandb.ipynb +++ b/colabs/mosaicml/MosaicML_Composer_and_wandb.ipynb @@ -2,7 +2,7 @@ "cells": [ { "cell_type": "markdown", - "id": "5e6559e8", + "id": "0196e78a", "metadata": {}, "source": [ "\"Open\n", @@ -11,7 +11,7 @@ }, { "cell_type": "markdown", - "id": "acbebc67", + "id": "362c2ed6", "metadata": {}, "source": [ "\"Weights\n", @@ -24,7 +24,7 @@ }, { "cell_type": "markdown", - "id": "57b84004", + "id": "c740179e", "metadata": {}, "source": [ "[MosaicML Composer](https://docs.mosaicml.com) is a library for training neural networks better, faster, and cheaper. It contains many state-of-the-art methods for accelerating neural network training and improving generalization, along with an optional Trainer API that makes composing many different enhancements easy.\n", @@ -39,7 +39,7 @@ }, { "cell_type": "markdown", - "id": "eec2c407", + "id": "0eca27e4", "metadata": {}, "source": [ "W&B integration with Composer can automatically:\n", @@ -53,7 +53,7 @@ }, { "cell_type": "markdown", - "id": "66988495", + "id": "5640b1a0", "metadata": {}, "source": [ "### 🛠️ Installation and set-up\n", @@ -66,7 +66,7 @@ { "cell_type": "code", "execution_count": null, - "id": "135bc088", + "id": "764b0904", "metadata": {}, "outputs": [], "source": [ @@ -75,7 +75,7 @@ }, { "cell_type": "markdown", - "id": "8574cf66", + "id": "deb25a49", "metadata": {}, "source": [ "## Getting Started with Composer 🔥" @@ -83,7 +83,7 @@ }, { "cell_type": "markdown", - "id": "620b12c7", + "id": "e713fe81", "metadata": {}, "source": [ "Composer gives you access to a set of functions to speedup your models and infuse them with state of the art methods. For instance, you can insert [BlurPool](https://docs.mosaicml.com/en/latest/method_cards/blurpool.html) into your CNN by calling `CF.apply_blurpool(model)` into your PyTorch model. Take a look at all the [functional](https://docs.mosaicml.com/en/latest/functional_api.html) methods available." @@ -92,7 +92,7 @@ { "cell_type": "code", "execution_count": null, - "id": "5b42d839", + "id": "2c9413bd", "metadata": {}, "outputs": [], "source": [ @@ -111,7 +111,7 @@ }, { "cell_type": "markdown", - "id": "64177537", + "id": "0eb3e57b", "metadata": {}, "source": [ "> 💡 you can use this upgraded model with your favourite PyTorch training or... 
" @@ -119,7 +119,7 @@ }, { "cell_type": "markdown", - "id": "887ed891", + "id": "02c31ab7", "metadata": {}, "source": [ "## Use the `Trainer` class with Weights and Biases 🏋️‍♀️\n", @@ -135,7 +135,7 @@ { "cell_type": "code", "execution_count": null, - "id": "5e156cae", + "id": "1e12ce9d", "metadata": {}, "outputs": [], "source": [ @@ -146,7 +146,7 @@ { "cell_type": "code", "execution_count": null, - "id": "023ddcaa", + "id": "4dbc1493", "metadata": {}, "outputs": [], "source": [ @@ -165,7 +165,7 @@ }, { "cell_type": "markdown", - "id": "6bed58ce", + "id": "ef7be365", "metadata": {}, "source": [ "let's grab a copy of MNIST from `torchvision`" @@ -174,7 +174,7 @@ { "cell_type": "code", "execution_count": null, - "id": "1dd6466a", + "id": "b298a861", "metadata": {}, "outputs": [], "source": [ @@ -187,7 +187,7 @@ }, { "cell_type": "markdown", - "id": "74834fa6", + "id": "b798a9ed", "metadata": {}, "source": [ "we can import a simple ConvNet model to try" @@ -196,7 +196,7 @@ { "cell_type": "code", "execution_count": null, - "id": "53023ea7", + "id": "6498cf78", "metadata": {}, "outputs": [], "source": [ @@ -205,7 +205,7 @@ }, { "cell_type": "markdown", - "id": "602b9245", + "id": "8e9daaa5", "metadata": {}, "source": [ "### 📊 Tracking the experiment\n", @@ -215,7 +215,7 @@ { "cell_type": "code", "execution_count": null, - "id": "bf267683", + "id": "73bccc9f", "metadata": {}, "outputs": [], "source": [ @@ -240,7 +240,7 @@ }, { "cell_type": "markdown", - "id": "40dd887d", + "id": "0dc1f7f6", "metadata": {}, "source": [ "we are able to tweak what are we logging using `Callbacks` into the `Trainer` class." @@ -249,7 +249,7 @@ { "cell_type": "code", "execution_count": null, - "id": "a2ae3b59", + "id": "9b470f1a", "metadata": {}, "outputs": [], "source": [ @@ -260,7 +260,7 @@ }, { "cell_type": "markdown", - "id": "0ab089d5", + "id": "28920cc2", "metadata": {}, "source": [ "we include callbacks that measure the model throughput (and the learning rate) and logs them to Weights & Biases. [Callbacks](https://docs.mosaicml.com/en/latest/trainer/callbacks.html) control what is being logged, whereas loggers specify where the information is being saved. For more information on loggers, see [Logging](https://docs.mosaicml.com/en/latest/trainer/logging.html)." 
@@ -269,7 +269,7 @@ { "cell_type": "code", "execution_count": null, - "id": "592ad057", + "id": "d1cd982a", "metadata": {}, "outputs": [], "source": [ @@ -288,7 +288,7 @@ }, { "cell_type": "markdown", - "id": "06e6218a", + "id": "0888b0c5", "metadata": {}, "source": [ "once we are ready to train we call `fit`" @@ -297,7 +297,7 @@ { "cell_type": "code", "execution_count": null, - "id": "01b772f1", + "id": "2ca3468a", "metadata": {}, "outputs": [], "source": [ @@ -306,7 +306,7 @@ }, { "cell_type": "markdown", - "id": "ad73682a", + "id": "598495ee", "metadata": {}, "source": [ "## ⚙️ Advanced: Using callbacks to log sample predictions\n", @@ -319,7 +319,7 @@ { "cell_type": "code", "execution_count": null, - "id": "1a4e5088", + "id": "00401b44", "metadata": {}, "outputs": [], "source": [ @@ -348,7 +348,7 @@ }, { "cell_type": "markdown", - "id": "5a9f37e4", + "id": "6161475c", "metadata": {}, "source": [ "we add `LogPredictions` to the other callbacks" @@ -357,7 +357,7 @@ { "cell_type": "code", "execution_count": null, - "id": "085d175d", + "id": "94d39bd5", "metadata": {}, "outputs": [], "source": [ @@ -367,7 +367,7 @@ { "cell_type": "code", "execution_count": null, - "id": "23c76cbc", + "id": "8ea986a8", "metadata": {}, "outputs": [], "source": [ @@ -386,7 +386,7 @@ }, { "cell_type": "markdown", - "id": "277b0e09", + "id": "a4deb712", "metadata": {}, "source": [ "Once we're ready to train, we just call the `fit` method." @@ -395,7 +395,7 @@ { "cell_type": "code", "execution_count": null, - "id": "7ff55a74", + "id": "c8b00679", "metadata": {}, "outputs": [], "source": [ @@ -404,7 +404,7 @@ }, { "cell_type": "markdown", - "id": "8afd1f8f", + "id": "2c2e89f6", "metadata": {}, "source": [ "We can monitor losses, metrics, gradients, parameters and sample predictions as the model trains." 
@@ -412,7 +412,7 @@ }, { "cell_type": "markdown", - "id": "1d84a154", + "id": "f4889215", "metadata": {}, "source": [ "![composer.png](https://i.imgur.com/VFZLOB3.png?1)" @@ -420,7 +420,7 @@ }, { "cell_type": "markdown", - "id": "83f37a08", + "id": "97f57640", "metadata": {}, "source": [ "## 📚 Resources\n", @@ -430,7 +430,7 @@ }, { "cell_type": "markdown", - "id": "8abc3031", + "id": "c6722706", "metadata": {}, "source": [ "## ❓ Questions about W&B\n", diff --git a/colabs/openai/Fine_tune_OpenAI_with_Weights_and_Biases.ipynb b/colabs/openai/Fine_tune_OpenAI_with_Weights_and_Biases.ipynb index a2ec1217..f8eda926 100644 --- a/colabs/openai/Fine_tune_OpenAI_with_Weights_and_Biases.ipynb +++ b/colabs/openai/Fine_tune_OpenAI_with_Weights_and_Biases.ipynb @@ -41,7 +41,7 @@ }, { "cell_type": "code", - "execution_count": 1, + "execution_count": null, "metadata": {}, "outputs": [], "source": [ @@ -890,18 +890,6 @@ "kernelspec": { "display_name": "python3", "name": "python3" - }, - "language_info": { - "codemirror_mode": { - "name": "ipython", - "version": 3 - }, - "file_extension": ".py", - "mimetype": "text/x-python", - "name": "python", - "nbconvert_exporter": "python", - "pygments_lexer": "ipython3", - "version": "3.11.2" } }, "nbformat": 4, diff --git a/colabs/openai/OpenAI_API_Autologger_Quickstart.ipynb b/colabs/openai/OpenAI_API_Autologger_Quickstart.ipynb index b3767dc8..2e793dee 100644 --- a/colabs/openai/OpenAI_API_Autologger_Quickstart.ipynb +++ b/colabs/openai/OpenAI_API_Autologger_Quickstart.ipynb @@ -47,11 +47,7 @@ { "cell_type": "code", "execution_count": null, - "metadata": { - "vscode": { - "languageId": "python" - } - }, + "metadata": {}, "outputs": [], "source": [ "!pip install wandb openai -qU" @@ -73,11 +69,7 @@ { "cell_type": "code", "execution_count": null, - "metadata": { - "vscode": { - "languageId": "python" - } - }, + "metadata": {}, "outputs": [], "source": [ "import openai\n", @@ -105,11 +97,7 @@ { "cell_type": "code", "execution_count": null, - "metadata": { - "vscode": { - "languageId": "python" - } - }, + "metadata": {}, "outputs": [], "source": [ "# pass your OpenAI key\n", @@ -120,11 +108,7 @@ { "cell_type": "code", "execution_count": null, - "metadata": { - "vscode": { - "languageId": "python" - } - }, + "metadata": {}, "outputs": [], "source": [ "# make some calls to OpenAI \n", @@ -178,11 +162,7 @@ { "cell_type": "code", "execution_count": null, - "metadata": { - "vscode": { - "languageId": "python" - } - }, + "metadata": {}, "outputs": [], "source": [ "autolog.disable()" diff --git a/colabs/paddlepaddle/paddledetection/PaddleDetection_and_W&B_Your_one_stop_for_everything_object_detection.ipynb b/colabs/paddlepaddle/paddledetection/PaddleDetection_and_W&B_Your_one_stop_for_everything_object_detection.ipynb index ef757a1a..8bb85c06 100644 --- a/colabs/paddlepaddle/paddledetection/PaddleDetection_and_W&B_Your_one_stop_for_everything_object_detection.ipynb +++ b/colabs/paddlepaddle/paddledetection/PaddleDetection_and_W&B_Your_one_stop_for_everything_object_detection.ipynb @@ -119,7 +119,7 @@ "metadata": {}, "outputs": [], "source": [ - "!wandb login" + "wandb.login()" ] }, { diff --git a/colabs/paddlepaddle/paddleocr/Train_and_Debug_Your_OCR_Models_with_PaddleOCR_and_W&B.ipynb b/colabs/paddlepaddle/paddleocr/Train_and_Debug_Your_OCR_Models_with_PaddleOCR_and_W&B.ipynb index 484f7cc8..f3fef3e8 100644 --- a/colabs/paddlepaddle/paddleocr/Train_and_Debug_Your_OCR_Models_with_PaddleOCR_and_W&B.ipynb +++ 
b/colabs/paddlepaddle/paddleocr/Train_and_Debug_Your_OCR_Models_with_PaddleOCR_and_W&B.ipynb @@ -103,7 +103,7 @@ "metadata": {}, "outputs": [], "source": [ - "!wandb login" + "wandb.login()" ] }, { diff --git a/colabs/paella/Image-Variations.ipynb b/colabs/paella/Image-Variations.ipynb index 2f3ee438..33c0ba5d 100644 --- a/colabs/paella/Image-Variations.ipynb +++ b/colabs/paella/Image-Variations.ipynb @@ -2,7 +2,7 @@ "cells": [ { "cell_type": "markdown", - "id": "4d6fcb46", + "id": "e6551fcc", "metadata": {}, "source": [ "\"Open\n", @@ -11,7 +11,7 @@ }, { "cell_type": "markdown", - "id": "00f3f799", + "id": "7df20a25", "metadata": {}, "source": [ "# 🔥🔥 Image Variations with Paella + WandB Playground 🪄🐝\n", @@ -24,7 +24,7 @@ { "cell_type": "code", "execution_count": null, - "id": "903d4fb4", + "id": "03036e33", "metadata": {}, "outputs": [], "source": [ @@ -53,7 +53,7 @@ { "cell_type": "code", "execution_count": null, - "id": "14d37dc7", + "id": "e10c4b4f", "metadata": {}, "outputs": [], "source": [ @@ -89,7 +89,7 @@ { "cell_type": "code", "execution_count": null, - "id": "fcf96a10", + "id": "4bcf061f", "metadata": {}, "outputs": [], "source": [ @@ -100,7 +100,7 @@ { "cell_type": "code", "execution_count": null, - "id": "22f0c5ca", + "id": "72953d5d", "metadata": {}, "outputs": [], "source": [ @@ -125,7 +125,7 @@ { "cell_type": "code", "execution_count": null, - "id": "5043abfe", + "id": "c1d41153", "metadata": {}, "outputs": [], "source": [ @@ -198,7 +198,7 @@ { "cell_type": "code", "execution_count": null, - "id": "20019eac", + "id": "2ea9c73d", "metadata": {}, "outputs": [], "source": [ @@ -232,7 +232,7 @@ { "cell_type": "code", "execution_count": null, - "id": "39485bc8", + "id": "3fb179ef", "metadata": {}, "outputs": [], "source": [ @@ -262,7 +262,7 @@ { "cell_type": "code", "execution_count": null, - "id": "a51835d4", + "id": "d3d53f21", "metadata": {}, "outputs": [], "source": [ @@ -282,7 +282,7 @@ { "cell_type": "code", "execution_count": null, - "id": "bccc3c86", + "id": "0e163076", "metadata": {}, "outputs": [], "source": [ diff --git a/colabs/paella/Inpainting.ipynb b/colabs/paella/Inpainting.ipynb index b7f0fc96..ae21ed4f 100644 --- a/colabs/paella/Inpainting.ipynb +++ b/colabs/paella/Inpainting.ipynb @@ -2,7 +2,7 @@ "cells": [ { "cell_type": "markdown", - "id": "f9c62921", + "id": "a51db41e", "metadata": {}, "source": [ "\"Open\n", @@ -12,7 +12,7 @@ { "cell_type": "code", "execution_count": null, - "id": "dbb0aa27", + "id": "752bdb8a", "metadata": {}, "outputs": [], "source": [ @@ -26,7 +26,7 @@ { "cell_type": "code", "execution_count": null, - "id": "e22c1740", + "id": "b128ff01", "metadata": {}, "outputs": [], "source": [ @@ -55,7 +55,7 @@ { "cell_type": "code", "execution_count": null, - "id": "1a821410", + "id": "8ff4273f", "metadata": {}, "outputs": [], "source": [ @@ -90,7 +90,7 @@ { "cell_type": "code", "execution_count": null, - "id": "88c43a1a", + "id": "c3d88d2d", "metadata": {}, "outputs": [], "source": [ @@ -101,7 +101,7 @@ { "cell_type": "code", "execution_count": null, - "id": "521c9242", + "id": "44df1809", "metadata": {}, "outputs": [], "source": [ @@ -140,7 +140,7 @@ { "cell_type": "code", "execution_count": null, - "id": "3fcd0c06", + "id": "616acee0", "metadata": {}, "outputs": [], "source": [ @@ -213,7 +213,7 @@ { "cell_type": "code", "execution_count": null, - "id": "d8a07be6", + "id": "d96ec8b5", "metadata": {}, "outputs": [], "source": [ @@ -247,7 +247,7 @@ { "cell_type": "code", "execution_count": null, - "id": "45c4c80c", + "id": "5eebe4fa", "metadata": 
{}, "outputs": [], "source": [ @@ -265,7 +265,7 @@ { "cell_type": "code", "execution_count": null, - "id": "e93c67bb", + "id": "6731402d", "metadata": {}, "outputs": [], "source": [ @@ -297,7 +297,7 @@ { "cell_type": "code", "execution_count": null, - "id": "a39f1b2c", + "id": "2a99ace7", "metadata": {}, "outputs": [], "source": [ diff --git a/colabs/paella/Latent-Space-Interpolation.ipynb b/colabs/paella/Latent-Space-Interpolation.ipynb index 64ef0a24..b6725d1c 100644 --- a/colabs/paella/Latent-Space-Interpolation.ipynb +++ b/colabs/paella/Latent-Space-Interpolation.ipynb @@ -2,7 +2,7 @@ "cells": [ { "cell_type": "markdown", - "id": "631fa4cf", + "id": "b9d2a9e5", "metadata": {}, "source": [ "\"Open\n", @@ -12,7 +12,7 @@ { "cell_type": "code", "execution_count": null, - "id": "0723d1d7", + "id": "c9292760", "metadata": {}, "outputs": [], "source": [ @@ -26,7 +26,7 @@ { "cell_type": "code", "execution_count": null, - "id": "593e5b91", + "id": "ef1d9a73", "metadata": {}, "outputs": [], "source": [ @@ -55,7 +55,7 @@ { "cell_type": "code", "execution_count": null, - "id": "1c49c27a", + "id": "aa426f22", "metadata": {}, "outputs": [], "source": [ @@ -88,7 +88,7 @@ { "cell_type": "code", "execution_count": null, - "id": "6b34c686", + "id": "f201d581", "metadata": {}, "outputs": [], "source": [ @@ -99,7 +99,7 @@ { "cell_type": "code", "execution_count": null, - "id": "c4807b36", + "id": "2cc157dd", "metadata": {}, "outputs": [], "source": [ @@ -113,7 +113,7 @@ { "cell_type": "code", "execution_count": null, - "id": "d9cf7b37", + "id": "53fe0dd0", "metadata": {}, "outputs": [], "source": [ @@ -187,7 +187,7 @@ { "cell_type": "code", "execution_count": null, - "id": "21ffec76", + "id": "6a8c2393", "metadata": {}, "outputs": [], "source": [ @@ -221,7 +221,7 @@ { "cell_type": "code", "execution_count": null, - "id": "d89b136c", + "id": "a11b5b7c", "metadata": {}, "outputs": [], "source": [ @@ -278,7 +278,7 @@ { "cell_type": "code", "execution_count": null, - "id": "f8492571", + "id": "4d5f7b63", "metadata": {}, "outputs": [], "source": [ diff --git a/colabs/paella/Multi-Conditioning.ipynb b/colabs/paella/Multi-Conditioning.ipynb index 8a24f6ab..873ca63d 100644 --- a/colabs/paella/Multi-Conditioning.ipynb +++ b/colabs/paella/Multi-Conditioning.ipynb @@ -2,7 +2,7 @@ "cells": [ { "cell_type": "markdown", - "id": "361d4376", + "id": "fe90fd24", "metadata": {}, "source": [ "\"Open\n", @@ -11,7 +11,7 @@ }, { "cell_type": "markdown", - "id": "cc74bb07", + "id": "318fd50a", "metadata": {}, "source": [ "# 🔥🔥 Multi-Conditional Image Generation with Paella + WandB Playground 🪄🐝\n", @@ -24,7 +24,7 @@ { "cell_type": "code", "execution_count": null, - "id": "e7a9ba4f", + "id": "d3d461f8", "metadata": {}, "outputs": [], "source": [ @@ -53,7 +53,7 @@ { "cell_type": "code", "execution_count": null, - "id": "2d3bef06", + "id": "bb52df32", "metadata": {}, "outputs": [], "source": [ @@ -89,7 +89,7 @@ { "cell_type": "code", "execution_count": null, - "id": "79d039cd", + "id": "e3533942", "metadata": {}, "outputs": [], "source": [ @@ -100,7 +100,7 @@ { "cell_type": "code", "execution_count": null, - "id": "1b4882d7", + "id": "ff2b0cc2", "metadata": {}, "outputs": [], "source": [ @@ -120,7 +120,7 @@ { "cell_type": "code", "execution_count": null, - "id": "5c46b95e", + "id": "2aeb8cd4", "metadata": {}, "outputs": [], "source": [ @@ -194,7 +194,7 @@ { "cell_type": "code", "execution_count": null, - "id": "1a931f01", + "id": "dcef1dac", "metadata": {}, "outputs": [], "source": [ @@ -228,7 +228,7 @@ { "cell_type": "code", 
"execution_count": null, - "id": "4e177084", + "id": "11a0548a", "metadata": {}, "outputs": [], "source": [ @@ -259,7 +259,7 @@ { "cell_type": "code", "execution_count": null, - "id": "6c50f803", + "id": "9b20984f", "metadata": {}, "outputs": [], "source": [ diff --git a/colabs/paella/Orientation-Guided-Multi-Conditioning.ipynb b/colabs/paella/Orientation-Guided-Multi-Conditioning.ipynb index f37d3592..1392accc 100644 --- a/colabs/paella/Orientation-Guided-Multi-Conditioning.ipynb +++ b/colabs/paella/Orientation-Guided-Multi-Conditioning.ipynb @@ -2,7 +2,7 @@ "cells": [ { "cell_type": "markdown", - "id": "9c1c12f0", + "id": "4caab47d", "metadata": {}, "source": [ "\"Open\n", @@ -11,7 +11,7 @@ }, { "cell_type": "markdown", - "id": "0876094f", + "id": "c39d2d61", "metadata": {}, "source": [ "# 🔥🔥 Orientation Guided Multi-Conditional Image Generation with Paella + WandB Playground 🪄🐝\n", @@ -24,7 +24,7 @@ { "cell_type": "code", "execution_count": null, - "id": "5c73aeff", + "id": "e162b915", "metadata": {}, "outputs": [], "source": [ @@ -53,7 +53,7 @@ { "cell_type": "code", "execution_count": null, - "id": "d98ac2a7", + "id": "769bbf69", "metadata": {}, "outputs": [], "source": [ @@ -87,7 +87,7 @@ { "cell_type": "code", "execution_count": null, - "id": "620506d5", + "id": "e14a4831", "metadata": {}, "outputs": [], "source": [ @@ -98,7 +98,7 @@ { "cell_type": "code", "execution_count": null, - "id": "2f317b6e", + "id": "5a75569e", "metadata": {}, "outputs": [], "source": [ @@ -124,7 +124,7 @@ { "cell_type": "code", "execution_count": null, - "id": "1ab1b0a6", + "id": "e9bb0938", "metadata": {}, "outputs": [], "source": [ @@ -197,7 +197,7 @@ { "cell_type": "code", "execution_count": null, - "id": "3b79d1c9", + "id": "f0151079", "metadata": {}, "outputs": [], "source": [ @@ -231,7 +231,7 @@ { "cell_type": "code", "execution_count": null, - "id": "5865c4c9", + "id": "d6c6d26a", "metadata": {}, "outputs": [], "source": [ @@ -278,7 +278,7 @@ { "cell_type": "code", "execution_count": null, - "id": "8f3a239b", + "id": "de3c09ea", "metadata": {}, "outputs": [], "source": [ diff --git a/colabs/paella/Outpainting.ipynb b/colabs/paella/Outpainting.ipynb index 253b79db..386d6906 100644 --- a/colabs/paella/Outpainting.ipynb +++ b/colabs/paella/Outpainting.ipynb @@ -2,7 +2,7 @@ "cells": [ { "cell_type": "markdown", - "id": "3f469752", + "id": "e205baf0", "metadata": {}, "source": [ "\"Open\n", @@ -11,7 +11,7 @@ }, { "cell_type": "markdown", - "id": "4c7732a8", + "id": "def788f4", "metadata": {}, "source": [ "# 🔥🔥 Image Outpainting with Paella + WandB Playground 🪄🐝\n", @@ -24,7 +24,7 @@ { "cell_type": "code", "execution_count": null, - "id": "0500c760", + "id": "19414087", "metadata": {}, "outputs": [], "source": [ @@ -53,7 +53,7 @@ { "cell_type": "code", "execution_count": null, - "id": "086082b2", + "id": "4ef82556", "metadata": {}, "outputs": [], "source": [ @@ -89,7 +89,7 @@ { "cell_type": "code", "execution_count": null, - "id": "717de70c", + "id": "7274ce33", "metadata": {}, "outputs": [], "source": [ @@ -100,7 +100,7 @@ { "cell_type": "code", "execution_count": null, - "id": "7691dcc3", + "id": "8cd2036d", "metadata": {}, "outputs": [], "source": [ @@ -139,7 +139,7 @@ { "cell_type": "code", "execution_count": null, - "id": "e1caa2c4", + "id": "1057acf3", "metadata": {}, "outputs": [], "source": [ @@ -212,7 +212,7 @@ { "cell_type": "code", "execution_count": null, - "id": "fae7b4d1", + "id": "b6a7beac", "metadata": {}, "outputs": [], "source": [ @@ -246,7 +246,7 @@ { "cell_type": "code", 
"execution_count": null, - "id": "ec739af3", + "id": "757f8a53", "metadata": {}, "outputs": [], "source": [ @@ -264,7 +264,7 @@ { "cell_type": "code", "execution_count": null, - "id": "b727ff64", + "id": "2574a92e", "metadata": {}, "outputs": [], "source": [ @@ -301,7 +301,7 @@ { "cell_type": "code", "execution_count": null, - "id": "14aeafb1", + "id": "afff6dd4", "metadata": {}, "outputs": [], "source": [ diff --git a/colabs/paella/Structural-Morphing.ipynb b/colabs/paella/Structural-Morphing.ipynb index 19ab1ef9..c25b6dd4 100644 --- a/colabs/paella/Structural-Morphing.ipynb +++ b/colabs/paella/Structural-Morphing.ipynb @@ -2,7 +2,7 @@ "cells": [ { "cell_type": "markdown", - "id": "65899be7", + "id": "b3a265c8", "metadata": {}, "source": [ "\"Open\n", @@ -11,7 +11,7 @@ }, { "cell_type": "markdown", - "id": "f8776d95", + "id": "c4165d97", "metadata": {}, "source": [ "# 🔥🔥 Structural Morphing of Images with Paella + WandB Playground 🪄🐝\n", @@ -24,7 +24,7 @@ { "cell_type": "code", "execution_count": null, - "id": "904fb436", + "id": "a28f9cb0", "metadata": {}, "outputs": [], "source": [ @@ -53,7 +53,7 @@ { "cell_type": "code", "execution_count": null, - "id": "9f0bd618", + "id": "966479fa", "metadata": {}, "outputs": [], "source": [ @@ -88,7 +88,7 @@ { "cell_type": "code", "execution_count": null, - "id": "9a7f2514", + "id": "32ceb3d7", "metadata": {}, "outputs": [], "source": [ @@ -99,7 +99,7 @@ { "cell_type": "code", "execution_count": null, - "id": "9031290b", + "id": "4bc326f1", "metadata": {}, "outputs": [], "source": [ @@ -124,7 +124,7 @@ { "cell_type": "code", "execution_count": null, - "id": "349f8255", + "id": "09aba79c", "metadata": {}, "outputs": [], "source": [ @@ -197,7 +197,7 @@ { "cell_type": "code", "execution_count": null, - "id": "08b7c8b4", + "id": "399194b3", "metadata": {}, "outputs": [], "source": [ @@ -231,7 +231,7 @@ { "cell_type": "code", "execution_count": null, - "id": "c10f09cc", + "id": "97287634", "metadata": {}, "outputs": [], "source": [ @@ -249,7 +249,7 @@ { "cell_type": "code", "execution_count": null, - "id": "982332f3", + "id": "c4b4daf8", "metadata": {}, "outputs": [], "source": [ @@ -278,7 +278,7 @@ { "cell_type": "code", "execution_count": null, - "id": "8a647254", + "id": "87f06f8a", "metadata": {}, "outputs": [], "source": [ diff --git a/colabs/paella/Text-Conditional.ipynb b/colabs/paella/Text-Conditional.ipynb index 48dcf23b..2b076f69 100644 --- a/colabs/paella/Text-Conditional.ipynb +++ b/colabs/paella/Text-Conditional.ipynb @@ -2,7 +2,7 @@ "cells": [ { "cell_type": "markdown", - "id": "ca9fdb97", + "id": "ffc4c8a0", "metadata": {}, "source": [ "\"Open\n", @@ -11,7 +11,7 @@ }, { "cell_type": "markdown", - "id": "8efa3520", + "id": "33ecf3a8", "metadata": {}, "source": [ "# 🔥🔥 Text-Conditional Image Generation with Paella + WandB Playground 🪄🐝\n", @@ -24,7 +24,7 @@ { "cell_type": "code", "execution_count": null, - "id": "756c31f4", + "id": "9842f6c0", "metadata": {}, "outputs": [], "source": [ @@ -53,7 +53,7 @@ { "cell_type": "code", "execution_count": null, - "id": "1b3402e6", + "id": "94352978", "metadata": {}, "outputs": [], "source": [ @@ -83,7 +83,7 @@ { "cell_type": "code", "execution_count": null, - "id": "89d05080", + "id": "cd216ad2", "metadata": {}, "outputs": [], "source": [ @@ -94,7 +94,7 @@ { "cell_type": "code", "execution_count": null, - "id": "4b4b2e95", + "id": "43772a65", "metadata": {}, "outputs": [], "source": [ @@ -108,7 +108,7 @@ { "cell_type": "code", "execution_count": null, - "id": "848691aa", + "id": "1d5662b1", "metadata": 
{}, "outputs": [], "source": [ @@ -182,7 +182,7 @@ { "cell_type": "code", "execution_count": null, - "id": "b65d3b6d", + "id": "23700dc8", "metadata": {}, "outputs": [], "source": [ @@ -216,7 +216,7 @@ { "cell_type": "code", "execution_count": null, - "id": "8826240b", + "id": "42c83c7e", "metadata": {}, "outputs": [], "source": [ @@ -238,7 +238,7 @@ { "cell_type": "code", "execution_count": null, - "id": "c30cda12", + "id": "cddd0c03", "metadata": {}, "outputs": [], "source": [ diff --git a/colabs/prompts/W&B_Prompts_with_Custom_Columns.ipynb b/colabs/prompts/W&B_Prompts_with_Custom_Columns.ipynb index ebc811cf..55708888 100644 --- a/colabs/prompts/W&B_Prompts_with_Custom_Columns.ipynb +++ b/colabs/prompts/W&B_Prompts_with_Custom_Columns.ipynb @@ -1,618 +1,543 @@ { - "cells": [ - { - "cell_type": "markdown", - "metadata": { - "id": "e-ZYaV5KGVmA" - }, - "source": [ - "\"Open\n", - "" - ] - }, - { - "cell_type": "markdown", - "metadata": { - "id": "gJSVEAGWGVmA" - }, - "source": [ - "\"Weights\n", - "" - ] - }, - { - "cell_type": "markdown", - "metadata": { - "id": "9f7yMKLwGVmA" - }, - "source": [ - "**[Weights & Biases Prompts](https://docs.wandb.ai/guides/prompts?utm_source=code&utm_medium=colab&utm_campaign=prompts)** is a suite of LLMOps tools built for the development of LLM-powered applications.\n", - "\n", - "Use W&B Prompts to visualize and inspect the execution flow of your LLMs, analyze the inputs and outputs of your LLMs, view the intermediate results and securely store and manage your prompts and LLM chain configurations.\n", - "\n", - "#### [🪄 View Prompts In Action](https://wandb.ai/timssweeney/prompts-demo/)\n", - "\n", - "**In this notebook we will demostrate W&B Prompts:**\n", - "\n", - "- Using our 1-line LangChain integration\n", - "- Using our Trace class when building your own LLM Pipelines\n", - "\n", - "See here for the full [W&B Prompts documentation](https://docs.wandb.ai/guides/prompts)\n" - ] - }, - { - "cell_type": "markdown", - "metadata": { - "id": "A4wI3b_8GVmB" - }, - "source": [ - "## Installation" - ] - }, - { - "cell_type": "code", - "execution_count": 1, - "metadata": { - "id": "nDoIqQ8_GVmB" - }, - "outputs": [], - "source": [ - "!pip install \"wandb>=0.15.4\" -qqq\n", - "!pip install \"langchain>=0.0.218\" openai -qqq" - ] - }, - { - "cell_type": "code", - "execution_count": 2, - "metadata": { - "id": "PcGiSWBSGVmB" - }, - "outputs": [], - "source": [ - "import langchain\n", - "assert langchain.__version__ >= \"0.0.218\", \"Please ensure you are using LangChain v0.0.188 or higher\"" - ] - }, - { - "cell_type": "markdown", - "metadata": { - "id": "pbmQIsjJGVmB" - }, - "source": [ - "## Setup\n", - "\n", - "This demo requires that you have an [OpenAI key](https://platform.openai.com)" - ] - }, - { - "cell_type": "code", - "execution_count": 3, - "metadata": { - "colab": { - "base_uri": "https://localhost:8080/" - }, - "id": "ZH4g2B0lGVmB", - "outputId": "22295db6-5369-474d-a8ea-fb45c4c92085" - }, - "outputs": [ - { - "name": "stdout", - "output_type": "stream", - "text": [ - "Paste your OpenAI key from: https://platform.openai.com/account/api-keys\n", - "··········\n", - "OpenAI API key configured\n" - ] - } - ], - "source": [ - "import os\n", - "from getpass import getpass\n", - "\n", - "if os.getenv(\"OPENAI_API_KEY\") is None:\n", - " os.environ[\"OPENAI_API_KEY\"] = getpass(\"Paste your OpenAI key from: https://platform.openai.com/account/api-keys\\n\")\n", - "assert os.getenv(\"OPENAI_API_KEY\", \"\").startswith(\"sk-\"), \"This doesn't look like a 
valid OpenAI API key\"\n", - "print(\"OpenAI API key configured\")" - ] - }, - { - "cell_type": "markdown", - "metadata": { - "id": "79KOB2EhGVmB" - }, - "source": [ - "# W&B Prompts\n", - "\n", - "W&B Prompts consists of three main components:\n", - "\n", - "**Trace table**: Overview of the inputs and outputs of a chain.\n", - "\n", - "**Trace timeline**: Displays the execution flow of the chain and is color-coded according to component types.\n", - "\n", - "**Model architecture**: View details about the structure of the chain and the parameters used to initialize each component of the chain.\n", - "\n", - "After running this section, you will see a new panel automatically created in your workspace, showing each execution, the trace, and the model architecture" - ] - }, - { - "cell_type": "markdown", - "metadata": { - "id": "5kxmdm3zGVmC" - }, - "source": [ - "\"Weights" - ] - }, - { - "cell_type": "markdown", - "metadata": { - "id": "9u97K5vVGVmC" - }, - "source": [ - "## Maths with LangChain" - ] - }, - { - "cell_type": "markdown", - "metadata": { - "id": "oneRFmv6GVmC" - }, - "source": [ - "Set the `LANGCHAIN_WANDB_TRACING` environment variable as well as any other relevant [W&B environment variables](https://docs.wandb.ai/guides/track/environment-variables). This could includes a W&B project name, team name, and more. See [wandb.init](https://docs.wandb.ai/ref/python/init) for a full list of arguments." - ] - }, - { - "cell_type": "code", - "execution_count": 4, - "metadata": { - "id": "ACl-rMtAGVmC" - }, - "outputs": [], - "source": [ - "os.environ[\"LANGCHAIN_WANDB_TRACING\"] = \"true\"\n", - "os.environ[\"WANDB_PROJECT\"] = \"langchain-testing\"" - ] - }, - { - "cell_type": "code", - "execution_count": 5, - "metadata": { - "id": "csp3MXG4GVmC" - }, - "outputs": [], - "source": [ - "from langchain.chat_models import ChatOpenAI\n", - "from langchain.agents import load_tools, initialize_agent, AgentType" - ] - }, - { - "cell_type": "markdown", - "metadata": { - "id": "2hWU2GcAGVmC" - }, - "source": [ - "Create a standard math Agent using LangChain" - ] - }, - { - "cell_type": "code", - "execution_count": 6, - "metadata": { - "id": "l_JkVMlRGVmC" - }, - "outputs": [], - "source": [ - "llm = ChatOpenAI(temperature=0)\n", - "tools = load_tools([\"llm-math\"], llm=llm)\n", - "math_agent = initialize_agent(tools,\n", - " llm,\n", - " agent=AgentType.ZERO_SHOT_REACT_DESCRIPTION)" - ] - }, - { - "cell_type": "markdown", - "metadata": { - "id": "9FFviwCPGVmC" - }, - "source": [ - "Use LangChain as normal by calling your Agent.\n", - "\n", - " You will see a Weights & Biases run start and you will be asked for your [Weights & Biases API key](wwww.wandb.ai/authorize). Once your enter your API key, the inputs and outputs of your Agent calls will start to be streamed to the Weights & Biases App." 
- ] - }, - { - "cell_type": "code", - "execution_count": 7, - "metadata": { - "colab": { - "base_uri": "https://localhost:8080/", - "height": 178 - }, - "id": "y-RHjVN4GVmC", - "outputId": "5ccd5f32-6137-46c3-9abd-d458dbdbacca" - }, - "outputs": [ - { - "name": "stderr", - "output_type": "stream", - "text": [ - "\u001b[34m\u001b[1mwandb\u001b[0m: Streaming LangChain activity to W&B at https://wandb.ai/carey/langchain-testing/runs/lcznj5lg\n", - "\u001b[34m\u001b[1mwandb\u001b[0m: `WandbTracer` is currently in beta.\n", - "\u001b[34m\u001b[1mwandb\u001b[0m: Please report any issues to https://github.com/wandb/wandb/issues with the tag `langchain`.\n" - ] - }, - { - "name": "stdout", - "output_type": "stream", - "text": [ - "LLMMathChain._evaluate(\"\n", - "import math\n", - "math.sqrt(5.4)\n", - "\") raised error: invalid syntax (, line 1). Please try again with a valid numerical expression\n", - "0.005720801417544866\n", - "0.15096209512635608\n" - ] - } - ], - "source": [ - "# some sample maths questions\n", - "questions = [\n", - " \"Find the square root of 5.4.\",\n", - " \"What is 3 divided by 7.34 raised to the power of pi?\",\n", - " \"What is the sin of 0.47 radians, divided by the cube root of 27?\"\n", - "]\n", - "\n", - "for question in questions:\n", - " try:\n", - " # call your Agent as normal\n", - " answer = math_agent.run(question)\n", - " print(answer)\n", - " except Exception as e:\n", - " # any errors will be also logged to Weights & Biases\n", - " print(e)\n", - " pass" - ] - }, - { - "cell_type": "markdown", - "metadata": { - "id": "SNYFSaUrGVmC" - }, - "source": [ - "Once each Agent execution completes, all calls in your LangChain object will be logged to Weights & Biases" - ] - }, - { - "cell_type": "markdown", - "metadata": { - "id": "m0bL1xpkGVmC" - }, - "source": [ - "### LangChain Context Manager\n", - "Depending on your use case, you might instead prefer to use a context manager to manage your logging to W&B.\n", - "\n", - "**✨ New: Custom columns** can be logged directly to W&B to display in the same Trace Table with this snippet:\n", - "```python\n", - "import wandb\n", - "wandb.log(custom_metrics_dict, commit=False})\n", - "```\n", - "Use `commit=False` to make sure that metadata is logged to the same row of the Trace Table as the LangChain output." 
- ] - }, - { - "cell_type": "code", - "execution_count": 10, - "metadata": { - "colab": { - "base_uri": "https://localhost:8080/", - "height": 35 - }, - "id": "7i9Pj1NKGVmC", - "outputId": "b44f3ae7-fd49-437f-af7b-fb8f82056bd0" - }, - "outputs": [ - { - "data": { - "application/vnd.google.colaboratory.intrinsic+json": { - "type": "string" - }, - "text/plain": [ - "'1.0891804557407723'" - ] - }, - "execution_count": 10, - "metadata": {}, - "output_type": "execute_result" - } - ], - "source": [ - "from langchain.callbacks import wandb_tracing_enabled\n", - "import wandb # To enable custom column logging with wandb.run.log()\n", - "\n", - "# unset the environment variable and use a context manager instead\n", - "if \"LANGCHAIN_WANDB_TRACING\" in os.environ:\n", - " del os.environ[\"LANGCHAIN_WANDB_TRACING\"]\n", - "\n", - "# enable tracing using a context manager\n", - "with wandb_tracing_enabled():\n", - " for i in range (10):\n", - " # Log any custom columns you'd like to add to the Trace Table\n", - " wandb.log({\"custom_column\": i}, commit=False)\n", - " try:\n", - " math_agent.run(f\"What is {i} raised to .123243 power?\") # this should be traced\n", - " except:\n", - " pass\n", - "\n", - "math_agent.run(\"What is 2 raised to .123243 power?\") # this should not be traced" - ] - }, - { - "cell_type": "markdown", - "metadata": { - "id": "JDLzoorhGVmC" - }, - "source": [ - "# Non-Lang Chain Implementation\n", - "\n", - "\n", - "A W&B Trace is created by logging 1 or more \"spans\". A root span is expected, which can accept nested child spans, which can in turn accept their own child spans. A Span represents a unit of work, Spans can have type `AGENT`, `TOOL`, `LLM` or `CHAIN`\n", - "\n", - "When logging with Trace, a single W&B run can have multiple calls to a LLM, Tool, Chain or Agent logged to it, there is no need to start a new W&B run after each generation from your model or pipeline, instead each call will be appended to the Trace Table.\n", - "\n", - "In this quickstart, we will how to log a single call to an OpenAI model to W&B Trace as a single span. Then we will show how to log a more complex series of nested spans.\n", - "\n", - "## Logging with W&B Trace" - ] - }, - { - "cell_type": "markdown", - "metadata": { - "id": "7z98yfoqGVmD" - }, - "source": [ - "Call wandb.init to start a W&B run. Here you can pass a W&B project name as well as an entity name (if logging to a W&B Team), as well as a config and more. See wandb.init for the full list of arguments.\n", - "\n", - "You will see a Weights & Biases run start and be asked for your [Weights & Biases API key](wwww.wandb.ai/authorize). Once your enter your API key, the inputs and outputs of your Agent calls will start to be streamed to the Weights & Biases App.\n", - "\n", - "**Note:** A W&B run supports logging as many traces you needed to a single run, i.e. you can make multiple calls of `run.log` without the need to create a new run each time" - ] - }, - { - "cell_type": "code", - "execution_count": null, - "metadata": { - "id": "ZcvgzZ55GVmD" - }, - "outputs": [], - "source": [ - "import wandb\n", - "\n", - "# start a wandb run to log to\n", - "wandb.init(project=\"trace-example\")" - ] - }, - { - "cell_type": "markdown", - "metadata": { - "id": "4_3Wrg2YGVmD" - }, - "source": [ - "You can also set the entity argument in wandb.init if logging to a W&B Team.\n", - "\n", - "### Logging a single Span\n", - "Now we will query OpenAI times and log the results to a W&B Trace. 
We will log the inputs and outputs, start and end times, whether the OpenAI call was successful, the token usage, and additional metadata.\n", - "\n", - "You can see the full description of the arguments to the Trace class [here](https://soumik12345.github.io/wandb-addons/prompts/tracer/)." - ] - }, + "cells": [ + { + "cell_type": "markdown", + "metadata": {}, + "source": [ + "\"Open\n", + "" + ] + }, + { + "cell_type": "markdown", + "metadata": {}, + "source": [ + "\"Weights\n", + "" + ] + }, + { + "cell_type": "markdown", + "metadata": {}, + "source": [ + "**[Weights & Biases Prompts](https://docs.wandb.ai/guides/prompts?utm_source=code&utm_medium=colab&utm_campaign=prompts)** is a suite of LLMOps tools built for the development of LLM-powered applications.\n", + "\n", + "Use W&B Prompts to visualize and inspect the execution flow of your LLMs, analyze the inputs and outputs of your LLMs, view the intermediate results and securely store and manage your prompts and LLM chain configurations.\n", + "\n", + "#### [🪄 View Prompts In Action](https://wandb.ai/timssweeney/prompts-demo/)\n", + "\n", + "**In this notebook we will demostrate W&B Prompts:**\n", + "\n", + "- Using our 1-line LangChain integration\n", + "- Using our Trace class when building your own LLM Pipelines\n", + "\n", + "See here for the full [W&B Prompts documentation](https://docs.wandb.ai/guides/prompts)\n" + ] + }, + { + "cell_type": "markdown", + "metadata": {}, + "source": [ + "## Installation" + ] + }, + { + "cell_type": "code", + "execution_count": null, + "metadata": {}, + "outputs": [], + "source": [ + "!pip install \"wandb>=0.15.4\" -qqq\n", + "!pip install \"langchain>=0.0.218\" openai -qqq" + ] + }, + { + "cell_type": "code", + "execution_count": null, + "metadata": {}, + "outputs": [], + "source": [ + "import langchain\n", + "assert langchain.__version__ >= \"0.0.218\", \"Please ensure you are using LangChain v0.0.188 or higher\"" + ] + }, + { + "cell_type": "markdown", + "metadata": {}, + "source": [ + "## Setup\n", + "\n", + "This demo requires that you have an [OpenAI key](https://platform.openai.com)" + ] + }, + { + "cell_type": "code", + "execution_count": null, + "metadata": {}, + "outputs": [ { - "cell_type": "code", - "execution_count": null, - "metadata": { - "id": "q2pkMhpMGVmD" - }, - "outputs": [], - "source": [ - "import openai\n", - "import datetime\n", - "from wandb.sdk.data_types.trace_tree import Trace\n", - "\n", - "openai.api_key = os.environ[\"OPENAI_API_KEY\"]\n", - "\n", - "# define your conifg\n", - "model_name = \"gpt-3.5-turbo\"\n", - "temperature = 0.7\n", - "system_message = \"You are a helpful assistant that always replies in 3 concise bullet points using markdown.\"\n", - "\n", - "queries_ls = [\n", - " \"What is the capital of France?\",\n", - " \"How do I boil an egg?\" * 10000, # deliberately trigger an openai error\n", - " \"What to do if the aliens arrive?\"\n", - "]\n", - "\n", - "for query in queries_ls:\n", - " messages=[\n", - " {\"role\": \"system\", \"content\": system_message},\n", - " {\"role\": \"user\", \"content\": query}\n", - " ]\n", - "\n", - " start_time_ms = datetime.datetime.now().timestamp() * 1000\n", - " try:\n", - " response = openai.ChatCompletion.create(model=model_name,\n", - " messages=messages,\n", - " temperature=temperature\n", - " )\n", - "\n", - " end_time_ms = round(datetime.datetime.now().timestamp() * 1000) # logged in milliseconds\n", - " status=\"success\"\n", - " status_message=None,\n", - " response_text = 
response[\"choices\"][0][\"message\"][\"content\"]\n", - " token_usage = response[\"usage\"].to_dict()\n", - "\n", - "\n", - " except Exception as e:\n", - " end_time_ms = round(datetime.datetime.now().timestamp() * 1000) # logged in milliseconds\n", - " status=\"error\"\n", - " status_message=str(e)\n", - " response_text = \"\"\n", - " token_usage = {}\n", - "\n", - " # create a span in wandb\n", - " root_span = Trace(\n", - " name=\"root_span\",\n", - " kind=\"llm\", # kind can be \"llm\", \"chain\", \"agent\" or \"tool\"\n", - " status_code=status,\n", - " status_message=status_message,\n", - " metadata={\"temperature\": temperature,\n", - " \"token_usage\": token_usage,\n", - " \"model_name\": model_name},\n", - " start_time_ms=start_time_ms,\n", - " end_time_ms=end_time_ms,\n", - " inputs={\"system_prompt\": system_message, \"query\": query},\n", - " outputs={\"response\": response_text},\n", - " )\n", - "\n", - " # log the span to wandb\n", - " root_span.log(name=\"openai_trace\")" - ] - }, + "name": "stdout", + "output_type": "stream", + "text": [ + "Paste your OpenAI key from: https://platform.openai.com/account/api-keys\n", + "··········\n", + "OpenAI API key configured\n" + ] + } + ], + "source": [ + "import os\n", + "from getpass import getpass\n", + "\n", + "if os.getenv(\"OPENAI_API_KEY\") is None:\n", + " os.environ[\"OPENAI_API_KEY\"] = getpass(\"Paste your OpenAI key from: https://platform.openai.com/account/api-keys\\n\")\n", + "assert os.getenv(\"OPENAI_API_KEY\", \"\").startswith(\"sk-\"), \"This doesn't look like a valid OpenAI API key\"\n", + "print(\"OpenAI API key configured\")" + ] + }, + { + "cell_type": "markdown", + "metadata": {}, + "source": [ + "# W&B Prompts\n", + "\n", + "W&B Prompts consists of three main components:\n", + "\n", + "**Trace table**: Overview of the inputs and outputs of a chain.\n", + "\n", + "**Trace timeline**: Displays the execution flow of the chain and is color-coded according to component types.\n", + "\n", + "**Model architecture**: View details about the structure of the chain and the parameters used to initialize each component of the chain.\n", + "\n", + "After running this section, you will see a new panel automatically created in your workspace, showing each execution, the trace, and the model architecture" + ] + }, + { + "cell_type": "markdown", + "metadata": {}, + "source": [ + "\"Weights" + ] + }, + { + "cell_type": "markdown", + "metadata": {}, + "source": [ + "## Maths with LangChain" + ] + }, + { + "cell_type": "markdown", + "metadata": {}, + "source": [ + "Set the `LANGCHAIN_WANDB_TRACING` environment variable as well as any other relevant [W&B environment variables](https://docs.wandb.ai/guides/track/environment-variables). This could includes a W&B project name, team name, and more. See [wandb.init](https://docs.wandb.ai/ref/python/init) for a full list of arguments." 
+ ] + }, + { + "cell_type": "code", + "execution_count": null, + "metadata": {}, + "outputs": [], + "source": [ + "os.environ[\"LANGCHAIN_WANDB_TRACING\"] = \"true\"\n", + "os.environ[\"WANDB_PROJECT\"] = \"langchain-testing\"" + ] + }, + { + "cell_type": "code", + "execution_count": null, + "metadata": {}, + "outputs": [], + "source": [ + "from langchain.chat_models import ChatOpenAI\n", + "from langchain.agents import load_tools, initialize_agent, AgentType" + ] + }, + { + "cell_type": "markdown", + "metadata": {}, + "source": [ + "Create a standard math Agent using LangChain" + ] + }, + { + "cell_type": "code", + "execution_count": null, + "metadata": {}, + "outputs": [], + "source": [ + "llm = ChatOpenAI(temperature=0)\n", + "tools = load_tools([\"llm-math\"], llm=llm)\n", + "math_agent = initialize_agent(tools,\n", + " llm,\n", + " agent=AgentType.ZERO_SHOT_REACT_DESCRIPTION)" + ] + }, + { + "cell_type": "markdown", + "metadata": {}, + "source": [ + "Use LangChain as normal by calling your Agent.\n", + "\n", + " You will see a Weights & Biases run start and you will be asked for your [Weights & Biases API key](wwww.wandb.ai/authorize). Once your enter your API key, the inputs and outputs of your Agent calls will start to be streamed to the Weights & Biases App." + ] + }, + { + "cell_type": "code", + "execution_count": null, + "metadata": {}, + "outputs": [ { - "cell_type": "markdown", - "metadata": { - "id": "XFcwFgaDGVmD" - }, - "source": [ - "### Logging a LLM pipeline using nested Spans\n", - "\n", - "In this example we will simulate an Agent being called, which then calls a LLM Chain, which calls an OpenAI LLM and then the Agent \"calls\" a Calculator tool.\n", - "\n", - "The inputs, outputs and metadata for each step in the execution of our \"Agent\" is logged in its own span. 
Spans can have child" - ] + "name": "stderr", + "output_type": "stream", + "text": [ + "\u001b[34m\u001b[1mwandb\u001b[0m: Streaming LangChain activity to W&B at https://wandb.ai/carey/langchain-testing/runs/lcznj5lg\n", + "\u001b[34m\u001b[1mwandb\u001b[0m: `WandbTracer` is currently in beta.\n", + "\u001b[34m\u001b[1mwandb\u001b[0m: Please report any issues to https://github.com/wandb/wandb/issues with the tag `langchain`.\n" + ] }, { - "cell_type": "code", - "execution_count": null, - "metadata": { - "id": "ACMaGuYUGVmD" - }, - "outputs": [], - "source": [ - "import time\n", - "\n", - "openai.api_key = os.environ[\"OPENAI_API_KEY\"]\n", - "\n", - "# The query our agent has to answer\n", - "query = \"How many days until the next US election?\"\n", - "\n", - "# part 1 - an Agent is started...\n", - "start_time_ms = round(datetime.datetime.now().timestamp() * 1000)\n", - "\n", - "root_span = Trace(\n", - " name=\"MyAgent\",\n", - " kind=\"agent\",\n", - " start_time_ms=start_time_ms,\n", - " metadata={\"user\": \"optimus_12\"})\n", - "\n", - "\n", - "# part 2 - The Agent calls into a LLMChain..\n", - "chain_span = Trace(\n", - " name=\"LLMChain\",\n", - " kind=\"chain\",\n", - " start_time_ms=start_time_ms)\n", - "\n", - "# add the Chain span as a child of the root\n", - "root_span.add_child(chain_span)\n", - "\n", - "\n", - "# part 3 - the LLMChain calls an OpenAI LLM...\n", - "messages=[\n", - " {\"role\": \"system\", \"content\": system_message},\n", - " {\"role\": \"user\", \"content\": query}\n", - "]\n", - "\n", - "response = openai.ChatCompletion.create(model=model_name,\n", - " messages=messages,\n", - " temperature=temperature)\n", - "\n", - "llm_end_time_ms = round(datetime.datetime.now().timestamp() * 1000)\n", - "response_text = response[\"choices\"][0][\"message\"][\"content\"]\n", - "token_usage = response[\"usage\"].to_dict()\n", - "\n", - "llm_span = Trace(\n", - " name=\"OpenAI\",\n", - " kind=\"llm\",\n", - " status_code=\"success\",\n", - " metadata={\"temperature\":temperature,\n", - " \"token_usage\": token_usage,\n", - " \"model_name\":model_name},\n", - " start_time_ms=start_time_ms,\n", - " end_time_ms=llm_end_time_ms,\n", - " inputs={\"system_prompt\":system_message, \"query\":query},\n", - " outputs={\"response\": response_text},\n", - " )\n", - "\n", - "# add the LLM span as a child of the Chain span...\n", - "chain_span.add_child(llm_span)\n", - "\n", - "# update the end time of the Chain span\n", - "chain_span.add_inputs_and_outputs(\n", - " inputs={\"query\":query},\n", - " outputs={\"response\": response_text})\n", - "\n", - "# update the Chain span's end time\n", - "chain_span._span.end_time_ms = llm_end_time_ms\n", - "\n", - "\n", - "# part 4 - the Agent then calls a Tool...\n", - "time.sleep(3)\n", - "days_to_election = 117\n", - "tool_end_time_ms = round(datetime.datetime.now().timestamp() * 1000)\n", - "\n", - "# create a Tool span\n", - "tool_span = Trace(\n", - " name=\"Calculator\",\n", - " kind=\"tool\",\n", - " status_code=\"success\",\n", - " start_time_ms=llm_end_time_ms,\n", - " end_time_ms=tool_end_time_ms,\n", - " inputs={\"input\": response_text},\n", - " outputs={\"result\": days_to_election})\n", - "\n", - "# add the TOOL span as a child of the root\n", - "root_span.add_child(tool_span)\n", - "\n", - "\n", - "# part 5 - the final results from the tool are added\n", - "root_span.add_inputs_and_outputs(inputs={\"query\": query},\n", - " outputs={\"result\": days_to_election})\n", - "root_span._span.end_time_ms = tool_end_time_ms\n", - "\n", - 
"\n", - "# part 6 - log all spans to W&B by logging the root span\n", - "root_span.log(name=\"openai_trace\")" - ] - }, + "name": "stdout", + "output_type": "stream", + "text": [ + "LLMMathChain._evaluate(\"\n", + "import math\n", + "math.sqrt(5.4)\n", + "\") raised error: invalid syntax (, line 1). Please try again with a valid numerical expression\n", + "0.005720801417544866\n", + "0.15096209512635608\n" + ] + } + ], + "source": [ + "# some sample maths questions\n", + "questions = [\n", + " \"Find the square root of 5.4.\",\n", + " \"What is 3 divided by 7.34 raised to the power of pi?\",\n", + " \"What is the sin of 0.47 radians, divided by the cube root of 27?\"\n", + "]\n", + "\n", + "for question in questions:\n", + " try:\n", + " # call your Agent as normal\n", + " answer = math_agent.run(question)\n", + " print(answer)\n", + " except Exception as e:\n", + " # any errors will be also logged to Weights & Biases\n", + " print(e)\n", + " pass" + ] + }, + { + "cell_type": "markdown", + "metadata": {}, + "source": [ + "Once each Agent execution completes, all calls in your LangChain object will be logged to Weights & Biases" + ] + }, + { + "cell_type": "markdown", + "metadata": {}, + "source": [ + "### LangChain Context Manager\n", + "Depending on your use case, you might instead prefer to use a context manager to manage your logging to W&B.\n", + "\n", + "**✨ New: Custom columns** can be logged directly to W&B to display in the same Trace Table with this snippet:\n", + "```python\n", + "import wandb\n", + "wandb.log(custom_metrics_dict, commit=False})\n", + "```\n", + "Use `commit=False` to make sure that metadata is logged to the same row of the Trace Table as the LangChain output." + ] + }, + { + "cell_type": "code", + "execution_count": null, + "metadata": {}, + "outputs": [ { - "cell_type": "markdown", - "metadata": { - "id": "nBFVwawPGVmD" - }, - "source": [ - "Once each Agent execution completes, all calls in your LangChain object will be logged to Weights & Biases" + "data": { + "text/plain": [ + "'1.0891804557407723'" ] + }, + "execution_count": null, + "metadata": {}, + "output_type": "execute_result" } - ], - "metadata": { - "accelerator": "GPU", - "colab": { - "include_colab_link": true, - "provenance": [] - }, - "kernelspec": { - "display_name": "Python 3", - "name": "python3" - } + ], + "source": [ + "from langchain.callbacks import wandb_tracing_enabled\n", + "import wandb # To enable custom column logging with wandb.run.log()\n", + "\n", + "# unset the environment variable and use a context manager instead\n", + "if \"LANGCHAIN_WANDB_TRACING\" in os.environ:\n", + " del os.environ[\"LANGCHAIN_WANDB_TRACING\"]\n", + "\n", + "# enable tracing using a context manager\n", + "with wandb_tracing_enabled():\n", + " for i in range (10):\n", + " # Log any custom columns you'd like to add to the Trace Table\n", + " wandb.log({\"custom_column\": i}, commit=False)\n", + " try:\n", + " math_agent.run(f\"What is {i} raised to .123243 power?\") # this should be traced\n", + " except:\n", + " pass\n", + "\n", + "math_agent.run(\"What is 2 raised to .123243 power?\") # this should not be traced" + ] + }, + { + "cell_type": "markdown", + "metadata": {}, + "source": [ + "# Non-Lang Chain Implementation\n", + "\n", + "\n", + "A W&B Trace is created by logging 1 or more \"spans\". A root span is expected, which can accept nested child spans, which can in turn accept their own child spans. 
A Span represents a unit of work. Spans can have type `AGENT`, `TOOL`, `LLM` or `CHAIN`.\n",
+    "\n",
+    "When logging with Trace, a single W&B run can have multiple calls to an LLM, Tool, Chain or Agent logged to it; there is no need to start a new W&B run after each generation from your model or pipeline. Instead, each call will be appended to the Trace Table.\n",
+    "\n",
+    "In this quickstart, we will show how to log a single call to an OpenAI model to W&B Trace as a single span. Then we will show how to log a more complex series of nested spans.\n",
+    "\n",
+    "## Logging with W&B Trace"
+   ]
+  },
+  {
+   "cell_type": "markdown",
+   "metadata": {},
+   "source": [
+    "Call wandb.init to start a W&B run. Here you can pass a W&B project name, an entity name (if logging to a W&B Team), a config and more. See wandb.init for the full list of arguments.\n",
+    "\n",
+    "You will see a Weights & Biases run start and be asked for your [Weights & Biases API key](https://wandb.ai/authorize). Once you enter your API key, the inputs and outputs of your Agent calls will start to be streamed to the Weights & Biases App.\n",
+    "\n",
+    "**Note:** A W&B run supports logging as many traces as you need to a single run, i.e. you can make multiple calls to `run.log` without the need to create a new run each time"
+   ]
+  },
+  {
+   "cell_type": "code",
+   "execution_count": null,
+   "metadata": {},
+   "outputs": [],
+   "source": [
+    "import wandb\n",
+    "\n",
+    "# start a wandb run to log to\n",
+    "wandb.init(project=\"trace-example\")"
+   ]
+  },
+  {
+   "cell_type": "markdown",
+   "metadata": {},
+   "source": [
+    "You can also set the entity argument in wandb.init if logging to a W&B Team.\n",
+    "\n",
+    "### Logging a single Span\n",
+    "Now we will query OpenAI a few times and log the results to a W&B Trace. We will log the inputs and outputs, start and end times, whether the OpenAI call was successful, the token usage, and additional metadata.\n",
+    "\n",
+    "You can see the full description of the arguments to the Trace class [here](https://soumik12345.github.io/wandb-addons/prompts/tracer/)."
+   ]
+  },
+  {
+   "cell_type": "code",
+   "execution_count": null,
+   "metadata": {},
+   "outputs": [],
+   "source": [
+    "import openai\n",
+    "import datetime\n",
+    "from wandb.sdk.data_types.trace_tree import Trace\n",
+    "\n",
+    "openai.api_key = os.environ[\"OPENAI_API_KEY\"]\n",
+    "\n",
+    "# define your config\n",
+    "model_name = \"gpt-3.5-turbo\"\n",
+    "temperature = 0.7\n",
+    "system_message = \"You are a helpful assistant that always replies in 3 concise bullet points using markdown.\"\n",
+    "\n",
+    "queries_ls = [\n",
+    "    \"What is the capital of France?\",\n",
+    "    \"How do I boil an egg?\" * 10000, # deliberately trigger an openai error\n",
+    "    \"What to do if the aliens arrive?\"\n",
+    "]\n",
+    "\n",
+    "for query in queries_ls:\n",
+    "    messages = [\n",
+    "        {\"role\": \"system\", \"content\": system_message},\n",
+    "        {\"role\": \"user\", \"content\": query}\n",
+    "    ]\n",
+    "\n",
+    "    start_time_ms = datetime.datetime.now().timestamp() * 1000\n",
+    "    try:\n",
+    "        response = openai.ChatCompletion.create(model=model_name,\n",
+    "                                                messages=messages,\n",
+    "                                                temperature=temperature\n",
+    "                                                )\n",
+    "\n",
+    "        end_time_ms = round(datetime.datetime.now().timestamp() * 1000) # logged in milliseconds\n",
+    "        status = \"success\"\n",
+    "        status_message = None\n",
+    "        response_text = response[\"choices\"][0][\"message\"][\"content\"]\n",
+    "        token_usage = response[\"usage\"].to_dict()\n",
+    "\n",
+    "    except Exception as e:\n",
+    "        end_time_ms = round(datetime.datetime.now().timestamp() * 1000) # logged in milliseconds\n",
+    "        status = \"error\"\n",
+    "        status_message = str(e)\n",
+    "        response_text = \"\"\n",
+    "        token_usage = {}\n",
+    "\n",
+    "    # create a span in wandb\n",
+    "    root_span = Trace(\n",
+    "        name=\"root_span\",\n",
+    "        kind=\"llm\", # kind can be \"llm\", \"chain\", \"agent\" or \"tool\"\n",
+    "        status_code=status,\n",
+    "        status_message=status_message,\n",
+    "        metadata={\"temperature\": temperature,\n",
+    "                  \"token_usage\": token_usage,\n",
+    "                  \"model_name\": model_name},\n",
+    "        start_time_ms=start_time_ms,\n",
+    "        end_time_ms=end_time_ms,\n",
+    "        inputs={\"system_prompt\": system_message, \"query\": query},\n",
+    "        outputs={\"response\": response_text},\n",
+    "    )\n",
+    "\n",
+    "    # log the span to wandb\n",
+    "    root_span.log(name=\"openai_trace\")"
+   ]
+  },
+  {
+   "cell_type": "markdown",
+   "metadata": {},
+   "source": [
+    "### Logging an LLM pipeline using nested Spans\n",
+    "\n",
+    "In this example we will simulate an Agent being called, which then calls an LLM Chain, which calls an OpenAI LLM and then the Agent \"calls\" a Calculator tool.\n",
+    "\n",
+    "The inputs, outputs and metadata for each step in the execution of our \"Agent\" are logged in its own span. 
Spans can have child" + ] + }, + { + "cell_type": "code", + "execution_count": null, + "metadata": {}, + "outputs": [], + "source": [ + "import time\n", + "\n", + "openai.api_key = os.environ[\"OPENAI_API_KEY\"]\n", + "\n", + "# The query our agent has to answer\n", + "query = \"How many days until the next US election?\"\n", + "\n", + "# part 1 - an Agent is started...\n", + "start_time_ms = round(datetime.datetime.now().timestamp() * 1000)\n", + "\n", + "root_span = Trace(\n", + " name=\"MyAgent\",\n", + " kind=\"agent\",\n", + " start_time_ms=start_time_ms,\n", + " metadata={\"user\": \"optimus_12\"})\n", + "\n", + "\n", + "# part 2 - The Agent calls into a LLMChain..\n", + "chain_span = Trace(\n", + " name=\"LLMChain\",\n", + " kind=\"chain\",\n", + " start_time_ms=start_time_ms)\n", + "\n", + "# add the Chain span as a child of the root\n", + "root_span.add_child(chain_span)\n", + "\n", + "\n", + "# part 3 - the LLMChain calls an OpenAI LLM...\n", + "messages=[\n", + " {\"role\": \"system\", \"content\": system_message},\n", + " {\"role\": \"user\", \"content\": query}\n", + "]\n", + "\n", + "response = openai.ChatCompletion.create(model=model_name,\n", + " messages=messages,\n", + " temperature=temperature)\n", + "\n", + "llm_end_time_ms = round(datetime.datetime.now().timestamp() * 1000)\n", + "response_text = response[\"choices\"][0][\"message\"][\"content\"]\n", + "token_usage = response[\"usage\"].to_dict()\n", + "\n", + "llm_span = Trace(\n", + " name=\"OpenAI\",\n", + " kind=\"llm\",\n", + " status_code=\"success\",\n", + " metadata={\"temperature\":temperature,\n", + " \"token_usage\": token_usage,\n", + " \"model_name\":model_name},\n", + " start_time_ms=start_time_ms,\n", + " end_time_ms=llm_end_time_ms,\n", + " inputs={\"system_prompt\":system_message, \"query\":query},\n", + " outputs={\"response\": response_text},\n", + " )\n", + "\n", + "# add the LLM span as a child of the Chain span...\n", + "chain_span.add_child(llm_span)\n", + "\n", + "# update the end time of the Chain span\n", + "chain_span.add_inputs_and_outputs(\n", + " inputs={\"query\":query},\n", + " outputs={\"response\": response_text})\n", + "\n", + "# update the Chain span's end time\n", + "chain_span._span.end_time_ms = llm_end_time_ms\n", + "\n", + "\n", + "# part 4 - the Agent then calls a Tool...\n", + "time.sleep(3)\n", + "days_to_election = 117\n", + "tool_end_time_ms = round(datetime.datetime.now().timestamp() * 1000)\n", + "\n", + "# create a Tool span\n", + "tool_span = Trace(\n", + " name=\"Calculator\",\n", + " kind=\"tool\",\n", + " status_code=\"success\",\n", + " start_time_ms=llm_end_time_ms,\n", + " end_time_ms=tool_end_time_ms,\n", + " inputs={\"input\": response_text},\n", + " outputs={\"result\": days_to_election})\n", + "\n", + "# add the TOOL span as a child of the root\n", + "root_span.add_child(tool_span)\n", + "\n", + "\n", + "# part 5 - the final results from the tool are added\n", + "root_span.add_inputs_and_outputs(inputs={\"query\": query},\n", + " outputs={\"result\": days_to_election})\n", + "root_span._span.end_time_ms = tool_end_time_ms\n", + "\n", + "\n", + "# part 6 - log all spans to W&B by logging the root span\n", + "root_span.log(name=\"openai_trace\")" + ] + }, + { + "cell_type": "markdown", + "metadata": {}, + "source": [ + "Once each Agent execution completes, all calls in your LangChain object will be logged to Weights & Biases" + ] + } + ], + "metadata": { + "accelerator": "GPU", + "colab": { + "include_colab_link": true, + "provenance": [] }, - "nbformat": 4, - 
"nbformat_minor": 0 + "kernelspec": { + "display_name": "Python 3", + "name": "python3" + } + }, + "nbformat": 4, + "nbformat_minor": 0 } diff --git a/colabs/prompts/WandB_LLM_QA_bot.ipynb b/colabs/prompts/WandB_LLM_QA_bot.ipynb index f1924ff7..efc614e8 100644 --- a/colabs/prompts/WandB_LLM_QA_bot.ipynb +++ b/colabs/prompts/WandB_LLM_QA_bot.ipynb @@ -123,7 +123,7 @@ "metadata": {}, "outputs": [], "source": [ - "!wandb login" + "wandb.login()" ] }, { diff --git a/colabs/pycaret/Default_Credit_Prediction_Using_W&B_Pycaret_FastAPI.ipynb b/colabs/pycaret/Default_Credit_Prediction_Using_W&B_Pycaret_FastAPI.ipynb index 8b1fe03e..1050389b 100644 --- a/colabs/pycaret/Default_Credit_Prediction_Using_W&B_Pycaret_FastAPI.ipynb +++ b/colabs/pycaret/Default_Credit_Prediction_Using_W&B_Pycaret_FastAPI.ipynb @@ -126,7 +126,7 @@ "metadata": {}, "outputs": [], "source": [ - "!wandb login" + "wandb.login()" ] }, { diff --git a/colabs/pyg/8_Node_Classification_(with_W&B).ipynb b/colabs/pyg/8_Node_Classification_(with_W&B).ipynb index 7583e56b..3c386a9a 100644 --- a/colabs/pyg/8_Node_Classification_(with_W&B).ipynb +++ b/colabs/pyg/8_Node_Classification_(with_W&B).ipynb @@ -54,7 +54,7 @@ "metadata": {}, "outputs": [], "source": [ - "!wandb login" + "wandb.login()" ] }, { diff --git a/colabs/pyg/point-cloud-segmentation/00_eda.ipynb b/colabs/pyg/point-cloud-segmentation/00_eda.ipynb index 89a81516..8f4b86b7 100644 --- a/colabs/pyg/point-cloud-segmentation/00_eda.ipynb +++ b/colabs/pyg/point-cloud-segmentation/00_eda.ipynb @@ -2,7 +2,7 @@ "cells": [ { "cell_type": "markdown", - "id": "5ef81b15", + "id": "04fdcaca", "metadata": {}, "source": [ "\"Open\n", @@ -12,7 +12,7 @@ { "attachments": {}, "cell_type": "markdown", - "id": "e0e9e05d", + "id": "aead1b54", "metadata": {}, "source": [ "# 🔥🔥 Explore ShapeNet Dataset using PyTorch Geometric and Weights & Biases 🪄🐝\n", @@ -32,7 +32,7 @@ }, { "cell_type": "markdown", - "id": "281c2001", + "id": "e4c9ae80", "metadata": {}, "source": [ "# Install Required Packages" @@ -41,7 +41,7 @@ { "cell_type": "code", "execution_count": null, - "id": "d0ee5b86", + "id": "73e2ae10", "metadata": {}, "outputs": [], "source": [ @@ -54,7 +54,7 @@ { "cell_type": "code", "execution_count": null, - "id": "f5a3bf42", + "id": "d47b401b", "metadata": {}, "outputs": [], "source": [ @@ -67,7 +67,7 @@ }, { "cell_type": "markdown", - "id": "b4907ca7", + "id": "1a4901c4", "metadata": {}, "source": [ "## Import Libraries" @@ -76,7 +76,7 @@ { "cell_type": "code", "execution_count": null, - "id": "740d3dca", + "id": "5fd069a9", "metadata": {}, "outputs": [], "source": [ @@ -100,7 +100,7 @@ { "cell_type": "code", "execution_count": null, - "id": "61fa78fc", + "id": "86448a88", "metadata": {}, "outputs": [], "source": [ @@ -120,7 +120,7 @@ }, { "cell_type": "markdown", - "id": "21bdbd6e", + "id": "b21a452e", "metadata": {}, "source": [ "## Visualize Train-Val Dataset" @@ -129,7 +129,7 @@ { "cell_type": "code", "execution_count": null, - "id": "728f4672", + "id": "a23f6557", "metadata": {}, "outputs": [], "source": [ @@ -146,7 +146,7 @@ { "cell_type": "code", "execution_count": null, - "id": "f0660a21", + "id": "e4d8432b", "metadata": {}, "outputs": [], "source": [ @@ -172,7 +172,7 @@ { "cell_type": "code", "execution_count": null, - "id": "c11cd1ea", + "id": "983e9511", "metadata": {}, "outputs": [], "source": [ @@ -188,7 +188,7 @@ }, { "cell_type": "markdown", - "id": "6704bb3a", + "id": "385112bc", "metadata": {}, "source": [ "## Visualize Test Dataset" @@ -197,7 +197,7 @@ { "cell_type": "code", 
"execution_count": null, - "id": "49b04315", + "id": "b201d63f", "metadata": {}, "outputs": [], "source": [ @@ -212,7 +212,7 @@ { "cell_type": "code", "execution_count": null, - "id": "714a6358", + "id": "f12b2662", "metadata": {}, "outputs": [], "source": [ @@ -237,7 +237,7 @@ { "cell_type": "code", "execution_count": null, - "id": "bbf6c666", + "id": "79838e77", "metadata": {}, "outputs": [], "source": [ @@ -254,7 +254,7 @@ { "cell_type": "code", "execution_count": null, - "id": "4e179ef9", + "id": "2b9e5b69", "metadata": {}, "outputs": [], "source": [ diff --git a/colabs/pyg/point-cloud-segmentation/01_dgcnn_train.ipynb b/colabs/pyg/point-cloud-segmentation/01_dgcnn_train.ipynb index be383d14..473f8eea 100644 --- a/colabs/pyg/point-cloud-segmentation/01_dgcnn_train.ipynb +++ b/colabs/pyg/point-cloud-segmentation/01_dgcnn_train.ipynb @@ -2,7 +2,7 @@ "cells": [ { "cell_type": "markdown", - "id": "dc42e2f0", + "id": "172865ee", "metadata": {}, "source": [ "\"Open\n", @@ -12,7 +12,7 @@ { "attachments": {}, "cell_type": "markdown", - "id": "ed3fb916", + "id": "75b3b204", "metadata": {}, "source": [ "# 🔥🔥 Train DGCNN Model using PyTorch Geometric and Weights & Biases 🪄🐝\n", @@ -30,7 +30,7 @@ }, { "cell_type": "markdown", - "id": "abb4ac7d", + "id": "90904194", "metadata": {}, "source": [ "# Install Required Packages" @@ -39,7 +39,7 @@ { "cell_type": "code", "execution_count": null, - "id": "b3b74df1", + "id": "0fbee2a4", "metadata": {}, "outputs": [], "source": [ @@ -52,7 +52,7 @@ { "cell_type": "code", "execution_count": null, - "id": "7b0e8997", + "id": "7e98bc08", "metadata": {}, "outputs": [], "source": [ @@ -65,7 +65,7 @@ }, { "cell_type": "markdown", - "id": "6a6c41d3", + "id": "937b7a90", "metadata": {}, "source": [ "## Import Libraries" @@ -74,7 +74,7 @@ { "cell_type": "code", "execution_count": null, - "id": "8256d0a4", + "id": "ddbd6831", "metadata": {}, "outputs": [], "source": [ @@ -98,7 +98,7 @@ }, { "cell_type": "markdown", - "id": "bd995109", + "id": "c9b5bbe7", "metadata": {}, "source": [ "# Initialize Weights & Biases\n", @@ -109,7 +109,7 @@ { "cell_type": "code", "execution_count": null, - "id": "598d0d2f", + "id": "0f940177", "metadata": {}, "outputs": [], "source": [ @@ -148,7 +148,7 @@ }, { "cell_type": "markdown", - "id": "21b2e1dc", + "id": "b24e29f8", "metadata": {}, "source": [ "# Load ShapeNet Dataset using PyTorch Geometric\n", @@ -159,7 +159,7 @@ { "cell_type": "code", "execution_count": null, - "id": "901b65e4", + "id": "847e8f56", "metadata": {}, "outputs": [], "source": [ @@ -175,7 +175,7 @@ { "cell_type": "code", "execution_count": null, - "id": "593299c7", + "id": "70388abb", "metadata": {}, "outputs": [], "source": [ @@ -189,7 +189,7 @@ }, { "cell_type": "markdown", - "id": "ff838a22", + "id": "7c9998ca", "metadata": {}, "source": [ "Now, we need to offset the segmentation labels" @@ -198,7 +198,7 @@ { "cell_type": "code", "execution_count": null, - "id": "bc1bfc6c", + "id": "47f0e8f2", "metadata": {}, "outputs": [], "source": [ @@ -218,7 +218,7 @@ { "cell_type": "code", "execution_count": null, - "id": "d9589c7a", + "id": "507ef52b", "metadata": {}, "outputs": [], "source": [ @@ -230,7 +230,7 @@ { "cell_type": "code", "execution_count": null, - "id": "b3aa8265", + "id": "0a04d1ed", "metadata": {}, "outputs": [], "source": [ @@ -250,7 +250,7 @@ }, { "cell_type": "markdown", - "id": "f5e0dc65", + "id": "b3764ef6", "metadata": {}, "source": [ "# Implementing the DGCNN Model using PyTorch Geometric" @@ -259,7 +259,7 @@ { "cell_type": "code", "execution_count": 
null, - "id": "fd50b563", + "id": "95c329fc", "metadata": {}, "outputs": [], "source": [ @@ -291,7 +291,7 @@ { "cell_type": "code", "execution_count": null, - "id": "f69f1f9a", + "id": "fc1beb09", "metadata": {}, "outputs": [], "source": [ @@ -310,7 +310,7 @@ }, { "cell_type": "markdown", - "id": "b9ec4b0b", + "id": "684a9044", "metadata": {}, "source": [ "# Training DGCNN and Logging Metrics on Weights & Biases" @@ -319,7 +319,7 @@ { "cell_type": "code", "execution_count": null, - "id": "6e8d5486", + "id": "e775d74b", "metadata": {}, "outputs": [], "source": [ @@ -382,7 +382,7 @@ { "cell_type": "code", "execution_count": null, - "id": "28a8c910", + "id": "bbc46ccd", "metadata": {}, "outputs": [], "source": [ @@ -442,7 +442,7 @@ { "cell_type": "code", "execution_count": null, - "id": "04abc40e", + "id": "7d28329c", "metadata": {}, "outputs": [], "source": [ @@ -508,7 +508,7 @@ { "cell_type": "code", "execution_count": null, - "id": "23998504", + "id": "fbddd81a", "metadata": {}, "outputs": [], "source": [ @@ -534,7 +534,7 @@ { "cell_type": "code", "execution_count": null, - "id": "43338109", + "id": "aebb71ed", "metadata": {}, "outputs": [], "source": [ @@ -559,7 +559,7 @@ { "cell_type": "code", "execution_count": null, - "id": "30145cd7", + "id": "079d94f7", "metadata": {}, "outputs": [], "source": [ @@ -568,7 +568,7 @@ }, { "cell_type": "markdown", - "id": "87f7fe22", + "id": "00ba1b79", "metadata": {}, "source": [ "Next, you can check out the following notebook to learn how to evaluate the model on the ShapeNetCore dataset using Weights & Biases, you can check out the following notebook:\n", diff --git a/colabs/pyg/point-cloud-segmentation/02_dgcnn_evaluate.ipynb b/colabs/pyg/point-cloud-segmentation/02_dgcnn_evaluate.ipynb index 35b6f0c0..d591cc44 100644 --- a/colabs/pyg/point-cloud-segmentation/02_dgcnn_evaluate.ipynb +++ b/colabs/pyg/point-cloud-segmentation/02_dgcnn_evaluate.ipynb @@ -2,7 +2,7 @@ "cells": [ { "cell_type": "markdown", - "id": "025dae63", + "id": "670bb5e6", "metadata": {}, "source": [ "\"Open\n", @@ -12,7 +12,7 @@ { "attachments": {}, "cell_type": "markdown", - "id": "1b4e7314", + "id": "f99cc608", "metadata": {}, "source": [ "# 🔥🔥 Evaluate DGCNN Model Weights & Biases 🪄🐝\n", @@ -28,7 +28,7 @@ }, { "cell_type": "markdown", - "id": "89db1834", + "id": "f26e6a2b", "metadata": {}, "source": [ "# Install Required Packages" @@ -37,7 +37,7 @@ { "cell_type": "code", "execution_count": null, - "id": "a77aebd4", + "id": "16a12686", "metadata": {}, "outputs": [], "source": [ @@ -50,7 +50,7 @@ { "cell_type": "code", "execution_count": null, - "id": "be70eea7", + "id": "bd471ac2", "metadata": {}, "outputs": [], "source": [ @@ -63,7 +63,7 @@ }, { "cell_type": "markdown", - "id": "7f81a149", + "id": "52865cb0", "metadata": {}, "source": [ "## Import Libraries" @@ -72,7 +72,7 @@ { "cell_type": "code", "execution_count": null, - "id": "4b0853b8", + "id": "464e45dc", "metadata": {}, "outputs": [], "source": [ @@ -96,7 +96,7 @@ }, { "cell_type": "markdown", - "id": "ff18b9d1", + "id": "ec10ba94", "metadata": {}, "source": [ "# Initialize Weights & Biases\n", @@ -107,7 +107,7 @@ { "cell_type": "code", "execution_count": null, - "id": "f6e08809", + "id": "88df5824", "metadata": {}, "outputs": [], "source": [ @@ -146,7 +146,7 @@ }, { "cell_type": "markdown", - "id": "d5c4fb1f", + "id": "7fb79f85", "metadata": {}, "source": [ "# Load ShapeNet Dataset using PyTorch Geometric\n", @@ -157,7 +157,7 @@ { "cell_type": "code", "execution_count": null, - "id": "9aa02f55", + "id": "ed125325", 
"metadata": {}, "outputs": [], "source": [ @@ -173,7 +173,7 @@ { "cell_type": "code", "execution_count": null, - "id": "46d88e72", + "id": "ca5b0b24", "metadata": {}, "outputs": [], "source": [ @@ -192,7 +192,7 @@ { "cell_type": "code", "execution_count": null, - "id": "65a3b8f1", + "id": "827a5a3d", "metadata": {}, "outputs": [], "source": [ @@ -214,7 +214,7 @@ { "cell_type": "code", "execution_count": null, - "id": "ca65ff1d", + "id": "0845c8e6", "metadata": {}, "outputs": [], "source": [ @@ -227,7 +227,7 @@ { "cell_type": "code", "execution_count": null, - "id": "10bb8aeb", + "id": "5880ea56", "metadata": {}, "outputs": [], "source": [ @@ -243,7 +243,7 @@ }, { "cell_type": "markdown", - "id": "29385a29", + "id": "1923436a", "metadata": {}, "source": [ "# Load Checkpoint" @@ -252,7 +252,7 @@ { "cell_type": "code", "execution_count": null, - "id": "aad21a2d", + "id": "f2d53569", "metadata": {}, "outputs": [], "source": [ @@ -283,7 +283,7 @@ }, { "cell_type": "markdown", - "id": "d86f033f", + "id": "6c2b36ea", "metadata": {}, "source": [ "Since we saved the checkpoints as artifacts on our Weights & Biases workspace, we can now fetch and load them." @@ -292,7 +292,7 @@ { "cell_type": "code", "execution_count": null, - "id": "c733c60c", + "id": "9dba90fc", "metadata": {}, "outputs": [], "source": [ @@ -313,7 +313,7 @@ }, { "cell_type": "markdown", - "id": "dbfc3726", + "id": "53abd015", "metadata": {}, "source": [ "# Evaluation" @@ -322,7 +322,7 @@ { "cell_type": "code", "execution_count": null, - "id": "25bf63ea", + "id": "563deea4", "metadata": {}, "outputs": [], "source": [ @@ -393,7 +393,7 @@ }, { "cell_type": "markdown", - "id": "d19e4e20", + "id": "59fb65f4", "metadata": {}, "source": [ "We evaluate the results and store them in a Weights & Biases Table." @@ -402,7 +402,7 @@ { "cell_type": "code", "execution_count": null, - "id": "46f08bc2", + "id": "fa945139", "metadata": {}, "outputs": [], "source": [ @@ -414,7 +414,7 @@ { "cell_type": "code", "execution_count": null, - "id": "21b13d76", + "id": "e8995e37", "metadata": {}, "outputs": [], "source": [ @@ -424,7 +424,7 @@ { "cell_type": "code", "execution_count": null, - "id": "45a26573", + "id": "d4fddcf2", "metadata": {}, "outputs": [], "source": [ diff --git a/colabs/pyg/pointnet-classification/00_eda.ipynb b/colabs/pyg/pointnet-classification/00_eda.ipynb index 03e4fe77..d60acb6d 100644 --- a/colabs/pyg/pointnet-classification/00_eda.ipynb +++ b/colabs/pyg/pointnet-classification/00_eda.ipynb @@ -2,7 +2,7 @@ "cells": [ { "cell_type": "markdown", - "id": "e0dda925", + "id": "5d2c4f72", "metadata": {}, "source": [ "\"Open\n", @@ -12,7 +12,7 @@ { "attachments": {}, "cell_type": "markdown", - "id": "411e99e0", + "id": "af680e31", "metadata": {}, "source": [ "# 🔥🔥 Explore ModelNet Datasets using PyTorch Geometric and Weights & Biases 🪄🐝\n", @@ -23,7 +23,7 @@ { "attachments": {}, "cell_type": "markdown", - "id": "ea78427f", + "id": "7a9396ad", "metadata": {}, "source": [ "## Install Required Libraries" @@ -32,7 +32,7 @@ { "cell_type": "code", "execution_count": null, - "id": "4e0fd350", + "id": "b6c91a7f", "metadata": {}, "outputs": [], "source": [ @@ -45,7 +45,7 @@ { "attachments": {}, "cell_type": "markdown", - "id": "46b62412", + "id": "cde43ebd", "metadata": {}, "source": [ "We now install PyTorch Geometric according to our PyTorch Version. We also install Weights & Biases." 
@@ -54,7 +54,7 @@ { "cell_type": "code", "execution_count": null, - "id": "64b6b879", + "id": "9fff54b7", "metadata": {}, "outputs": [], "source": [ @@ -68,7 +68,7 @@ { "attachments": {}, "cell_type": "markdown", - "id": "448482ba", + "id": "aba4b30e", "metadata": {}, "source": [ "### Import Libraries" @@ -77,7 +77,7 @@ { "cell_type": "code", "execution_count": null, - "id": "dae389f0", + "id": "6acaec36", "metadata": {}, "outputs": [], "source": [ @@ -106,7 +106,7 @@ { "attachments": {}, "cell_type": "markdown", - "id": "09a1bfdd", + "id": "46895a21", "metadata": {}, "source": [ "## Initialize Weights & Biases\n", @@ -117,7 +117,7 @@ { "cell_type": "code", "execution_count": null, - "id": "faf958d9", + "id": "b2a7db44", "metadata": {}, "outputs": [], "source": [ @@ -147,7 +147,7 @@ { "attachments": {}, "cell_type": "markdown", - "id": "7d3f67fd", + "id": "efd2ad10", "metadata": {}, "source": [ "## Load ModelNet Dataset using PyTorch Geometric" @@ -156,7 +156,7 @@ { "cell_type": "code", "execution_count": null, - "id": "2812970c", + "id": "3dfb1e50", "metadata": {}, "outputs": [], "source": [ @@ -181,7 +181,7 @@ { "attachments": {}, "cell_type": "markdown", - "id": "b372188e", + "id": "b038fb22", "metadata": {}, "source": [ "## Log Data to [`wandb.Table`](https://docs.wandb.ai/ref/python/data-types/table)\n", @@ -192,7 +192,7 @@ { "cell_type": "code", "execution_count": null, - "id": "df976f6b", + "id": "66342479", "metadata": {}, "outputs": [], "source": [ @@ -221,7 +221,7 @@ { "cell_type": "code", "execution_count": null, - "id": "880285e4", + "id": "21978686", "metadata": {}, "outputs": [], "source": [ @@ -251,7 +251,7 @@ { "cell_type": "code", "execution_count": null, - "id": "23e19ecb", + "id": "46c5fea5", "metadata": {}, "outputs": [], "source": [ @@ -261,7 +261,7 @@ { "attachments": {}, "cell_type": "markdown", - "id": "45f51adf", + "id": "e9efd334", "metadata": {}, "source": [ "Next, you can check out the following notebook to learn how to compare different sampling strategies in PyTorch Geometric using Weights & Biases\n", diff --git a/colabs/pyg/pointnet-classification/01_compare_sampling.ipynb b/colabs/pyg/pointnet-classification/01_compare_sampling.ipynb index 41c86d30..8930e642 100644 --- a/colabs/pyg/pointnet-classification/01_compare_sampling.ipynb +++ b/colabs/pyg/pointnet-classification/01_compare_sampling.ipynb @@ -2,7 +2,7 @@ "cells": [ { "cell_type": "markdown", - "id": "540308b6", + "id": "fb05c584", "metadata": {}, "source": [ "\"Open\n", @@ -12,7 +12,7 @@ { "attachments": {}, "cell_type": "markdown", - "id": "25d4dadf", + "id": "5df60be8", "metadata": {}, "source": [ "# 🔥🔥 Explore Graph Sampling Techniques using PyTorch Geometric and Weights & Biases 🪄🐝\n", @@ -27,7 +27,7 @@ { "attachments": {}, "cell_type": "markdown", - "id": "5cfeb685", + "id": "1a0b2ded", "metadata": {}, "source": [ "## Install Required Libraries" @@ -36,7 +36,7 @@ { "cell_type": "code", "execution_count": null, - "id": "afcd8d80", + "id": "91f14e02", "metadata": {}, "outputs": [], "source": [ @@ -49,7 +49,7 @@ { "attachments": {}, "cell_type": "markdown", - "id": "7636011f", + "id": "dc2c1fd0", "metadata": {}, "source": [ "We now install PyTorch Geometric according to our PyTorch Version. We also install Weights & Biases." 
@@ -58,7 +58,7 @@ { "attachments": {}, "cell_type": "markdown", - "id": "0f6e59e4", + "id": "67d64c57", "metadata": {}, "source": [ "!pip install -q torch-scatter -f https://data.pyg.org/whl/torch-${TORCH}.html\n", @@ -71,7 +71,7 @@ { "attachments": {}, "cell_type": "markdown", - "id": "3bcb92b9", + "id": "94b145bd", "metadata": {}, "source": [ "### Import Libraries" @@ -80,7 +80,7 @@ { "cell_type": "code", "execution_count": null, - "id": "5dc0e1de", + "id": "f2d40f2f", "metadata": {}, "outputs": [], "source": [ @@ -107,7 +107,7 @@ { "cell_type": "code", "execution_count": null, - "id": "80da5344", + "id": "c40b87cd", "metadata": {}, "outputs": [], "source": [ @@ -135,7 +135,7 @@ { "attachments": {}, "cell_type": "markdown", - "id": "3ebaa86f", + "id": "97c44de0", "metadata": {}, "source": [ "We take a single point cloud from the dataset and compare the KNN-sampled subgraph and radius-sampled subgraph by visualizing the subgraphs as [`wandb.Html`](https://docs.wandb.ai/ref/python/data-types/html) on a [Weights & Biases Table](https://docs.wandb.ai/guides/data-vis)." @@ -144,7 +144,7 @@ { "cell_type": "code", "execution_count": null, - "id": "2a51314d", + "id": "a59c651e", "metadata": {}, "outputs": [], "source": [ @@ -184,7 +184,7 @@ { "attachments": {}, "cell_type": "markdown", - "id": "6d2ade6e", + "id": "0059f0b5", "metadata": {}, "source": [ "Next, you can check out the following notebook to learn how to train the PointNet++ architecture using PyTorch Geometric and Weights & Biases\n", diff --git a/colabs/pyg/pointnet-classification/02_pointnet_plus_plus.ipynb b/colabs/pyg/pointnet-classification/02_pointnet_plus_plus.ipynb index f3affb99..20542073 100644 --- a/colabs/pyg/pointnet-classification/02_pointnet_plus_plus.ipynb +++ b/colabs/pyg/pointnet-classification/02_pointnet_plus_plus.ipynb @@ -2,7 +2,7 @@ "cells": [ { "cell_type": "markdown", - "id": "b93b0d32", + "id": "c0f2b7bb", "metadata": {}, "source": [ "\"Open\n", @@ -12,7 +12,7 @@ { "attachments": {}, "cell_type": "markdown", - "id": "3391c082", + "id": "0a089e90", "metadata": {}, "source": [ "# 🔥🔥 Train PointNet++ Model using PyTorch Geometric and Weights & Biases 🪄🐝\n", @@ -29,7 +29,7 @@ { "attachments": {}, "cell_type": "markdown", - "id": "ee4f787d", + "id": "74844944", "metadata": {}, "source": [ "## Install Required Libraries" @@ -38,7 +38,7 @@ { "cell_type": "code", "execution_count": null, - "id": "ecf9a745", + "id": "316dcba0", "metadata": {}, "outputs": [], "source": [ @@ -52,7 +52,7 @@ { "attachments": {}, "cell_type": "markdown", - "id": "48bb8716", + "id": "4380ba4f", "metadata": {}, "source": [ "We now install PyTorch Geometric according to our PyTorch Version. We also install Weights & Biases." 
@@ -61,7 +61,7 @@ { "cell_type": "code", "execution_count": null, - "id": "f039fce2", + "id": "1fda5066", "metadata": {}, "outputs": [], "source": [ @@ -75,7 +75,7 @@ { "attachments": {}, "cell_type": "markdown", - "id": "e1eadafb", + "id": "d6616543", "metadata": {}, "source": [ "### Import Libraries" @@ -84,7 +84,7 @@ { "cell_type": "code", "execution_count": null, - "id": "a89ab10a", + "id": "cbc13bbc", "metadata": {}, "outputs": [], "source": [ @@ -106,7 +106,7 @@ { "attachments": {}, "cell_type": "markdown", - "id": "014483ac", + "id": "46e5b154", "metadata": {}, "source": [ "## Initialize Weights & Biases\n", @@ -117,7 +117,7 @@ { "cell_type": "code", "execution_count": null, - "id": "cd601943", + "id": "783f6a3d", "metadata": {}, "outputs": [], "source": [ @@ -163,7 +163,7 @@ { "attachments": {}, "cell_type": "markdown", - "id": "1e3b7a37", + "id": "524732b9", "metadata": {}, "source": [ "## Load ModelNet Dataset using PyTorch Geometric\n", @@ -174,7 +174,7 @@ { "cell_type": "code", "execution_count": null, - "id": "6aaa6cb3", + "id": "12a74c5f", "metadata": {}, "outputs": [], "source": [ @@ -225,7 +225,7 @@ { "attachments": {}, "cell_type": "markdown", - "id": "2896d8b7", + "id": "4960beac", "metadata": {}, "source": [ "## Implementing the PointNet++ Model using PyTorch Geometric" @@ -234,7 +234,7 @@ { "cell_type": "code", "execution_count": null, - "id": "93ad63ad", + "id": "ea4fe7e2", "metadata": {}, "outputs": [], "source": [ @@ -259,7 +259,7 @@ { "cell_type": "code", "execution_count": null, - "id": "63c5d023", + "id": "e266ddad", "metadata": {}, "outputs": [], "source": [ @@ -279,7 +279,7 @@ { "cell_type": "code", "execution_count": null, - "id": "95a8a971", + "id": "e838fc04", "metadata": {}, "outputs": [], "source": [ @@ -319,7 +319,7 @@ { "attachments": {}, "cell_type": "markdown", - "id": "5c4ec58d", + "id": "973729a2", "metadata": {}, "source": [ "## Training PointNet++ and Logging Metrics on Weights & Biases" @@ -328,7 +328,7 @@ { "cell_type": "code", "execution_count": null, - "id": "12aa9785", + "id": "c524e338", "metadata": {}, "outputs": [], "source": [ @@ -350,7 +350,7 @@ { "cell_type": "code", "execution_count": null, - "id": "d7d4ab03", + "id": "b753e2c0", "metadata": {}, "outputs": [], "source": [ @@ -464,7 +464,7 @@ { "cell_type": "code", "execution_count": null, - "id": "a7dbb213", + "id": "42b37a86", "metadata": {}, "outputs": [], "source": [ @@ -489,7 +489,7 @@ { "cell_type": "code", "execution_count": null, - "id": "40ad9e4a", + "id": "88bbb06a", "metadata": {}, "outputs": [], "source": [ @@ -499,7 +499,7 @@ { "attachments": {}, "cell_type": "markdown", - "id": "31a8a8ac", + "id": "e236382d", "metadata": {}, "source": [ "Next, you can check out the following notebook to learn how to run a hyperparameter sweep on our PointNet++ trainig loop using Weights & Biases:\n", diff --git a/colabs/pyg/pointnet-classification/03_sweep.ipynb b/colabs/pyg/pointnet-classification/03_sweep.ipynb index 3a8b58cf..3a14aa62 100644 --- a/colabs/pyg/pointnet-classification/03_sweep.ipynb +++ b/colabs/pyg/pointnet-classification/03_sweep.ipynb @@ -2,7 +2,7 @@ "cells": [ { "cell_type": "markdown", - "id": "15297b92", + "id": "a24744b3", "metadata": {}, "source": [ "\"Open\n", @@ -12,7 +12,7 @@ { "attachments": {}, "cell_type": "markdown", - "id": "f529ef25", + "id": "7b2b8865", "metadata": {}, "source": [ "# 🔥🔥 Run a Hyperparamter Sweep on PointNet++ 🪄🐝\n", @@ -27,7 +27,7 @@ { "attachments": {}, "cell_type": "markdown", - "id": "f036a8ae", + "id": "c5d6108b", "metadata": {}, 
"source": [ "## Install Required Libraries" @@ -36,7 +36,7 @@ { "cell_type": "code", "execution_count": null, - "id": "3ae1ec82", + "id": "c8467ae5", "metadata": {}, "outputs": [], "source": [ @@ -50,7 +50,7 @@ { "attachments": {}, "cell_type": "markdown", - "id": "5b7d66c7", + "id": "7d43577a", "metadata": {}, "source": [ "We now install PyTorch Geometric according to our PyTorch Version. We also install Weights & Biases." @@ -59,7 +59,7 @@ { "cell_type": "code", "execution_count": null, - "id": "19b5b8bf", + "id": "f6d2ea50", "metadata": {}, "outputs": [], "source": [ @@ -73,7 +73,7 @@ { "attachments": {}, "cell_type": "markdown", - "id": "22b0c260", + "id": "3ca54e60", "metadata": {}, "source": [ "### Import Libraries" @@ -82,7 +82,7 @@ { "cell_type": "code", "execution_count": null, - "id": "c3f71cab", + "id": "e268c311", "metadata": {}, "outputs": [], "source": [ @@ -104,7 +104,7 @@ { "attachments": {}, "cell_type": "markdown", - "id": "df67a2eb", + "id": "452b8915", "metadata": {}, "source": [ "## Function to Build Data Loaders" @@ -113,7 +113,7 @@ { "cell_type": "code", "execution_count": null, - "id": "1a450c72", + "id": "b941afd2", "metadata": {}, "outputs": [], "source": [ @@ -155,7 +155,7 @@ { "attachments": {}, "cell_type": "markdown", - "id": "cd89a7b6", + "id": "1739fa2d", "metadata": {}, "source": [ "## Implementing the PointNet++ Model using PyTorch Geometric" @@ -164,7 +164,7 @@ { "cell_type": "code", "execution_count": null, - "id": "4efd3a6c", + "id": "5c7954c1", "metadata": {}, "outputs": [], "source": [ @@ -226,7 +226,7 @@ { "attachments": {}, "cell_type": "markdown", - "id": "b990617a", + "id": "377b785a", "metadata": {}, "source": [ "## Define a Training Function Instrumented with WandB" @@ -235,7 +235,7 @@ { "cell_type": "code", "execution_count": null, - "id": "d790cb58", + "id": "127f4dcb", "metadata": {}, "outputs": [], "source": [ @@ -359,7 +359,7 @@ { "attachments": {}, "cell_type": "markdown", - "id": "e6671e92", + "id": "6f903aee", "metadata": {}, "source": [ "## Start the Hyperparameter Sweep" @@ -368,7 +368,7 @@ { "cell_type": "code", "execution_count": null, - "id": "d31702ae", + "id": "9c4c673b", "metadata": {}, "outputs": [], "source": [ diff --git a/colabs/pytorch-lightning/Fine_tuning_a_Transformer_with_Pytorch_Lightning.ipynb b/colabs/pytorch-lightning/Fine_tuning_a_Transformer_with_Pytorch_Lightning.ipynb index 08f30055..9a2a1f10 100644 --- a/colabs/pytorch-lightning/Fine_tuning_a_Transformer_with_Pytorch_Lightning.ipynb +++ b/colabs/pytorch-lightning/Fine_tuning_a_Transformer_with_Pytorch_Lightning.ipynb @@ -113,7 +113,7 @@ "metadata": {}, "outputs": [], "source": [ - "!wandb login" + "wandb.login()" ] }, { diff --git a/colabs/pytorch-lightning/Image_Classification_using_PyTorch_Lightning.ipynb b/colabs/pytorch-lightning/Image_Classification_using_PyTorch_Lightning.ipynb index 53c8e7d1..0b98bdfe 100644 --- a/colabs/pytorch-lightning/Image_Classification_using_PyTorch_Lightning.ipynb +++ b/colabs/pytorch-lightning/Image_Classification_using_PyTorch_Lightning.ipynb @@ -96,7 +96,7 @@ "metadata": {}, "outputs": [], "source": [ - "!wandb login" + "wandb.login()" ] }, { diff --git a/colabs/pytorch-lightning/Optimize_Pytorch_Lightning_models_with_Weights_&_Biases.ipynb b/colabs/pytorch-lightning/Optimize_Pytorch_Lightning_models_with_Weights_&_Biases.ipynb index 364a36df..a4746eaf 100644 --- a/colabs/pytorch-lightning/Optimize_Pytorch_Lightning_models_with_Weights_&_Biases.ipynb +++ 
b/colabs/pytorch-lightning/Optimize_Pytorch_Lightning_models_with_Weights_&_Biases.ipynb @@ -89,7 +89,7 @@ "metadata": {}, "outputs": [], "source": [ - "!wandb login" + "wandb.login()" ] }, { diff --git a/colabs/pytorch-lightning/Profile_PyTorch_Code.ipynb b/colabs/pytorch-lightning/Profile_PyTorch_Code.ipynb index 82694b32..b7968866 100644 --- a/colabs/pytorch-lightning/Profile_PyTorch_Code.ipynb +++ b/colabs/pytorch-lightning/Profile_PyTorch_Code.ipynb @@ -122,7 +122,7 @@ "metadata": {}, "outputs": [], "source": [ - "!wandb login" + "wandb.login()" ] }, { diff --git a/colabs/pytorch-lightning/Supercharge_your_Training_with_Pytorch_Lightning_and_Weights_and_Biases.ipynb b/colabs/pytorch-lightning/Supercharge_your_Training_with_Pytorch_Lightning_and_Weights_and_Biases.ipynb index e5a6e36a..b150820c 100644 --- a/colabs/pytorch-lightning/Supercharge_your_Training_with_Pytorch_Lightning_and_Weights_and_Biases.ipynb +++ b/colabs/pytorch-lightning/Supercharge_your_Training_with_Pytorch_Lightning_and_Weights_and_Biases.ipynb @@ -159,7 +159,7 @@ "metadata": {}, "outputs": [], "source": [ - "!wandb login" + "wandb.login()" ] }, { diff --git a/colabs/pytorch-lightning/Transfer_Learning_Using_PyTorch_Lightning.ipynb b/colabs/pytorch-lightning/Transfer_Learning_Using_PyTorch_Lightning.ipynb index d7338d03..3a4a744b 100644 --- a/colabs/pytorch-lightning/Transfer_Learning_Using_PyTorch_Lightning.ipynb +++ b/colabs/pytorch-lightning/Transfer_Learning_Using_PyTorch_Lightning.ipynb @@ -89,7 +89,7 @@ "metadata": {}, "outputs": [], "source": [ - "!wandb login" + "wandb.login()" ] }, { diff --git a/colabs/pytorch-lightning/Wandb_End_to_End_with_PyTorch_Lightning.ipynb b/colabs/pytorch-lightning/Wandb_End_to_End_with_PyTorch_Lightning.ipynb index 8e06d5f8..7a3b7edd 100644 --- a/colabs/pytorch-lightning/Wandb_End_to_End_with_PyTorch_Lightning.ipynb +++ b/colabs/pytorch-lightning/Wandb_End_to_End_with_PyTorch_Lightning.ipynb @@ -71,7 +71,7 @@ "metadata": {}, "outputs": [], "source": [ - "!wandb login" + "wandb.login()" ] }, { diff --git a/colabs/pytorch/How_does_adding_dropout_affect_model_performance.ipynb b/colabs/pytorch/How_does_adding_dropout_affect_model_performance.ipynb index b3e47ed0..be25a18c 100644 --- a/colabs/pytorch/How_does_adding_dropout_affect_model_performance.ipynb +++ b/colabs/pytorch/How_does_adding_dropout_affect_model_performance.ipynb @@ -57,7 +57,7 @@ "metadata": {}, "outputs": [], "source": [ - "!wandb login" + "wandb.login()" ] }, { diff --git a/colabs/pytorch/Organizing_Hyperparameter_Sweeps_in_PyTorch_with_W&B.ipynb b/colabs/pytorch/Organizing_Hyperparameter_Sweeps_in_PyTorch_with_W&B.ipynb index 249fb14a..1ef18a83 100644 --- a/colabs/pytorch/Organizing_Hyperparameter_Sweeps_in_PyTorch_with_W&B.ipynb +++ b/colabs/pytorch/Organizing_Hyperparameter_Sweeps_in_PyTorch_with_W&B.ipynb @@ -107,7 +107,7 @@ "metadata": {}, "outputs": [], "source": [ - "!wandb login" + "wandb.login()" ] }, { diff --git a/colabs/pytorch/Simple_PyTorch_Integration.ipynb b/colabs/pytorch/Simple_PyTorch_Integration.ipynb index 65da1292..9369972e 100644 --- a/colabs/pytorch/Simple_PyTorch_Integration.ipynb +++ b/colabs/pytorch/Simple_PyTorch_Integration.ipynb @@ -195,7 +195,7 @@ "metadata": {}, "outputs": [], "source": [ - "!wandb login" + "wandb.login()" ] }, { diff --git a/colabs/raytune/RayTune_with_wandb.ipynb b/colabs/raytune/RayTune_with_wandb.ipynb index b5489dd2..42b6e272 100644 --- a/colabs/raytune/RayTune_with_wandb.ipynb +++ b/colabs/raytune/RayTune_with_wandb.ipynb @@ -135,7 +135,7 @@ "metadata": {}, 
"outputs": [], "source": [ - "!wandb login" + "wandb.login()" ] }, { diff --git a/colabs/raytune/tune-wandb.ipynb b/colabs/raytune/tune-wandb.ipynb index 6c176a77..f35865f7 100644 --- a/colabs/raytune/tune-wandb.ipynb +++ b/colabs/raytune/tune-wandb.ipynb @@ -2,7 +2,7 @@ "cells": [ { "cell_type": "markdown", - "id": "76091766", + "id": "91d48998", "metadata": {}, "source": [ "\"Open\n" @@ -10,7 +10,7 @@ }, { "cell_type": "markdown", - "id": "c280549a", + "id": "b9c22142", "metadata": {}, "source": [ "# Using Weights & Biases with Tune\n", @@ -41,7 +41,7 @@ { "cell_type": "code", "execution_count": null, - "id": "dcdda930", + "id": "2969a4cb", "metadata": {}, "outputs": [], "source": [ @@ -51,7 +51,7 @@ { "cell_type": "code", "execution_count": null, - "id": "15e30d7a", + "id": "55026926", "metadata": {}, "outputs": [], "source": [ @@ -71,16 +71,16 @@ { "cell_type": "code", "execution_count": null, - "id": "5c6020e1", + "id": "e40e6ca6", "metadata": {}, "outputs": [], "source": [ - "!wandb login" + "wandb.login()" ] }, { "cell_type": "markdown", - "id": "51d2fa38", + "id": "1d6c8d04", "metadata": {}, "source": [ "Next, let's define an easy `objective` function (a Tune `Trainable`) that reports a random loss to Tune.\n", @@ -91,7 +91,7 @@ { "cell_type": "code", "execution_count": null, - "id": "19c570d4", + "id": "e6e75c5c", "metadata": {}, "outputs": [], "source": [ @@ -103,7 +103,7 @@ }, { "cell_type": "markdown", - "id": "845c1f78", + "id": "ff78ca64", "metadata": {}, "source": [ "Given that you provide an `api_key_file` pointing to your Weights & Biases API key, you cna define a\n", @@ -113,7 +113,7 @@ { "cell_type": "code", "execution_count": null, - "id": "47f08927", + "id": "494e1c84", "metadata": {}, "outputs": [], "source": [ @@ -142,7 +142,7 @@ }, { "cell_type": "markdown", - "id": "f1956489", + "id": "de9b195d", "metadata": {}, "source": [ "To use the `wandb_mixin` decorator, you can simply decorate the objective function from earlier.\n", @@ -153,7 +153,7 @@ { "cell_type": "code", "execution_count": null, - "id": "828aad02", + "id": "e0275757", "metadata": {}, "outputs": [], "source": [ @@ -167,7 +167,7 @@ }, { "cell_type": "markdown", - "id": "ed339068", + "id": "59c23cff", "metadata": {}, "source": [ "With the `decorated_objective` defined, running a Tune experiment is as simple as providing this objective and\n", @@ -177,7 +177,7 @@ { "cell_type": "code", "execution_count": null, - "id": "ee3e03f9", + "id": "e4d408df", "metadata": {}, "outputs": [], "source": [ @@ -202,7 +202,7 @@ }, { "cell_type": "markdown", - "id": "d86cc3aa", + "id": "6565559f", "metadata": {}, "source": [ "Finally, you can also define a class-based Tune `Trainable` by using the `WandbTrainableMixin` to define your objective:" @@ -211,7 +211,7 @@ { "cell_type": "code", "execution_count": null, - "id": "a33fad87", + "id": "1b140b28", "metadata": {}, "outputs": [], "source": [ @@ -225,7 +225,7 @@ }, { "cell_type": "markdown", - "id": "aa889a96", + "id": "cc42d1a4", "metadata": {}, "source": [ "Running Tune with this `WandbTrainable` works exactly the same as with the function API.\n", @@ -236,7 +236,7 @@ { "cell_type": "code", "execution_count": null, - "id": "5d568289", + "id": "e2782ef4", "metadata": {}, "outputs": [], "source": [ @@ -261,7 +261,7 @@ }, { "cell_type": "markdown", - "id": "03a7d821", + "id": "58d4914b", "metadata": {}, "source": [ "Since you may not have an API key for Wandb, we can _mock_ the Wandb logger and test all three of our training\n", @@ -272,7 +272,7 @@ { "cell_type": "code", 
"execution_count": null, - "id": "1ab08008", + "id": "fecd85ed", "metadata": {}, "outputs": [], "source": [ @@ -303,7 +303,7 @@ }, { "cell_type": "markdown", - "id": "9960a29b", + "id": "fbd9728a", "metadata": {}, "source": [ "This completes our Tune and Wandb walk-through.\n", diff --git a/colabs/rdkit/wb_rdkit.ipynb b/colabs/rdkit/wb_rdkit.ipynb index 09c6c0b6..678a516c 100644 --- a/colabs/rdkit/wb_rdkit.ipynb +++ b/colabs/rdkit/wb_rdkit.ipynb @@ -50,7 +50,7 @@ "metadata": {}, "outputs": [], "source": [ - "!wandb login" + "wandb.login()" ] }, { diff --git a/colabs/scikit/Simple_Scikit_Integration.ipynb b/colabs/scikit/Simple_Scikit_Integration.ipynb index 62fc5cde..f68bb555 100644 --- a/colabs/scikit/Simple_Scikit_Integration.ipynb +++ b/colabs/scikit/Simple_Scikit_Integration.ipynb @@ -100,7 +100,7 @@ "metadata": {}, "outputs": [], "source": [ - "!wandb login" + "wandb.login()" ] }, { diff --git a/colabs/scikit/w-b-k-means-clustering.ipynb b/colabs/scikit/w-b-k-means-clustering.ipynb index fb4f6474..8dc86436 100644 --- a/colabs/scikit/w-b-k-means-clustering.ipynb +++ b/colabs/scikit/w-b-k-means-clustering.ipynb @@ -114,7 +114,7 @@ "metadata": {}, "outputs": [], "source": [ - "!wandb login" + "wandb.login()" ] }, { diff --git a/colabs/simpletransformers/SimpleTransformersQA.ipynb b/colabs/simpletransformers/SimpleTransformersQA.ipynb index b6926525..9ad3a6a8 100644 --- a/colabs/simpletransformers/SimpleTransformersQA.ipynb +++ b/colabs/simpletransformers/SimpleTransformersQA.ipynb @@ -124,7 +124,7 @@ "metadata": {}, "outputs": [], "source": [ - "!wandb login" + "wandb.login()" ] }, { diff --git a/colabs/spacy/SpaCy_v3_and_W&B.ipynb b/colabs/spacy/SpaCy_v3_and_W&B.ipynb index 02f1d522..5435b742 100644 --- a/colabs/spacy/SpaCy_v3_and_W&B.ipynb +++ b/colabs/spacy/SpaCy_v3_and_W&B.ipynb @@ -112,7 +112,7 @@ "metadata": {}, "outputs": [], "source": [ - "!wandb login" + "wandb.login()" ] }, { diff --git a/colabs/spacy/spaCy_v3_and_W&B_Sweeps.ipynb b/colabs/spacy/spaCy_v3_and_W&B_Sweeps.ipynb index 8ae69d20..e27a6e08 100644 --- a/colabs/spacy/spaCy_v3_and_W&B_Sweeps.ipynb +++ b/colabs/spacy/spaCy_v3_and_W&B_Sweeps.ipynb @@ -93,7 +93,7 @@ "metadata": {}, "outputs": [], "source": [ - "!wandb login" + "wandb.login()" ] }, { diff --git a/colabs/stylegan_nada/StyleGAN-NADA.ipynb b/colabs/stylegan_nada/StyleGAN-NADA.ipynb index 6bfe879d..8db2c1a9 100644 --- a/colabs/stylegan_nada/StyleGAN-NADA.ipynb +++ b/colabs/stylegan_nada/StyleGAN-NADA.ipynb @@ -90,7 +90,7 @@ "metadata": {}, "outputs": [], "source": [ - "!wandb login" + "wandb.login()" ] }, { diff --git a/colabs/super-gradients/yolo_nas.ipynb b/colabs/super-gradients/yolo_nas.ipynb index c360af91..2c953fdf 100644 --- a/colabs/super-gradients/yolo_nas.ipynb +++ b/colabs/super-gradients/yolo_nas.ipynb @@ -359,11 +359,7 @@ ] } ], - "metadata": { - "language_info": { - "name": "python" - } - }, + "metadata": {}, "nbformat": 4, "nbformat_minor": 2 } diff --git a/colabs/tables/AlphaFold_with_W&B_Align,_Fold,_Log.ipynb b/colabs/tables/AlphaFold_with_W&B_Align,_Fold,_Log.ipynb index ad454a6a..ceb8e2c6 100644 --- a/colabs/tables/AlphaFold_with_W&B_Align,_Fold,_Log.ipynb +++ b/colabs/tables/AlphaFold_with_W&B_Align,_Fold,_Log.ipynb @@ -98,7 +98,7 @@ "metadata": {}, "outputs": [], "source": [ - "!wandb login" + "wandb.login()" ] }, { diff --git a/colabs/tables/Log_Tables_Incrementally.ipynb b/colabs/tables/Log_Tables_Incrementally.ipynb index 3be00f69..ab826995 100644 --- a/colabs/tables/Log_Tables_Incrementally.ipynb +++ 
b/colabs/tables/Log_Tables_Incrementally.ipynb @@ -47,7 +47,7 @@ "metadata": {}, "outputs": [], "source": [ - "!wandb login" + "wandb.login()" ] }, { diff --git a/colabs/tables/W&B_Tables_Quickstart.ipynb b/colabs/tables/W&B_Tables_Quickstart.ipynb index 95a578ab..185c4766 100644 --- a/colabs/tables/W&B_Tables_Quickstart.ipynb +++ b/colabs/tables/W&B_Tables_Quickstart.ipynb @@ -55,7 +55,7 @@ "metadata": {}, "outputs": [], "source": [ - "!wandb login" + "wandb.login()" ] }, { diff --git a/colabs/tensorboard/Accelerator_W&B_Tensorboard.ipynb b/colabs/tensorboard/Accelerator_W&B_Tensorboard.ipynb index aa3e3869..f6c28aac 100644 --- a/colabs/tensorboard/Accelerator_W&B_Tensorboard.ipynb +++ b/colabs/tensorboard/Accelerator_W&B_Tensorboard.ipynb @@ -97,7 +97,7 @@ "metadata": {}, "outputs": [], "source": [ - "!wandb login" + "wandb.login()" ] }, { diff --git a/colabs/tensorflow/Hyperparameter_Optimization_in_TensorFlow_using_W&B_Sweeps.ipynb b/colabs/tensorflow/Hyperparameter_Optimization_in_TensorFlow_using_W&B_Sweeps.ipynb index eb7e4e8d..d45de2a0 100644 --- a/colabs/tensorflow/Hyperparameter_Optimization_in_TensorFlow_using_W&B_Sweeps.ipynb +++ b/colabs/tensorflow/Hyperparameter_Optimization_in_TensorFlow_using_W&B_Sweeps.ipynb @@ -150,7 +150,7 @@ "metadata": {}, "outputs": [], "source": [ - "!wandb login" + "wandb.login()" ] }, { diff --git a/colabs/tensorflow/Simple_TensorFlow_Integration.ipynb b/colabs/tensorflow/Simple_TensorFlow_Integration.ipynb index 5265cdeb..a42a5a0c 100644 --- a/colabs/tensorflow/Simple_TensorFlow_Integration.ipynb +++ b/colabs/tensorflow/Simple_TensorFlow_Integration.ipynb @@ -145,7 +145,7 @@ "metadata": {}, "outputs": [], "source": [ - "!wandb login" + "wandb.login()" ] }, { diff --git a/colabs/ultralytics/00_inference.ipynb b/colabs/ultralytics/00_inference.ipynb index 630daac8..4df64d38 100644 --- a/colabs/ultralytics/00_inference.ipynb +++ b/colabs/ultralytics/00_inference.ipynb @@ -150,11 +150,7 @@ ] } ], - "metadata": { - "language_info": { - "name": "python" - } - }, + "metadata": {}, "nbformat": 4, "nbformat_minor": 2 } diff --git a/colabs/ultralytics/01_train_val.ipynb b/colabs/ultralytics/01_train_val.ipynb index ab5a1f26..33ff1ff9 100644 --- a/colabs/ultralytics/01_train_val.ipynb +++ b/colabs/ultralytics/01_train_val.ipynb @@ -139,11 +139,7 @@ ] } ], - "metadata": { - "language_info": { - "name": "python" - } - }, + "metadata": {}, "nbformat": 4, "nbformat_minor": 2 } diff --git a/colabs/wandb-artifacts/Artifacts_Quickstart_with_W&B.ipynb b/colabs/wandb-artifacts/Artifacts_Quickstart_with_W&B.ipynb index fb1a3ba8..abfd131b 100644 --- a/colabs/wandb-artifacts/Artifacts_Quickstart_with_W&B.ipynb +++ b/colabs/wandb-artifacts/Artifacts_Quickstart_with_W&B.ipynb @@ -135,7 +135,7 @@ "metadata": {}, "outputs": [], "source": [ - "!wandb login" + "wandb.login()" ] }, { diff --git a/colabs/wandb-artifacts/Basic_Artifacts_with_W&B.ipynb b/colabs/wandb-artifacts/Basic_Artifacts_with_W&B.ipynb index 43716302..2a566b88 100644 --- a/colabs/wandb-artifacts/Basic_Artifacts_with_W&B.ipynb +++ b/colabs/wandb-artifacts/Basic_Artifacts_with_W&B.ipynb @@ -91,7 +91,7 @@ "metadata": {}, "outputs": [], "source": [ - "!wandb login" + "wandb.login()" ] }, { diff --git a/colabs/wandb-artifacts/W&B_artifacts_for_auditing_purposes.ipynb b/colabs/wandb-artifacts/W&B_artifacts_for_auditing_purposes.ipynb index 34a6c6d7..c7ee4332 100644 --- a/colabs/wandb-artifacts/W&B_artifacts_for_auditing_purposes.ipynb +++ b/colabs/wandb-artifacts/W&B_artifacts_for_auditing_purposes.ipynb @@ -93,7 
+93,7 @@ "metadata": {}, "outputs": [], "source": [ - "!wandb login" + "wandb.login()" ] }, { diff --git a/colabs/wandb-artifacts/train_val_test_split_with_tabular_data.ipynb b/colabs/wandb-artifacts/train_val_test_split_with_tabular_data.ipynb index e0f553f1..a23c6672 100644 --- a/colabs/wandb-artifacts/train_val_test_split_with_tabular_data.ipynb +++ b/colabs/wandb-artifacts/train_val_test_split_with_tabular_data.ipynb @@ -95,7 +95,7 @@ "metadata": {}, "outputs": [], "source": [ - "!wandb login" + "wandb.login()" ] }, { diff --git a/colabs/wandb-log/Configs_in_W&B.ipynb b/colabs/wandb-log/Configs_in_W&B.ipynb index 4b289da0..67469509 100644 --- a/colabs/wandb-log/Configs_in_W&B.ipynb +++ b/colabs/wandb-log/Configs_in_W&B.ipynb @@ -56,7 +56,7 @@ "metadata": {}, "outputs": [], "source": [ - "!wandb login" + "wandb.login()" ] }, { diff --git a/colabs/wandb-log/Customize_metric_logging_with_define_metric.ipynb b/colabs/wandb-log/Customize_metric_logging_with_define_metric.ipynb index c9d7efe1..0769958e 100644 --- a/colabs/wandb-log/Customize_metric_logging_with_define_metric.ipynb +++ b/colabs/wandb-log/Customize_metric_logging_with_define_metric.ipynb @@ -50,7 +50,7 @@ "metadata": {}, "outputs": [], "source": [ - "!wandb login" + "wandb.login()" ] }, { diff --git a/colabs/wandb-log/Generate_gifs_from_logged_images_on_wandb.ipynb b/colabs/wandb-log/Generate_gifs_from_logged_images_on_wandb.ipynb index 203d70dd..5d69b6e6 100644 --- a/colabs/wandb-log/Generate_gifs_from_logged_images_on_wandb.ipynb +++ b/colabs/wandb-log/Generate_gifs_from_logged_images_on_wandb.ipynb @@ -55,7 +55,7 @@ "metadata": {}, "outputs": [], "source": [ - "!wandb login" + "wandb.login()" ] }, { diff --git a/colabs/wandb-log/Log_(Almost)_Anything_with_W&B_Media.ipynb b/colabs/wandb-log/Log_(Almost)_Anything_with_W&B_Media.ipynb index f77347a4..b0df9a05 100644 --- a/colabs/wandb-log/Log_(Almost)_Anything_with_W&B_Media.ipynb +++ b/colabs/wandb-log/Log_(Almost)_Anything_with_W&B_Media.ipynb @@ -80,7 +80,7 @@ "metadata": {}, "outputs": [], "source": [ - "!wandb login" + "wandb.login()" ] }, { diff --git a/colabs/wandb-log/Log_a_Confusion_Matrix_with_W&B.ipynb b/colabs/wandb-log/Log_a_Confusion_Matrix_with_W&B.ipynb index 49742e2e..d628ce43 100644 --- a/colabs/wandb-log/Log_a_Confusion_Matrix_with_W&B.ipynb +++ b/colabs/wandb-log/Log_a_Confusion_Matrix_with_W&B.ipynb @@ -95,7 +95,7 @@ "metadata": {}, "outputs": [], "source": [ - "!wandb login" + "wandb.login()" ] }, { diff --git a/colabs/wandb-log/Logging_Strategies_for_High_Frequency_Data.ipynb b/colabs/wandb-log/Logging_Strategies_for_High_Frequency_Data.ipynb index 6c87bec8..15aa99cc 100644 --- a/colabs/wandb-log/Logging_Strategies_for_High_Frequency_Data.ipynb +++ b/colabs/wandb-log/Logging_Strategies_for_High_Frequency_Data.ipynb @@ -95,7 +95,7 @@ "metadata": {}, "outputs": [], "source": [ - "!wandb login" + "wandb.login()" ] }, { diff --git a/colabs/wandb-log/Plot_Precision_Recall_Curves_with_W&B.ipynb b/colabs/wandb-log/Plot_Precision_Recall_Curves_with_W&B.ipynb index c40dc8bc..0e8d8498 100644 --- a/colabs/wandb-log/Plot_Precision_Recall_Curves_with_W&B.ipynb +++ b/colabs/wandb-log/Plot_Precision_Recall_Curves_with_W&B.ipynb @@ -92,7 +92,7 @@ "metadata": {}, "outputs": [], "source": [ - "!wandb login" + "wandb.login()" ] }, { diff --git a/colabs/wandb-log/Plot_ROC_Curves_with_W&B.ipynb b/colabs/wandb-log/Plot_ROC_Curves_with_W&B.ipynb index 5f7771b0..6bda70c0 100644 --- a/colabs/wandb-log/Plot_ROC_Curves_with_W&B.ipynb +++ 
b/colabs/wandb-log/Plot_ROC_Curves_with_W&B.ipynb @@ -92,7 +92,7 @@ "metadata": {}, "outputs": [], "source": [ - "!wandb login" + "wandb.login()" ] }, { diff --git a/colabs/wandb-log/Run_names_visualized_using_min_dalle.ipynb b/colabs/wandb-log/Run_names_visualized_using_min_dalle.ipynb index cde79b63..072ea979 100644 --- a/colabs/wandb-log/Run_names_visualized_using_min_dalle.ipynb +++ b/colabs/wandb-log/Run_names_visualized_using_min_dalle.ipynb @@ -48,7 +48,7 @@ "metadata": {}, "outputs": [], "source": [ - "!wandb login" + "wandb.login()" ] }, { diff --git a/colabs/wandb-log/Set_Alerts_with_W_&_B.ipynb b/colabs/wandb-log/Set_Alerts_with_W_&_B.ipynb index bbfff44d..137fd17b 100644 --- a/colabs/wandb-log/Set_Alerts_with_W_&_B.ipynb +++ b/colabs/wandb-log/Set_Alerts_with_W_&_B.ipynb @@ -62,7 +62,7 @@ "metadata": {}, "outputs": [], "source": [ - "!wandb login" + "wandb.login()" ] }, { diff --git a/colabs/wandb-model-registry/Model_Registry_E2E.ipynb b/colabs/wandb-model-registry/Model_Registry_E2E.ipynb index 207aa7a7..1124c5c3 100644 --- a/colabs/wandb-model-registry/Model_Registry_E2E.ipynb +++ b/colabs/wandb-model-registry/Model_Registry_E2E.ipynb @@ -84,7 +84,7 @@ "metadata": {}, "outputs": [], "source": [ - "!wandb login" + "wandb.login()" ] }, { diff --git a/colabs/yolox/Train_and_Debug_YOLOX_Models_with_Weights_&_Biases.ipynb b/colabs/yolox/Train_and_Debug_YOLOX_Models_with_Weights_&_Biases.ipynb index c87972fb..4f9c3028 100644 --- a/colabs/yolox/Train_and_Debug_YOLOX_Models_with_Weights_&_Biases.ipynb +++ b/colabs/yolox/Train_and_Debug_YOLOX_Models_with_Weights_&_Biases.ipynb @@ -98,7 +98,7 @@ "metadata": {}, "outputs": [], "source": [ - "!wandb login" + "wandb.login()" ] }, { diff --git a/examples/jax/jax-llm/create_tokenizer.ipynb b/examples/jax/jax-llm/create_tokenizer.ipynb index 0d5aeabc..090e18ef 100644 --- a/examples/jax/jax-llm/create_tokenizer.ipynb +++ b/examples/jax/jax-llm/create_tokenizer.ipynb @@ -3,7 +3,7 @@ { "cell_type": "code", "execution_count": null, - "id": "bbe946e5", + "id": "20860c23", "metadata": {}, "outputs": [], "source": [ @@ -22,7 +22,7 @@ { "cell_type": "code", "execution_count": null, - "id": "98df9b13", + "id": "202d2317", "metadata": {}, "outputs": [], "source": [ @@ -32,7 +32,7 @@ { "cell_type": "code", "execution_count": null, - "id": "31a4368f", + "id": "8e472e8d", "metadata": {}, "outputs": [], "source": [ @@ -43,7 +43,7 @@ { "cell_type": "code", "execution_count": null, - "id": "8e68a2b7", + "id": "8a4be9b2", "metadata": {}, "outputs": [], "source": [ @@ -53,7 +53,7 @@ { "cell_type": "code", "execution_count": null, - "id": "0bb7b8b2", + "id": "9c9cfb89", "metadata": {}, "outputs": [], "source": [ @@ -71,7 +71,7 @@ { "cell_type": "code", "execution_count": null, - "id": "ba4d2d50", + "id": "c280c1eb", "metadata": {}, "outputs": [], "source": [ @@ -81,7 +81,7 @@ { "cell_type": "code", "execution_count": null, - "id": "2dd6d371", + "id": "3da91241", "metadata": {}, "outputs": [], "source": [ @@ -103,7 +103,7 @@ { "cell_type": "code", "execution_count": null, - "id": "b0884429", + "id": "25df71ba", "metadata": {}, "outputs": [], "source": [ @@ -116,7 +116,7 @@ { "cell_type": "code", "execution_count": null, - "id": "4dcade17", + "id": "bedeed59", "metadata": {}, "outputs": [], "source": [ @@ -126,7 +126,7 @@ { "cell_type": "code", "execution_count": null, - "id": "bfc09cc1", + "id": "a1d2330a", "metadata": {}, "outputs": [], "source": [ @@ -136,7 +136,7 @@ { "cell_type": "code", "execution_count": null, - "id": "6f7a1c3a", + "id": "b1af6ef5", 
"metadata": {}, "outputs": [], "source": [ @@ -146,7 +146,7 @@ { "cell_type": "code", "execution_count": null, - "id": "450f71bd", + "id": "f2ff15a1", "metadata": {}, "outputs": [], "source": [ @@ -159,23 +159,6 @@ "display_name": "Python 3", "language": "python", "name": "python3" - }, - "language_info": { - "codemirror_mode": { - "name": "ipython", - "version": 3 - }, - "file_extension": ".py", - "mimetype": "text/x-python", - "name": "python", - "nbconvert_exporter": "python", - "pygments_lexer": "ipython3", - "version": "3.8.10" - }, - "vscode": { - "interpreter": { - "hash": "916dbcbb3f70747c44a77c7bcd40155683ae19c65e1c03b4aa3499c5328201f1" - } } }, "nbformat": 4, diff --git a/examples/pytorch/pytorch-cifar10-sagemaker/train.ipynb b/examples/pytorch/pytorch-cifar10-sagemaker/train.ipynb index 8ae13f31..e1b9dd71 100644 --- a/examples/pytorch/pytorch-cifar10-sagemaker/train.ipynb +++ b/examples/pytorch/pytorch-cifar10-sagemaker/train.ipynb @@ -32,7 +32,7 @@ "metadata": {}, "outputs": [], "source": [ - "!wandb login" + "wandb.login()" ] }, { diff --git a/examples/pytorch/pytorch-intro/intro.ipynb b/examples/pytorch/pytorch-intro/intro.ipynb index a3ad850d..c2a9d5d0 100755 --- a/examples/pytorch/pytorch-intro/intro.ipynb +++ b/examples/pytorch/pytorch-intro/intro.ipynb @@ -121,7 +121,7 @@ "metadata": {}, "outputs": [], "source": [ - "!wandb login" + "wandb.login()" ] }, { diff --git a/examples/pytorch/pytorch-mnist-sagemaker/pytorch_mnist.ipynb b/examples/pytorch/pytorch-mnist-sagemaker/pytorch_mnist.ipynb index 05fcd7ee..2e20fdf9 100644 --- a/examples/pytorch/pytorch-mnist-sagemaker/pytorch_mnist.ipynb +++ b/examples/pytorch/pytorch-mnist-sagemaker/pytorch_mnist.ipynb @@ -501,7 +501,7 @@ "metadata": {}, "outputs": [], "source": [ - "!wandb login" + "wandb.login()" ] }, { diff --git a/examples/sagemaker/text_classification/text_classification.ipynb b/examples/sagemaker/text_classification/text_classification.ipynb index 8bc7c8fd..19e7431a 100644 --- a/examples/sagemaker/text_classification/text_classification.ipynb +++ b/examples/sagemaker/text_classification/text_classification.ipynb @@ -107,7 +107,7 @@ "metadata": {}, "outputs": [], "source": [ - "!wandb login" + "wandb.login()" ] }, {