diff --git a/template/README.md b/template/README.md
index 23c8f24..10dc01e 100644
--- a/template/README.md
+++ b/template/README.md
@@ -139,7 +139,7 @@ zenml model list
This will show you a new `breast_cancer_classifier` model with two versions, `sgd` and `rf` created. You can find out how this was configured in the [YAML pipeline configuration files](configs/).
-If you are a [ZenML Cloud](https://zenml.io/cloud) user, you can see all of this visualized in the dashboard:
+If you are a [ZenML Pro](https://zenml.io/pro) user, you can see all of this visualized in the dashboard:
@@ -165,7 +165,7 @@ While we've demonstrated a manual promotion process for clarity, a more in-depth
-Again, if you are a [ZenML Cloud](https://zenml.io/cloud) user, you would be able to see all this in the cloud dashboard.
+Again, if you are a [ZenML Pro](https://zenml.io/pro) user, you would be able to see all this in the ZenML Pro dashboard.
@@ -184,7 +184,7 @@ that were returned in the pipeline. This completes the MLOps loop of training to
-You can also see all predictions ever created as a complete history in the dashboard (Again only for [ZenML Cloud](https://zenml.io/cloud) users):
+You can also see all predictions ever created as a complete history in the dashboard (again, only for [ZenML Pro](https://zenml.io/pro) users):
@@ -203,7 +203,7 @@ If you want to learn more about ZenML as a tool, then the
to get started. In particular, the [Production Guide](https://docs.zenml.io/user-guide/production-guide/)
goes into more detail as to how to transition these same pipelines into production on the cloud.
-The best way to get a production ZenML instance up and running with all batteries included is the [ZenML Cloud](https://zenml.io/cloud). Check it out!
+The best way to get a production ZenML instance up and running with all batteries included is the [ZenML Pro](https://zenml.io/pro). Check it out!
Also, make sure to join our
diff --git a/template/quickstart.ipynb b/template/quickstart.ipynb
index a91efa2..91737c9 100644
--- a/template/quickstart.ipynb
+++ b/template/quickstart.ipynb
@@ -101,14 +101,14 @@
"id": "966ce581",
"metadata": {},
"source": [
- "## ☁️ Step 1: Connect to ZenML Cloud\n",
+ "## ☁️ Step 1: Connect to ZenML Pro\n",
"\n",
- "If you are using [ZenML Cloud](https://zenml.io/cloud), execute the following\n",
+ "If you are using [ZenML Pro](https://zenml.io/pro), execute the following\n",
"cell with your tenant URL. Otherwise ignore.\n",
"\n",
- "ZenML Cloud is a managed service that provides a hosted ZenML environment. It\n",
+ "ZenML Pro is a managed service that provides a hosted ZenML environment. It\n",
"allows you to run your pipelines on the cloud, manage your metadata, and\n",
- "collaborate with your team. Sign up at [ZenML Cloud](https://zenml.io/cloud) for\n",
+    "collaborate with your team. Sign up at [ZenML Pro](https://zenml.io/pro) for\n",
"a free trial and to get started!"
]
},
@@ -858,7 +858,7 @@
"id": "53517a9a",
"metadata": {},
"source": [
- "If you are a [ZenML Cloud](https://zenml.io/cloud) user, you can see all of this visualized in the dashboard:\n",
+ "If you are a [ZenML Pro](https://zenml.io/pro) user, you can see all of this visualized in the dashboard:\n",
"\n",
""
]
@@ -1102,7 +1102,7 @@
"## What next?\n",
"\n",
"* If you have questions or feedback... join our [**Slack Community**](https://zenml.io/slack) and become part of the ZenML family!\n",
- "* If you want to quickly get started with ZenML, check out the [ZenML Cloud](https://zenml.io/cloud)."
+ "* If you want to quickly get started with ZenML, check out [ZenML Pro](https://zenml.io/pro)."
]
}
],
diff --git a/tests/test_starter_template.py b/tests/test_starter_template.py
index a164db3..ce10645 100644
--- a/tests/test_starter_template.py
+++ b/tests/test_starter_template.py
@@ -66,7 +66,7 @@ def generate_and_run_project(
"--training-pipeline",
"--feature-pipeline",
"--inference-pipeline",
- "--no-cache"
+ "--no-cache",
]
try:
@@ -83,11 +83,15 @@ def generate_and_run_project(
) from e
# check the pipeline run is successful
- for pipeline_name in ["training", "inference", "feature_engineering"]:
+ for pipeline_name, run_count in [
+ ("training", 2),
+ ("inference", 1),
+ ("feature_engineering", 1),
+ ]:
pipeline = Client().get_pipeline(pipeline_name)
assert pipeline
runs = pipeline.runs
- assert len(runs) == 1
+ assert len(runs) == run_count
assert runs[0].status == ExecutionStatus.COMPLETED
# clean up