From e3aa67930114e4836559fd74e854f8fc17a4c9e4 Mon Sep 17 00:00:00 2001 From: Alex Strick van Linschoten Date: Wed, 25 Jun 2025 16:57:41 +0200 Subject: [PATCH 1/6] Sync config requirements with updated requirements.txt MIME-Version: 1.0 Content-Type: text/plain; charset=UTF-8 Content-Transfer-Encoding: 8bit Remove openai dependency and add missing dependencies (zenml, exa-py, requests, anthropic) to match the updated requirements.txt file across all config files. 🤖 Generated with [Claude Code](https://claude.ai/code) Co-Authored-By: Claude --- deep_research/configs/balanced_research.yaml | 9 +++++++-- deep_research/configs/deep_research.yaml | 9 +++++++-- deep_research/configs/enhanced_research.yaml | 7 ++++++- .../configs/enhanced_research_with_approval.yaml | 9 +++++++-- deep_research/configs/quick_research.yaml | 7 ++++++- deep_research/configs/rapid_research.yaml | 7 ++++++- 6 files changed, 39 insertions(+), 9 deletions(-) diff --git a/deep_research/configs/balanced_research.yaml b/deep_research/configs/balanced_research.yaml index 4f8bfa23..c8c1870c 100644 --- a/deep_research/configs/balanced_research.yaml +++ b/deep_research/configs/balanced_research.yaml @@ -71,9 +71,14 @@ steps: settings: docker: requirements: - - openai>=1.0.0 + - zenml>=0.83.1 - tavily-python>=0.2.8 + - exa-py>=1.0.0 - PyYAML>=6.0 - click>=8.0.0 - pydantic>=2.0.0 - - typing_extensions>=4.0.0 \ No newline at end of file + - typing_extensions>=4.0.0 + - requests + - anthropic>=0.52.2 + - litellm==1.69.1 + - langfuse==2.60.8 \ No newline at end of file diff --git a/deep_research/configs/deep_research.yaml b/deep_research/configs/deep_research.yaml index 61cc4c2b..7b58aefe 100644 --- a/deep_research/configs/deep_research.yaml +++ b/deep_research/configs/deep_research.yaml @@ -73,9 +73,14 @@ steps: settings: docker: requirements: - - openai>=1.0.0 + - zenml>=0.83.1 - tavily-python>=0.2.8 + - exa-py>=1.0.0 - PyYAML>=6.0 - click>=8.0.0 - pydantic>=2.0.0 - - typing_extensions>=4.0.0 \ No newline at end of file + - typing_extensions>=4.0.0 + - requests + - anthropic>=0.52.2 + - litellm==1.69.1 + - langfuse==2.60.8 \ No newline at end of file diff --git a/deep_research/configs/enhanced_research.yaml b/deep_research/configs/enhanced_research.yaml index 1933efa9..1a744def 100644 --- a/deep_research/configs/enhanced_research.yaml +++ b/deep_research/configs/enhanced_research.yaml @@ -63,9 +63,14 @@ steps: settings: docker: requirements: - - openai>=1.0.0 + - zenml>=0.83.1 - tavily-python>=0.2.8 + - exa-py>=1.0.0 - PyYAML>=6.0 - click>=8.0.0 - pydantic>=2.0.0 - typing_extensions>=4.0.0 + - requests + - anthropic>=0.52.2 + - litellm==1.69.1 + - langfuse==2.60.8 diff --git a/deep_research/configs/enhanced_research_with_approval.yaml b/deep_research/configs/enhanced_research_with_approval.yaml index 73d6fe42..8d469b75 100644 --- a/deep_research/configs/enhanced_research_with_approval.yaml +++ b/deep_research/configs/enhanced_research_with_approval.yaml @@ -69,9 +69,14 @@ steps: settings: docker: requirements: - - openai>=1.0.0 + - zenml>=0.83.1 - tavily-python>=0.2.8 + - exa-py>=1.0.0 - PyYAML>=6.0 - click>=8.0.0 - pydantic>=2.0.0 - - typing_extensions>=4.0.0 \ No newline at end of file + - typing_extensions>=4.0.0 + - requests + - anthropic>=0.52.2 + - litellm==1.69.1 + - langfuse==2.60.8 \ No newline at end of file diff --git a/deep_research/configs/quick_research.yaml b/deep_research/configs/quick_research.yaml index b210f18f..1e20f087 100644 --- a/deep_research/configs/quick_research.yaml +++ 
b/deep_research/configs/quick_research.yaml @@ -51,9 +51,14 @@ steps: settings: docker: requirements: - - openai>=1.0.0 + - zenml>=0.83.1 - tavily-python>=0.2.8 + - exa-py>=1.0.0 - PyYAML>=6.0 - click>=8.0.0 - pydantic>=2.0.0 - typing_extensions>=4.0.0 + - requests + - anthropic>=0.52.2 + - litellm==1.69.1 + - langfuse==2.60.8 diff --git a/deep_research/configs/rapid_research.yaml b/deep_research/configs/rapid_research.yaml index e69982bf..9f7ae117 100644 --- a/deep_research/configs/rapid_research.yaml +++ b/deep_research/configs/rapid_research.yaml @@ -51,9 +51,14 @@ steps: settings: docker: requirements: - - openai>=1.0.0 + - zenml>=0.83.1 - tavily-python>=0.2.8 + - exa-py>=1.0.0 - PyYAML>=6.0 - click>=8.0.0 - pydantic>=2.0.0 - typing_extensions>=4.0.0 + - requests + - anthropic>=0.52.2 + - litellm==1.69.1 + - langfuse==2.60.8 From 72839c35a0c1559715fafbbee3607f3cbf9d214a Mon Sep 17 00:00:00 2001 From: Alex Strick van Linschoten Date: Wed, 25 Jun 2025 17:03:19 +0200 Subject: [PATCH 2/6] Add environment variables to config files and update README MIME-Version: 1.0 Content-Type: text/plain; charset=UTF-8 Content-Transfer-Encoding: 8bit - Add docker environment variable placeholders to all config files for OPENROUTER_API_KEY, TAVILY_API_KEY, EXA_API_KEY, ANTHROPIC_API_KEY, and Langfuse credentials - Add cloud orchestrator configuration section to README explaining how to replace placeholder values with actual API keys for cloud deployments - Include security recommendation to use cloud provider secret management services 🤖 Generated with [Claude Code](https://claude.ai/code) Co-Authored-By: Claude --- deep_research/README.md | 16 ++++++++ deep_research/configs/balanced_research.yaml | 39 +++++++++++-------- deep_research/configs/deep_research.yaml | 10 ++++- deep_research/configs/enhanced_research.yaml | 8 ++++ .../enhanced_research_with_approval.yaml | 10 ++++- deep_research/configs/quick_research.yaml | 8 ++++ deep_research/configs/rapid_research.yaml | 8 ++++ 7 files changed, 81 insertions(+), 18 deletions(-) diff --git a/deep_research/README.md b/deep_research/README.md index aa03937c..11a5591e 100644 --- a/deep_research/README.md +++ b/deep_research/README.md @@ -632,6 +632,22 @@ This pipeline can integrate with: - **Alerting Systems**: Schedule research on key topics and receive regular reports - **Other ZenML Pipelines**: Chain with downstream analysis or processing +## ☁️ Cloud Orchestrator Configuration + +When running the pipeline with a cloud orchestrator (like Kubernetes, AWS SageMaker, etc.), you'll need to update the environment variables in the configuration files with your actual API keys instead of placeholder values. + +The configuration files in `configs/` contain environment variable placeholders like: +```yaml +settings: + docker: + environment: + OPENROUTER_API_KEY: "YOUR_OPENROUTER_API_KEY" + TAVILY_API_KEY: "YOUR_TAVILY_API_KEY" + # ... other keys +``` + +Replace these placeholder values with your actual API keys when deploying to cloud environments. For security, consider using your cloud provider's secret management services (AWS Secrets Manager, Azure Key Vault, etc.) instead of hardcoding keys in configuration files. + ## 📄 License This project is licensed under the Apache License 2.0. 
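The README guidance above pairs naturally with ZenML's programmatic settings API. As a rough sketch (assuming ZenML's `DockerSettings` class, with an illustrative pipeline name and key list mirroring the placeholders added in this patch), the same Docker environment could be assembled in code from the host environment rather than hardcoded in YAML:

```python
import os

from zenml import pipeline
from zenml.config import DockerSettings

# Illustrative key list mirroring the placeholders added in this patch.
KEYS = [
    "OPENROUTER_API_KEY",
    "TAVILY_API_KEY",
    "EXA_API_KEY",
    "ANTHROPIC_API_KEY",
    "LANGFUSE_PUBLIC_KEY",
    "LANGFUSE_SECRET_KEY",
    "LANGFUSE_HOST",
]

docker_settings = DockerSettings(
    requirements="requirements.txt",
    # Forward only the keys that are actually set on the host,
    # so placeholder strings never end up in the built image.
    environment={key: os.environ[key] for key in KEYS if key in os.environ},
)

@pipeline(settings={"docker": docker_settings})
def deep_research_pipeline() -> None:
    # Steps omitted; this only demonstrates wiring the settings.
    ...
```

The dict comprehension keeps unset keys out of the container environment entirely, which surfaces missing credentials sooner than an empty-string placeholder would.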
diff --git a/deep_research/configs/balanced_research.yaml b/deep_research/configs/balanced_research.yaml index c8c1870c..c4e9f3a4 100644 --- a/deep_research/configs/balanced_research.yaml +++ b/deep_research/configs/balanced_research.yaml @@ -23,50 +23,49 @@ langfuse_project_name: "deep-research" # Research parameters for balanced research parameters: query: "Default research query" - + steps: initial_query_decomposition_step: parameters: llm_model: "sambanova/DeepSeek-R1-Distill-Llama-70B" - max_sub_questions: 10 # Balanced number of sub-questions - + max_sub_questions: 10 # Balanced number of sub-questions + process_sub_question_step: parameters: llm_model_search: "sambanova/Meta-Llama-3.3-70B-Instruct" llm_model_synthesis: "sambanova/DeepSeek-R1-Distill-Llama-70B" - cap_search_length: 20000 # Standard cap for search length - + cap_search_length: 20000 # Standard cap for search length + cross_viewpoint_analysis_step: parameters: llm_model: "sambanova/DeepSeek-R1-Distill-Llama-70B" - viewpoint_categories: - [ + viewpoint_categories: [ "scientific", "political", "economic", "social", "ethical", "historical", - ] # Standard viewpoints - + ] # Standard viewpoints + generate_reflection_step: parameters: llm_model: "sambanova/DeepSeek-R1-Distill-Llama-70B" - + get_research_approval_step: parameters: - timeout: 3600 # 1 hour timeout - max_queries: 2 # Moderate additional queries - + timeout: 3600 # 1 hour timeout + max_queries: 2 # Moderate additional queries + execute_approved_searches_step: parameters: llm_model: "sambanova/Meta-Llama-3.3-70B-Instruct" cap_search_length: 20000 - + pydantic_final_report_step: parameters: llm_model: "sambanova/DeepSeek-R1-Distill-Llama-70B" - + # Environment settings settings: docker: @@ -81,4 +80,12 @@ settings: - requests - anthropic>=0.52.2 - litellm==1.69.1 - - langfuse==2.60.8 \ No newline at end of file + - langfuse==2.60.8 + environment: + OPENROUTER_API_KEY: "YOUR_OPENROUTER_API_KEY" + TAVILY_API_KEY: "YOUR_TAVILY_API_KEY" + EXA_API_KEY: "YOUR_EXA_API_KEY" + ANTHROPIC_API_KEY: "YOUR_ANTHROPIC_API_KEY" + LANGFUSE_PUBLIC_KEY: "YOUR_LANGFUSE_PUBLIC_KEY" + LANGFUSE_SECRET_KEY: "YOUR_LANGFUSE_SECRET_KEY" + LANGFUSE_HOST: "YOUR_LANGFUSE_HOST" diff --git a/deep_research/configs/deep_research.yaml b/deep_research/configs/deep_research.yaml index 7b58aefe..462254dc 100644 --- a/deep_research/configs/deep_research.yaml +++ b/deep_research/configs/deep_research.yaml @@ -83,4 +83,12 @@ settings: - requests - anthropic>=0.52.2 - litellm==1.69.1 - - langfuse==2.60.8 \ No newline at end of file + - langfuse==2.60.8 + environment: + OPENROUTER_API_KEY: "YOUR_OPENROUTER_API_KEY" + TAVILY_API_KEY: "YOUR_TAVILY_API_KEY" + EXA_API_KEY: "YOUR_EXA_API_KEY" + ANTHROPIC_API_KEY: "YOUR_ANTHROPIC_API_KEY" + LANGFUSE_PUBLIC_KEY: "YOUR_LANGFUSE_PUBLIC_KEY" + LANGFUSE_SECRET_KEY: "YOUR_LANGFUSE_SECRET_KEY" + LANGFUSE_HOST: "YOUR_LANGFUSE_HOST" \ No newline at end of file diff --git a/deep_research/configs/enhanced_research.yaml b/deep_research/configs/enhanced_research.yaml index 1a744def..96baad0a 100644 --- a/deep_research/configs/enhanced_research.yaml +++ b/deep_research/configs/enhanced_research.yaml @@ -74,3 +74,11 @@ settings: - anthropic>=0.52.2 - litellm==1.69.1 - langfuse==2.60.8 + environment: + OPENROUTER_API_KEY: "YOUR_OPENROUTER_API_KEY" + TAVILY_API_KEY: "YOUR_TAVILY_API_KEY" + EXA_API_KEY: "YOUR_EXA_API_KEY" + ANTHROPIC_API_KEY: "YOUR_ANTHROPIC_API_KEY" + LANGFUSE_PUBLIC_KEY: "YOUR_LANGFUSE_PUBLIC_KEY" + LANGFUSE_SECRET_KEY: "YOUR_LANGFUSE_SECRET_KEY" + 
LANGFUSE_HOST: "YOUR_LANGFUSE_HOST" diff --git a/deep_research/configs/enhanced_research_with_approval.yaml b/deep_research/configs/enhanced_research_with_approval.yaml index 8d469b75..5b775f97 100644 --- a/deep_research/configs/enhanced_research_with_approval.yaml +++ b/deep_research/configs/enhanced_research_with_approval.yaml @@ -79,4 +79,12 @@ settings: - requests - anthropic>=0.52.2 - litellm==1.69.1 - - langfuse==2.60.8 \ No newline at end of file + - langfuse==2.60.8 + environment: + OPENROUTER_API_KEY: "YOUR_OPENROUTER_API_KEY" + TAVILY_API_KEY: "YOUR_TAVILY_API_KEY" + EXA_API_KEY: "YOUR_EXA_API_KEY" + ANTHROPIC_API_KEY: "YOUR_ANTHROPIC_API_KEY" + LANGFUSE_PUBLIC_KEY: "YOUR_LANGFUSE_PUBLIC_KEY" + LANGFUSE_SECRET_KEY: "YOUR_LANGFUSE_SECRET_KEY" + LANGFUSE_HOST: "YOUR_LANGFUSE_HOST" \ No newline at end of file diff --git a/deep_research/configs/quick_research.yaml b/deep_research/configs/quick_research.yaml index 1e20f087..41cd514c 100644 --- a/deep_research/configs/quick_research.yaml +++ b/deep_research/configs/quick_research.yaml @@ -62,3 +62,11 @@ settings: - anthropic>=0.52.2 - litellm==1.69.1 - langfuse==2.60.8 + environment: + OPENROUTER_API_KEY: "YOUR_OPENROUTER_API_KEY" + TAVILY_API_KEY: "YOUR_TAVILY_API_KEY" + EXA_API_KEY: "YOUR_EXA_API_KEY" + ANTHROPIC_API_KEY: "YOUR_ANTHROPIC_API_KEY" + LANGFUSE_PUBLIC_KEY: "YOUR_LANGFUSE_PUBLIC_KEY" + LANGFUSE_SECRET_KEY: "YOUR_LANGFUSE_SECRET_KEY" + LANGFUSE_HOST: "YOUR_LANGFUSE_HOST" diff --git a/deep_research/configs/rapid_research.yaml b/deep_research/configs/rapid_research.yaml index 9f7ae117..901cbb57 100644 --- a/deep_research/configs/rapid_research.yaml +++ b/deep_research/configs/rapid_research.yaml @@ -62,3 +62,11 @@ settings: - anthropic>=0.52.2 - litellm==1.69.1 - langfuse==2.60.8 + environment: + OPENROUTER_API_KEY: "YOUR_OPENROUTER_API_KEY" + TAVILY_API_KEY: "YOUR_TAVILY_API_KEY" + EXA_API_KEY: "YOUR_EXA_API_KEY" + ANTHROPIC_API_KEY: "YOUR_ANTHROPIC_API_KEY" + LANGFUSE_PUBLIC_KEY: "YOUR_LANGFUSE_PUBLIC_KEY" + LANGFUSE_SECRET_KEY: "YOUR_LANGFUSE_SECRET_KEY" + LANGFUSE_HOST: "YOUR_LANGFUSE_HOST" From 6a0c2dac8d9d4af040b56dafcf39e706d11a110b Mon Sep 17 00:00:00 2001 From: Alex Strick van Linschoten Date: Wed, 25 Jun 2025 17:13:11 +0200 Subject: [PATCH 3/6] Fix typo --- oncoclear/README.md | 4 ++-- 1 file changed, 2 insertions(+), 2 deletions(-) diff --git a/oncoclear/README.md b/oncoclear/README.md index b0090366..3b48ec62 100644 --- a/oncoclear/README.md +++ b/oncoclear/README.md @@ -388,11 +388,11 @@ OncoClear follows a modular architecture: ## 📚 Learn More -For detailed documentation on building MLOps pipelines with ZenML, visit the [ZenML Documentation](https://docs.zenml.io/). In particular, the [Production Guide](https://docs.zenml.io/user-guide/production-guide/) goes into more detail about transitioning pipelines to production in the cloud. +For detailed documentation on building MLOps pipelines with ZenML, visit the [ZenML Documentation](https://docs.zenml.io/). In particular, the [Production Guide](https://docs.zenml.io/user-guides/production-guide/) goes into more detail about transitioning pipelines to production in the cloud. The best way to get a production ZenML instance up and running with all batteries included is with [ZenML Pro](https://zenml.io/pro). Check it out! Also, make sure to join our Slack Slack Community - to become part of the ZenML family! \ No newline at end of file + to become part of the ZenML family! 
From 22016a218311e87b3356ba84524afc20d57f7546 Mon Sep 17 00:00:00 2001 From: Alex Strick van Linschoten Date: Wed, 25 Jun 2025 17:30:48 +0200 Subject: [PATCH 4/6] Fix broken links --- credit-scorer/docs/guides/cloud_deployment.md | 8 ++++---- .../steps/deployment/deployment_deploy.py | 2 +- .../steps/etl/data_loader.py | 2 +- .../steps/etl/train_data_preprocessor.py | 2 +- .../steps/etl/train_data_splitter.py | 2 +- .../steps/hp_tuning/hp_tuning_single_search.py | 2 +- .../steps/inference/inference_predict.py | 2 +- .../compute_performance_metrics_on_current_data.py | 2 +- .../steps/promotion/promote_with_metric_compare.py | 2 +- .../steps/training/model_trainer.py | 2 +- end-to-end-computer-vision/README.md | 2 +- eurorate-predictor/README.md | 7 ++++--- .../promotion/promote_metric_compare_promoter.py | 2 +- .../steps/tokenizer_loader/tokenizer_loader.py | 2 +- llm-complete-guide/README.md | 13 +++++++------ llm-complete-guide/steps/url_scraper.py | 14 +++++++------- magic-photobooth/README.md | 2 +- omni-reader/README.md | 8 ++++---- oncoclear/steps/data_loader.py | 2 +- oncoclear/steps/data_preprocessor.py | 2 +- oncoclear/steps/data_splitter.py | 2 +- oncoclear/steps/inference_predict.py | 2 +- oncoclear/steps/model_evaluator.py | 2 +- vertex-registry-and-deployer/README.md | 8 ++++---- zencoder/README.md | 2 +- zenml-support-agent/README.md | 5 ++--- 26 files changed, 51 insertions(+), 50 deletions(-) diff --git a/credit-scorer/docs/guides/cloud_deployment.md b/credit-scorer/docs/guides/cloud_deployment.md index 8874013b..d870457f 100644 --- a/credit-scorer/docs/guides/cloud_deployment.md +++ b/credit-scorer/docs/guides/cloud_deployment.md @@ -1,6 +1,6 @@ ## ☁️ Cloud Deployment -CreditScorer supports storing artifacts remotely and executing pipelines on cloud infrastructure. For this example, we'll use AWS, but you can use any cloud provider you want. You can also refer to the [AWS Integration Guide](https://docs.zenml.io/how-to/popular-integrations/aws-guide) for detailed instructions. +CreditScorer supports storing artifacts remotely and executing pipelines on cloud infrastructure. For this example, we'll use AWS, but you can use any cloud provider you want. You can also refer to the [AWS Integration Guide](https://docs.zenml.io/stacks/popular-stacks/aws-guide) for detailed instructions. ### AWS Setup @@ -75,6 +75,6 @@ Similar setup processes can be followed for other cloud providers: For detailed configuration options for these providers, refer to the ZenML documentation: -- [GCP Integration Guide](https://docs.zenml.io/how-to/popular-integrations/gcp-guide) -- [Azure Integration Guide](https://docs.zenml.io/how-to/popular-integrations/azure-guide) -- [Kubernetes Integration Guide](https://docs.zenml.io/how-to/popular-integrations/kubernetes) +- [GCP Integration Guide](https://docs.zenml.io/stacks/popular-stacks/gcp-guide) +- [Azure Integration Guide](https://docs.zenml.io/stacks/popular-stacks/azure-guide) +- [Kubernetes Integration Guide](https://docs.zenml.io/stacks/popular-stacks/kubernetes) diff --git a/databricks-production-qa-demo/steps/deployment/deployment_deploy.py b/databricks-production-qa-demo/steps/deployment/deployment_deploy.py index b7407dcf..265bcd40 100644 --- a/databricks-production-qa-demo/steps/deployment/deployment_deploy.py +++ b/databricks-production-qa-demo/steps/deployment/deployment_deploy.py @@ -45,7 +45,7 @@ def deployment_deploy() -> Annotated[ In this example, the step can be configured to use different input data. 
See the documentation for more information: - https://docs.zenml.io/how-to/pipeline-development/use-configuration-files + https://docs.zenml.io/concepts/steps_and_pipelines/yaml_configuration Args: dataset_inf: The inference dataset. diff --git a/databricks-production-qa-demo/steps/etl/data_loader.py b/databricks-production-qa-demo/steps/etl/data_loader.py index 7ed2f850..42704739 100644 --- a/databricks-production-qa-demo/steps/etl/data_loader.py +++ b/databricks-production-qa-demo/steps/etl/data_loader.py @@ -44,7 +44,7 @@ def data_loader( In this example, the step can be configured with number of rows and logic to drop target column or not. See the documentation for more information: - https://docs.zenml.io/how-to/build-pipelines/use-pipeline-step-parameters + https://docs.zenml.io/concepts/steps_and_pipelines#pipeline-parameterization Args: is_inference: If `True` subset will be returned and target column diff --git a/databricks-production-qa-demo/steps/etl/train_data_preprocessor.py b/databricks-production-qa-demo/steps/etl/train_data_preprocessor.py index 011bf422..5b31bcb4 100644 --- a/databricks-production-qa-demo/steps/etl/train_data_preprocessor.py +++ b/databricks-production-qa-demo/steps/etl/train_data_preprocessor.py @@ -52,7 +52,7 @@ def train_data_preprocessor( columns and normalize numerical columns. See the documentation for more information: - https://docs.zenml.io/how-to/build-pipelines/use-pipeline-step-parameters + https://docs.zenml.io/concepts/steps_and_pipelines#pipeline-parameterization Args: dataset_trn: The train dataset. diff --git a/databricks-production-qa-demo/steps/etl/train_data_splitter.py b/databricks-production-qa-demo/steps/etl/train_data_splitter.py index ae541459..002fe294 100644 --- a/databricks-production-qa-demo/steps/etl/train_data_splitter.py +++ b/databricks-production-qa-demo/steps/etl/train_data_splitter.py @@ -41,7 +41,7 @@ def train_data_splitter( In this example, the step can be configured to use different test set sizes. See the documentation for more information: - https://docs.zenml.io/how-to/pipeline-development/use-configuration-files + https://docs.zenml.io/concepts/steps_and_pipelines/yaml_configuration Args: dataset: Dataset read from source. diff --git a/databricks-production-qa-demo/steps/hp_tuning/hp_tuning_single_search.py b/databricks-production-qa-demo/steps/hp_tuning/hp_tuning_single_search.py index b4c3eb0d..7f5ab7e1 100644 --- a/databricks-production-qa-demo/steps/hp_tuning/hp_tuning_single_search.py +++ b/databricks-production-qa-demo/steps/hp_tuning/hp_tuning_single_search.py @@ -50,7 +50,7 @@ def hp_tuning_single_search( to use different input datasets and also have a flag to fall back to default model architecture. See the documentation for more information: - https://docs.zenml.io/how-to/pipeline-development/use-configuration-files + https://docs.zenml.io/concepts/steps_and_pipelines/yaml_configuration Args: model_package: The package containing the model to use for hyperparameter tuning. diff --git a/databricks-production-qa-demo/steps/inference/inference_predict.py b/databricks-production-qa-demo/steps/inference/inference_predict.py index d0854065..5d8359b5 100644 --- a/databricks-production-qa-demo/steps/inference/inference_predict.py +++ b/databricks-production-qa-demo/steps/inference/inference_predict.py @@ -43,7 +43,7 @@ def inference_predict( In this example, the step can be configured to use different input data. 
See the documentation for more information: - https://docs.zenml.io/how-to/pipeline-development/use-configuration-files + https://docs.zenml.io/concepts/steps_and_pipelines/yaml_configuration Args: dataset_inf: The inference dataset. diff --git a/databricks-production-qa-demo/steps/promotion/compute_performance_metrics_on_current_data.py b/databricks-production-qa-demo/steps/promotion/compute_performance_metrics_on_current_data.py index a9722c0a..71fff7d6 100644 --- a/databricks-production-qa-demo/steps/promotion/compute_performance_metrics_on_current_data.py +++ b/databricks-production-qa-demo/steps/promotion/compute_performance_metrics_on_current_data.py @@ -44,7 +44,7 @@ def compute_performance_metrics_on_current_data( and target environment stage for promotion. See the documentation for more information: - https://docs.zenml.io/how-to/pipeline-development/use-configuration-files + https://docs.zenml.io/concepts/steps_and_pipelines/yaml_configuration Args: dataset_tst: The test dataset. diff --git a/databricks-production-qa-demo/steps/promotion/promote_with_metric_compare.py b/databricks-production-qa-demo/steps/promotion/promote_with_metric_compare.py index d23a0c37..5558b7f3 100644 --- a/databricks-production-qa-demo/steps/promotion/promote_with_metric_compare.py +++ b/databricks-production-qa-demo/steps/promotion/promote_with_metric_compare.py @@ -46,7 +46,7 @@ def promote_with_metric_compare( and target environment stage for promotion. See the documentation for more information: - https://docs.zenml.io/how-to/pipeline-development/use-configuration-files + https://docs.zenml.io/concepts/steps_and_pipelines/yaml_configuration Args: latest_metric: Recently trained model metric results. diff --git a/databricks-production-qa-demo/steps/training/model_trainer.py b/databricks-production-qa-demo/steps/training/model_trainer.py index 03edae2e..39b8d2fe 100644 --- a/databricks-production-qa-demo/steps/training/model_trainer.py +++ b/databricks-production-qa-demo/steps/training/model_trainer.py @@ -72,7 +72,7 @@ def model_trainer( hyperparameters to the model constructor. See the documentation for more information: - https://docs.zenml.io/how-to/pipeline-development/use-configuration-files + https://docs.zenml.io/concepts/steps_and_pipelines/yaml_configuration Args: dataset_trn: The preprocessed train dataset. diff --git a/end-to-end-computer-vision/README.md b/end-to-end-computer-vision/README.md index f3fa762b..7e5a6532 100644 --- a/end-to-end-computer-vision/README.md +++ b/end-to-end-computer-vision/README.md @@ -69,7 +69,7 @@ zenml login We will use GCP in the commands listed below, but it will work for other cloud providers. 
-1) Follow our guide to set up your credentials for GCP [here](https://docs.zenml.io/how-to/auth-management/gcp-service-connector) +1) Follow our guide to set up your credentials for GCP [here](https://docs.zenml.io/stacks/service-connectors/connector-types/gcp-service-connector) 2) Set up a bucket in GCP to persist your training data 3) Set up a bucket to use as artifact store within ZenML Learn how to set up a GCP artifact store stack component within ZenML diff --git a/eurorate-predictor/README.md b/eurorate-predictor/README.md index bc61a60a..8893298b 100644 --- a/eurorate-predictor/README.md +++ b/eurorate-predictor/README.md @@ -92,8 +92,7 @@ output "zenml_stack_name" { } ``` To learn more about the terraform script, read the -[ZenML documentation.](https://docs.zenml.io/how-to/ -stack-deployment/deploy-a-cloud-stack-with-terraform) or +[ZenML documentation.](https://docs.zenml.io/stacks/deployment/deploy-a-cloud-stack-with-terraform) or see the [Terraform registry](https://registry.terraform.io/ modules/zenml-io/zenml-stack). @@ -163,4 +162,6 @@ For detailed documentation on using ZenML to build your own MLOps pipelines, ple ## 🔄 Continuous Improvement -EuroRate Predictor is designed for continuous improvement of your interest rate forecasts. As new ECB data becomes available, simply re-run the pipelines to generate updated predictions. \ No newline at end of file +EuroRate Predictor is designed for continuous improvement of your interest rate +forecasts. As new ECB data becomes available, simply re-run the pipelines to +generate updated predictions. diff --git a/huggingface-sagemaker/steps/promotion/promote_metric_compare_promoter.py b/huggingface-sagemaker/steps/promotion/promote_metric_compare_promoter.py index aab26ed0..ebba4575 100644 --- a/huggingface-sagemaker/steps/promotion/promote_metric_compare_promoter.py +++ b/huggingface-sagemaker/steps/promotion/promote_metric_compare_promoter.py @@ -48,7 +48,7 @@ def promote_metric_compare_promoter( In this example, the step can be configured to use different input data. See the documentation for more information: - https://docs.zenml.io/how-to/pipeline-development/use-configuration-files + https://docs.zenml.io/concepts/steps_and_pipelines/yaml_configuration Args: latest_metrics: Recently trained model metrics results. diff --git a/huggingface-sagemaker/steps/tokenizer_loader/tokenizer_loader.py b/huggingface-sagemaker/steps/tokenizer_loader/tokenizer_loader.py index 513bc8d6..e331d8c8 100644 --- a/huggingface-sagemaker/steps/tokenizer_loader/tokenizer_loader.py +++ b/huggingface-sagemaker/steps/tokenizer_loader/tokenizer_loader.py @@ -46,7 +46,7 @@ def tokenizer_loader( For more information on how to configure steps in a pipeline, refer to the following documentation: - https://docs.zenml.io/how-to/pipeline-development/use-configuration-files + https://docs.zenml.io/concepts/steps_and_pipelines/yaml_configuration Args: lower_case: A boolean value indicating whether to convert the input text to diff --git a/llm-complete-guide/README.md b/llm-complete-guide/README.md index 403f4d72..f352d2bf 100644 --- a/llm-complete-guide/README.md +++ b/llm-complete-guide/README.md @@ -166,7 +166,7 @@ zenml service-account create ``` For more information on this part of the process, please refer to the [ZenML -documentation](https://docs.zenml.io/how-to/project-setup-and-management/connecting-to-zenml/connect-with-a-service-account). +documentation](https://docs.zenml.io/concepts/service_connectors). 
Once you have your service account API token and store URL (the URL of your deployed ZenML tenant), you can update the secrets with the following command: @@ -279,10 +279,11 @@ loss function](https://www.philschmid.de/fine-tune-embedding-model-for-rag) whic The basic RAG pipeline will run using a local stack, but if you want to improve the speed of the embeddings step you might want to consider using a cloud -orchestrator. Please follow the instructions in [documentation on popular integrations](https://docs.zenml.io/how-to/popular-integrations) (currently available for -[AWS](https://docs.zenml.io/how-to/popular-integrations/aws-guide) and -[GCP](https://docs.zenml.io/how-to/popular-integrations/gcp-guide)) to learn how you -can run the pipelines on a remote stack. +orchestrator. Please follow the instructions in documentation on popular integrations (currently available for +[AWS](https://docs.zenml.io/stacks/popular-stacks/aws-guide), +[GCP](https://docs.zenml.io/stacks/popular-stacks/gcp-guide), and +[Azure](https://docs.zenml.io/stacks/popular-stacks/azure-guide)) to learn how +you can run the pipelines on a remote stack. If you run the pipeline using a cloud artifact store, logs from all the steps as well as assets like the visualizations will all be shown in the ZenML dashboard. @@ -299,7 +300,7 @@ You can also self-host the ZenML dashboard. Instructions are available in our ## 📜 Project Structure -The project loosely follows [the recommended ZenML project structure](https://docs.zenml.io/how-to/setting-up-a-project-repository/best-practices): +The project loosely follows [the recommended ZenML project structure](https://docs.zenml.io/user-guides/best-practices/set-up-your-repository): ``` . diff --git a/llm-complete-guide/steps/url_scraper.py b/llm-complete-guide/steps/url_scraper.py index 923f7351..98d5e5c3 100644 --- a/llm-complete-guide/steps/url_scraper.py +++ b/llm-complete-guide/steps/url_scraper.py @@ -45,18 +45,18 @@ def url_scraper( "https://docs.zenml.io/getting-started/system-architectures", "https://docs.zenml.io/getting-started/core-concepts", "https://docs.zenml.io/user-guides/llmops-guide/rag-with-zenml/rag-85-loc", - "https://docs.zenml.io/how-to/track-metrics-metadata/logging-metadata", - "https://docs.zenml.io/how-to/debug-and-solve-issues", + "https://docs.zenml.io/concepts/metadata", + "https://docs.zenml.io/user-guides/best-practices/debug-and-solve-issues", "https://docs.zenml.io/stack-components/step-operators/azureml", - # "https://docs.zenml.io/how-to/interact-with-secrets", - # "https://docs.zenml.io/how-to/infrastructure-deployment/auth-management/service-connectors-guide", - # "https://docs.zenml.io/how-to/infrastructure-deployment/auth-management/hyperai-service-connector", + # "https://docs.zenml.io/concepts/secrets", + # "https://docs.zenml.io/stacks/service-connectors/service-connectors-guide", + # "https://docs.zenml.io/stacks/service-connectors/hyperai-service-connector", # "https://docs.zenml.io/stack-components/data-validators/evidently", # "https://docs.zenml.io/stack-components/data-validators", # "https://docs.zenml.io/stack-components/step-operators/sagemaker", # "https://docs.zenml.io/stack-components/alerters/slack", - # "https://docs.zenml.io/how-to/infrastructure-deployment/auth-management/kubernetes-service-connector", - # "https://docs.zenml.io/how-to/infrastructure-deployment/auth-management/azure-service-connector" + # "https://docs.zenml.io/stacks/service-connectors/kubernetes-service-connector", + # 
"https://docs.zenml.io/stacks/service-connectors/azure-service-connector" ] else: docs_urls = get_all_pages(docs_url) diff --git a/magic-photobooth/README.md b/magic-photobooth/README.md index 4fdd7d61..37eca445 100644 --- a/magic-photobooth/README.md +++ b/magic-photobooth/README.md @@ -78,7 +78,7 @@ Behind the scenes, Magic Photobooth employs Low-Rank Adaptation (LoRA) technolog Magic Photobooth requires GPU resources for optimal performance. We recommend deploying on a cloud infrastructure: -1. **Set up your cloud environment** using our [1-click deployment guide](https://docs.zenml.io/how-to/stack-deployment/deploy-a-cloud-stack) for AWS, GCP, or Azure. +1. **Set up your cloud environment** using our [1-click deployment guide](https://docs.zenml.io/stacks/deployment/deploy-a-cloud-stack) for AWS, GCP, or Azure. 2. **Configure your GPU quotas** to ensure sufficient resources for model training and inference. diff --git a/omni-reader/README.md b/omni-reader/README.md index 8043af45..82ac3b2b 100644 --- a/omni-reader/README.md +++ b/omni-reader/README.md @@ -189,7 +189,7 @@ streamlit run app.py ## ☁️ Cloud Deployment -OmniReader supports storing artifacts remotely and executing pipelines on cloud infrastructure. For this example, we'll use AWS, but you can use any cloud provider you want. You can also refer to the [AWS Integration Guide](https://docs.zenml.io/how-to/popular-integrations/aws-guide) for detailed instructions. +OmniReader supports storing artifacts remotely and executing pipelines on cloud infrastructure. For this example, we'll use AWS, but you can use any cloud provider you want. You can also refer to the [AWS Integration Guide](https://docs.zenml.io/stacks/popular-stacks/aws-guide) for detailed instructions. ### AWS Setup @@ -264,9 +264,9 @@ Similar setup processes can be followed for other cloud providers: For detailed configuration options for these providers, refer to the ZenML documentation: -- [GCP Integration Guide](https://docs.zenml.io/how-to/popular-integrations/gcp-guide) -- [Azure Integration Guide](https://docs.zenml.io/how-to/popular-integrations/azure-guide) -- [Kubernetes Integration Guide](https://docs.zenml.io/how-to/popular-integrations/kubernetes) +- [GCP Integration Guide](https://docs.zenml.io/stacks/popular-stacks/gcp-guide) +- [Azure Integration Guide](https://docs.zenml.io/stacks/popular-stacks/azure-guide) +- [Kubernetes Integration Guide](https://docs.zenml.io/stacks/popular-stacks/kubernetes) ### 🐳 Docker Settings for Cloud Deployment diff --git a/oncoclear/steps/data_loader.py b/oncoclear/steps/data_loader.py index dbf6d7da..7519f988 100644 --- a/oncoclear/steps/data_loader.py +++ b/oncoclear/steps/data_loader.py @@ -37,7 +37,7 @@ def data_loader( In this example, the step can be configured with number of rows and logic to drop target column or not. See the documentation for more information: - https://docs.zenml.io/how-to/build-pipelines/use-pipeline-step-parameters + https://docs.zenml.io/concepts/steps_and_pipelines#pipeline-parameterization Args: random_state: Random state for sampling diff --git a/oncoclear/steps/data_preprocessor.py b/oncoclear/steps/data_preprocessor.py index 2b52329e..47ed54f8 100644 --- a/oncoclear/steps/data_preprocessor.py +++ b/oncoclear/steps/data_preprocessor.py @@ -53,7 +53,7 @@ def data_preprocessor( columns and normalize numerical columns. 
See the documentation for more information: - https://docs.zenml.io/how-to/build-pipelines/use-pipeline-step-parameters + https://docs.zenml.io/concepts/steps_and_pipelines#pipeline-parameterization Args: random_state: Random state for sampling. diff --git a/oncoclear/steps/data_splitter.py b/oncoclear/steps/data_splitter.py index b8aeb3b8..dd8825a7 100644 --- a/oncoclear/steps/data_splitter.py +++ b/oncoclear/steps/data_splitter.py @@ -40,7 +40,7 @@ def data_splitter( In this example, the step can be configured to use different test set sizes. See the documentation for more information: - https://docs.zenml.io/how-to/build-pipelines/use-pipeline-step-parameters + https://docs.zenml.io/concepts/steps_and_pipelines#pipeline-parameterization Args: dataset: Dataset read from source. diff --git a/oncoclear/steps/inference_predict.py b/oncoclear/steps/inference_predict.py index c893ab31..13b5c15b 100644 --- a/oncoclear/steps/inference_predict.py +++ b/oncoclear/steps/inference_predict.py @@ -40,7 +40,7 @@ def inference_predict( In this example, the step can be configured to use different input data. See the documentation for more information: - https://docs.zenml.io/how-to/build-pipelines/use-pipeline-step-parameters + https://docs.zenml.io/concepts/steps_and_pipelines#pipeline-parameterization Args: model: Trained model. diff --git a/oncoclear/steps/model_evaluator.py b/oncoclear/steps/model_evaluator.py index 27e7a060..c75c1a61 100644 --- a/oncoclear/steps/model_evaluator.py +++ b/oncoclear/steps/model_evaluator.py @@ -57,7 +57,7 @@ def model_evaluator( does not meet the minimum criteria. See the documentation for more information: - https://docs.zenml.io/how-to/build-pipelines/use-pipeline-step-parameters + https://docs.zenml.io/concepts/steps_and_pipelines#pipeline-parameterization Args: model: The pre-trained model artifact. diff --git a/vertex-registry-and-deployer/README.md b/vertex-registry-and-deployer/README.md index 83689ce1..63c3989b 100644 --- a/vertex-registry-and-deployer/README.md +++ b/vertex-registry-and-deployer/README.md @@ -3,7 +3,7 @@ Welcome to your ZenML project for deploying ML models using Google Cloud's Vertex AI! This project provides a hands-on experience with MLOps pipelines using ZenML and Vertex AI. It contains a collection of ZenML steps, pipelines, and other artifacts to help you efficiently deploy your machine learning models. -Using these pipelines, you can run data preparation, model training, registration, and deployment with a single command while using YAML files for [configuration](https://docs.zenml.io/user-guides/production-guide/configure-pipeline). ZenML takes care of tracking your metadata and [containerizing your pipelines](https://docs.zenml.io/how-to/customize-docker-builds). +Using these pipelines, you can run data preparation, model training, registration, and deployment with a single command while using YAML files for [configuration](https://docs.zenml.io/user-guides/production-guide/configure-pipeline). ZenML takes care of tracking your metadata and [containerizing your pipelines](https://docs.zenml.io/concepts/containerization). ## 🏃 How to run @@ -19,7 +19,7 @@ source .venv/bin/activate pip install -r requirements.txt ``` -We will need to set up access to Google Cloud and Vertex AI. You can follow the instructions in the [ZenML documentation](https://docs.zenml.io/how-to/auth-management/gcp-service-connector) +We will need to set up access to Google Cloud and Vertex AI. 
You can follow the instructions in the [ZenML documentation](https://docs.zenml.io/stacks/service-connectors/connector-types/gcp-service-connector) to register a service connector and set up your Google Cloud credentials. Once you have set up your Google Cloud credentials, we can create a stack and run the deployment pipeline: @@ -66,7 +66,7 @@ python run.py --inference-pipeline ## 📜 Project Structure -The project loosely follows [the recommended ZenML project structure](https://docs.zenml.io/how-to/setting-up-a-project-repository/best-practices): +The project loosely follows [the recommended ZenML project structure](https://docs.zenml.io/user-guides/best-practices/set-up-your-repository): ``` . @@ -84,4 +84,4 @@ The project loosely follows [the recommended ZenML project structure](https://do ├── README.md # This file ├── requirements.txt # Extra Python dependencies └── run.py # CLI tool to run pipelines with ZenML # CLI tool to run pipelines on ZenML Stack -``` \ No newline at end of file +``` diff --git a/zencoder/README.md b/zencoder/README.md index 4a19d532..854e4708 100644 --- a/zencoder/README.md +++ b/zencoder/README.md @@ -32,7 +32,7 @@ One of the first jobs of somebody entering MLOps is to convert their manual scri 2. Type annotating the steps properly 3. Connecting the steps together in a pipeline 4. Creating the appropriate YAML files to [configure your pipeline](https://docs.zenml.io/user-guides/production-guide/configure-pipeline) -5. Developing a Dockerfile or equivalent to encapsulate [the environment](https://docs.zenml.io/how-to/customize-docker-builds). +5. Developing a Dockerfile or equivalent to encapsulate [the environment](https://docs.zenml.io/concepts/containerization). Frameworks like [ZenML](https://github.com/zenml-io/zenml) go a long way in alleviating this burden by abstracting much of the complexity away. However, recent advancement in Large Language Model based Copilots offer hope that even more repetitive aspects of this task can be automated. diff --git a/zenml-support-agent/README.md b/zenml-support-agent/README.md index d0d11689..08f803c3 100644 --- a/zenml-support-agent/README.md +++ b/zenml-support-agent/README.md @@ -136,10 +136,9 @@ It is much more ideal to run a pipeline like the agent creation pipeline on a re you have to [deploy ZenML](https://docs.zenml.io/user-guides/production-guide/deploying-zenml) and set up a stack that supports [our scheduling -feature](https://docs.zenml.io/how-to/build-pipelines/schedule-a-pipeline). If you +feature](https://docs.zenml.io/concepts/steps_and_pipelines/scheduling). If you wish to deploy the slack bot on GCP Cloud Run as described above, you'll also need to be using [a Google Cloud Storage Artifact Store](https://docs.zenml.io/stack-components/artifact-stores/gcp). Note that certain code artifacts like the `Dockerfile` for this project will also need to -be adapted for your own particular needs and requirements. Please check [our docs](https://docs.zenml.io/how-to/setting-up-a-project-repository/best-practices) -for more information. +be adapted for your own particular needs and requirements. Please check [our docs](https://docs.zenml.io/user-guides/best-practices/set-up-your-repository) for more information. 
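Many of the docstrings retargeted above point to the YAML-configuration docs. For orientation, this is roughly how such a config file is applied at run time (a sketch with hypothetical step, pipeline, and file names, assuming ZenML's `with_options` API):

```python
from zenml import pipeline, step

@step
def data_loader(random_state: int = 42, is_inference: bool = False) -> str:
    # Defaults here can be overridden from the YAML config.
    return "inference" if is_inference else f"train-{random_state}"

@pipeline
def training_pipeline() -> None:
    data_loader()

if __name__ == "__main__":
    # Values under `steps.<step_name>.parameters` in the YAML file
    # override the Python defaults above.
    training_pipeline.with_options(config_path="configs/training.yaml")()
```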
From 0cce556e824477253810af8b60e54293b1d1233c Mon Sep 17 00:00:00 2001 From: Alex Strick van Linschoten Date: Thu, 26 Jun 2025 09:25:37 +0200 Subject: [PATCH 5/6] Replace hardcoded API key placeholders with environment variable substitution MIME-Version: 1.0 Content-Type: text/plain; charset=UTF-8 Content-Transfer-Encoding: 8bit - Update all config files to use ${VARIABLE_NAME} syntax instead of "YOUR_VARIABLE_NAME" placeholders - Update README to reflect automatic environment variable pickup - Users no longer need to manually edit config files for cloud deployment 🤖 Generated with [Claude Code](https://claude.ai/code) Co-Authored-By: Claude --- deep_research/README.md | 10 +++++----- deep_research/configs/balanced_research.yaml | 14 +++++++------- deep_research/configs/deep_research.yaml | 14 +++++++------- deep_research/configs/enhanced_research.yaml | 14 +++++++------- .../configs/enhanced_research_with_approval.yaml | 14 +++++++------- deep_research/configs/quick_research.yaml | 14 +++++++------- deep_research/configs/rapid_research.yaml | 14 +++++++------- 7 files changed, 47 insertions(+), 47 deletions(-) diff --git a/deep_research/README.md b/deep_research/README.md index 11a5591e..f17830b5 100644 --- a/deep_research/README.md +++ b/deep_research/README.md @@ -634,19 +634,19 @@ This pipeline can integrate with: ## ☁️ Cloud Orchestrator Configuration -When running the pipeline with a cloud orchestrator (like Kubernetes, AWS SageMaker, etc.), you'll need to update the environment variables in the configuration files with your actual API keys instead of placeholder values. +When running the pipeline with a cloud orchestrator (like Kubernetes, AWS SageMaker, etc.), the configuration files automatically use environment variable substitution to pick up your API keys from the environment. -The configuration files in `configs/` contain environment variable placeholders like: +The configuration files in `configs/` use environment variable substitution like: ```yaml settings: docker: environment: - OPENROUTER_API_KEY: "YOUR_OPENROUTER_API_KEY" - TAVILY_API_KEY: "YOUR_TAVILY_API_KEY" + OPENROUTER_API_KEY: ${OPENROUTER_API_KEY} + TAVILY_API_KEY: ${TAVILY_API_KEY} # ... other keys ``` -Replace these placeholder values with your actual API keys when deploying to cloud environments. For security, consider using your cloud provider's secret management services (AWS Secrets Manager, Azure Key Vault, etc.) instead of hardcoding keys in configuration files. +Simply ensure your environment variables are set in your orchestrator environment, and the pipeline will automatically pick them up. For security, consider using your cloud provider's secret management services (AWS Secrets Manager, Azure Key Vault, etc.) to inject these environment variables into your orchestrator runtime. 
## 📄 License diff --git a/deep_research/configs/balanced_research.yaml b/deep_research/configs/balanced_research.yaml index c4e9f3a4..88db1897 100644 --- a/deep_research/configs/balanced_research.yaml +++ b/deep_research/configs/balanced_research.yaml @@ -82,10 +82,10 @@ settings: - litellm==1.69.1 - langfuse==2.60.8 environment: - OPENROUTER_API_KEY: "YOUR_OPENROUTER_API_KEY" - TAVILY_API_KEY: "YOUR_TAVILY_API_KEY" - EXA_API_KEY: "YOUR_EXA_API_KEY" - ANTHROPIC_API_KEY: "YOUR_ANTHROPIC_API_KEY" - LANGFUSE_PUBLIC_KEY: "YOUR_LANGFUSE_PUBLIC_KEY" - LANGFUSE_SECRET_KEY: "YOUR_LANGFUSE_SECRET_KEY" - LANGFUSE_HOST: "YOUR_LANGFUSE_HOST" + OPENROUTER_API_KEY: ${OPENROUTER_API_KEY} + TAVILY_API_KEY: ${TAVILY_API_KEY} + EXA_API_KEY: ${EXA_API_KEY} + ANTHROPIC_API_KEY: ${ANTHROPIC_API_KEY} + LANGFUSE_PUBLIC_KEY: ${LANGFUSE_PUBLIC_KEY} + LANGFUSE_SECRET_KEY: ${LANGFUSE_SECRET_KEY} + LANGFUSE_HOST: ${LANGFUSE_HOST} diff --git a/deep_research/configs/deep_research.yaml b/deep_research/configs/deep_research.yaml index 462254dc..e9f02136 100644 --- a/deep_research/configs/deep_research.yaml +++ b/deep_research/configs/deep_research.yaml @@ -85,10 +85,10 @@ settings: - litellm==1.69.1 - langfuse==2.60.8 environment: - OPENROUTER_API_KEY: "YOUR_OPENROUTER_API_KEY" - TAVILY_API_KEY: "YOUR_TAVILY_API_KEY" - EXA_API_KEY: "YOUR_EXA_API_KEY" - ANTHROPIC_API_KEY: "YOUR_ANTHROPIC_API_KEY" - LANGFUSE_PUBLIC_KEY: "YOUR_LANGFUSE_PUBLIC_KEY" - LANGFUSE_SECRET_KEY: "YOUR_LANGFUSE_SECRET_KEY" - LANGFUSE_HOST: "YOUR_LANGFUSE_HOST" \ No newline at end of file + OPENROUTER_API_KEY: ${OPENROUTER_API_KEY} + TAVILY_API_KEY: ${TAVILY_API_KEY} + EXA_API_KEY: ${EXA_API_KEY} + ANTHROPIC_API_KEY: ${ANTHROPIC_API_KEY} + LANGFUSE_PUBLIC_KEY: ${LANGFUSE_PUBLIC_KEY} + LANGFUSE_SECRET_KEY: ${LANGFUSE_SECRET_KEY} + LANGFUSE_HOST: ${LANGFUSE_HOST} \ No newline at end of file diff --git a/deep_research/configs/enhanced_research.yaml b/deep_research/configs/enhanced_research.yaml index 96baad0a..b973625f 100644 --- a/deep_research/configs/enhanced_research.yaml +++ b/deep_research/configs/enhanced_research.yaml @@ -75,10 +75,10 @@ settings: - litellm==1.69.1 - langfuse==2.60.8 environment: - OPENROUTER_API_KEY: "YOUR_OPENROUTER_API_KEY" - TAVILY_API_KEY: "YOUR_TAVILY_API_KEY" - EXA_API_KEY: "YOUR_EXA_API_KEY" - ANTHROPIC_API_KEY: "YOUR_ANTHROPIC_API_KEY" - LANGFUSE_PUBLIC_KEY: "YOUR_LANGFUSE_PUBLIC_KEY" - LANGFUSE_SECRET_KEY: "YOUR_LANGFUSE_SECRET_KEY" - LANGFUSE_HOST: "YOUR_LANGFUSE_HOST" + OPENROUTER_API_KEY: ${OPENROUTER_API_KEY} + TAVILY_API_KEY: ${TAVILY_API_KEY} + EXA_API_KEY: ${EXA_API_KEY} + ANTHROPIC_API_KEY: ${ANTHROPIC_API_KEY} + LANGFUSE_PUBLIC_KEY: ${LANGFUSE_PUBLIC_KEY} + LANGFUSE_SECRET_KEY: ${LANGFUSE_SECRET_KEY} + LANGFUSE_HOST: ${LANGFUSE_HOST} diff --git a/deep_research/configs/enhanced_research_with_approval.yaml b/deep_research/configs/enhanced_research_with_approval.yaml index 5b775f97..6aacb530 100644 --- a/deep_research/configs/enhanced_research_with_approval.yaml +++ b/deep_research/configs/enhanced_research_with_approval.yaml @@ -81,10 +81,10 @@ settings: - litellm==1.69.1 - langfuse==2.60.8 environment: - OPENROUTER_API_KEY: "YOUR_OPENROUTER_API_KEY" - TAVILY_API_KEY: "YOUR_TAVILY_API_KEY" - EXA_API_KEY: "YOUR_EXA_API_KEY" - ANTHROPIC_API_KEY: "YOUR_ANTHROPIC_API_KEY" - LANGFUSE_PUBLIC_KEY: "YOUR_LANGFUSE_PUBLIC_KEY" - LANGFUSE_SECRET_KEY: "YOUR_LANGFUSE_SECRET_KEY" - LANGFUSE_HOST: "YOUR_LANGFUSE_HOST" \ No newline at end of file + OPENROUTER_API_KEY: ${OPENROUTER_API_KEY} + TAVILY_API_KEY: ${TAVILY_API_KEY} + 
EXA_API_KEY: ${EXA_API_KEY} + ANTHROPIC_API_KEY: ${ANTHROPIC_API_KEY} + LANGFUSE_PUBLIC_KEY: ${LANGFUSE_PUBLIC_KEY} + LANGFUSE_SECRET_KEY: ${LANGFUSE_SECRET_KEY} + LANGFUSE_HOST: ${LANGFUSE_HOST} \ No newline at end of file diff --git a/deep_research/configs/quick_research.yaml b/deep_research/configs/quick_research.yaml index 41cd514c..ac779bd2 100644 --- a/deep_research/configs/quick_research.yaml +++ b/deep_research/configs/quick_research.yaml @@ -63,10 +63,10 @@ settings: - litellm==1.69.1 - langfuse==2.60.8 environment: - OPENROUTER_API_KEY: "YOUR_OPENROUTER_API_KEY" - TAVILY_API_KEY: "YOUR_TAVILY_API_KEY" - EXA_API_KEY: "YOUR_EXA_API_KEY" - ANTHROPIC_API_KEY: "YOUR_ANTHROPIC_API_KEY" - LANGFUSE_PUBLIC_KEY: "YOUR_LANGFUSE_PUBLIC_KEY" - LANGFUSE_SECRET_KEY: "YOUR_LANGFUSE_SECRET_KEY" - LANGFUSE_HOST: "YOUR_LANGFUSE_HOST" + OPENROUTER_API_KEY: ${OPENROUTER_API_KEY} + TAVILY_API_KEY: ${TAVILY_API_KEY} + EXA_API_KEY: ${EXA_API_KEY} + ANTHROPIC_API_KEY: ${ANTHROPIC_API_KEY} + LANGFUSE_PUBLIC_KEY: ${LANGFUSE_PUBLIC_KEY} + LANGFUSE_SECRET_KEY: ${LANGFUSE_SECRET_KEY} + LANGFUSE_HOST: ${LANGFUSE_HOST} diff --git a/deep_research/configs/rapid_research.yaml b/deep_research/configs/rapid_research.yaml index 901cbb57..8399131b 100644 --- a/deep_research/configs/rapid_research.yaml +++ b/deep_research/configs/rapid_research.yaml @@ -63,10 +63,10 @@ settings: - litellm==1.69.1 - langfuse==2.60.8 environment: - OPENROUTER_API_KEY: "YOUR_OPENROUTER_API_KEY" - TAVILY_API_KEY: "YOUR_TAVILY_API_KEY" - EXA_API_KEY: "YOUR_EXA_API_KEY" - ANTHROPIC_API_KEY: "YOUR_ANTHROPIC_API_KEY" - LANGFUSE_PUBLIC_KEY: "YOUR_LANGFUSE_PUBLIC_KEY" - LANGFUSE_SECRET_KEY: "YOUR_LANGFUSE_SECRET_KEY" - LANGFUSE_HOST: "YOUR_LANGFUSE_HOST" + OPENROUTER_API_KEY: ${OPENROUTER_API_KEY} + TAVILY_API_KEY: ${TAVILY_API_KEY} + EXA_API_KEY: ${EXA_API_KEY} + ANTHROPIC_API_KEY: ${ANTHROPIC_API_KEY} + LANGFUSE_PUBLIC_KEY: ${LANGFUSE_PUBLIC_KEY} + LANGFUSE_SECRET_KEY: ${LANGFUSE_SECRET_KEY} + LANGFUSE_HOST: ${LANGFUSE_HOST} From 1ee2e6c8f68f511dc5ca50f92abc913d794740d5 Mon Sep 17 00:00:00 2001 From: Alex Strick van Linschoten Date: Thu, 26 Jun 2025 09:27:27 +0200 Subject: [PATCH 6/6] Remove redundant zenml requirement from config files MIME-Version: 1.0 Content-Type: text/plain; charset=UTF-8 Content-Transfer-Encoding: 8bit ZenML is the orchestration framework already running the pipeline, so it doesn't need to be installed as a dependency inside Docker containers. 
🤖 Generated with [Claude Code](https://claude.ai/code) Co-Authored-By: Claude --- deep_research/configs/balanced_research.yaml | 1 - deep_research/configs/deep_research.yaml | 1 - deep_research/configs/enhanced_research.yaml | 1 - deep_research/configs/enhanced_research_with_approval.yaml | 1 - deep_research/configs/quick_research.yaml | 1 - deep_research/configs/rapid_research.yaml | 1 - 6 files changed, 6 deletions(-) diff --git a/deep_research/configs/balanced_research.yaml b/deep_research/configs/balanced_research.yaml index 88db1897..b292304c 100644 --- a/deep_research/configs/balanced_research.yaml +++ b/deep_research/configs/balanced_research.yaml @@ -70,7 +70,6 @@ steps: settings: docker: requirements: - - zenml>=0.83.1 - tavily-python>=0.2.8 - exa-py>=1.0.0 - PyYAML>=6.0 diff --git a/deep_research/configs/deep_research.yaml b/deep_research/configs/deep_research.yaml index e9f02136..0ff22b83 100644 --- a/deep_research/configs/deep_research.yaml +++ b/deep_research/configs/deep_research.yaml @@ -73,7 +73,6 @@ steps: settings: docker: requirements: - - zenml>=0.83.1 - tavily-python>=0.2.8 - exa-py>=1.0.0 - PyYAML>=6.0 diff --git a/deep_research/configs/enhanced_research.yaml b/deep_research/configs/enhanced_research.yaml index b973625f..956d30a2 100644 --- a/deep_research/configs/enhanced_research.yaml +++ b/deep_research/configs/enhanced_research.yaml @@ -63,7 +63,6 @@ steps: settings: docker: requirements: - - zenml>=0.83.1 - tavily-python>=0.2.8 - exa-py>=1.0.0 - PyYAML>=6.0 diff --git a/deep_research/configs/enhanced_research_with_approval.yaml b/deep_research/configs/enhanced_research_with_approval.yaml index 6aacb530..419d38be 100644 --- a/deep_research/configs/enhanced_research_with_approval.yaml +++ b/deep_research/configs/enhanced_research_with_approval.yaml @@ -69,7 +69,6 @@ steps: settings: docker: requirements: - - zenml>=0.83.1 - tavily-python>=0.2.8 - exa-py>=1.0.0 - PyYAML>=6.0 diff --git a/deep_research/configs/quick_research.yaml b/deep_research/configs/quick_research.yaml index ac779bd2..aac4ece7 100644 --- a/deep_research/configs/quick_research.yaml +++ b/deep_research/configs/quick_research.yaml @@ -51,7 +51,6 @@ steps: settings: docker: requirements: - - zenml>=0.83.1 - tavily-python>=0.2.8 - exa-py>=1.0.0 - PyYAML>=6.0 diff --git a/deep_research/configs/rapid_research.yaml b/deep_research/configs/rapid_research.yaml index 8399131b..1962d0ba 100644 --- a/deep_research/configs/rapid_research.yaml +++ b/deep_research/configs/rapid_research.yaml @@ -51,7 +51,6 @@ steps: settings: docker: requirements: - - zenml>=0.83.1 - tavily-python>=0.2.8 - exa-py>=1.0.0 - PyYAML>=6.0
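A closing note on the last two patches: once the configs rely on `${VARIABLE_NAME}` substitution, an unset variable is easy to overlook until a step fails at runtime. A small pre-flight check before triggering a run can catch this early (a sketch; the variable list mirrors the `environment:` blocks above):

```python
import os

# Variable names mirror the `environment:` blocks in configs/.
REQUIRED = (
    "OPENROUTER_API_KEY",
    "TAVILY_API_KEY",
    "EXA_API_KEY",
    "ANTHROPIC_API_KEY",
    "LANGFUSE_PUBLIC_KEY",
    "LANGFUSE_SECRET_KEY",
    "LANGFUSE_HOST",
)

missing = [name for name in REQUIRED if not os.environ.get(name)]
if missing:
    raise SystemExit(
        "Set these environment variables before running the pipeline: "
        + ", ".join(missing)
    )
print("All required API keys are present.")
```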