diff --git a/.circleci/config.yml b/.circleci/config.yml
index aa6af7d18fbd..649d14a562f6 100644
--- a/.circleci/config.yml
+++ b/.circleci/config.yml
@@ -97,7 +97,7 @@ jobs:
# to ensure our requirements.txt file is accurate
test_lower_prefect:
docker:
- - image: python:3.6
+ - image: python:3.7
auth:
username: $DOCKER_HUB_USER
password: $DOCKER_HUB_PW
@@ -195,39 +195,9 @@ jobs:
command: pytest tests/tasks -vvrfEsx --numprocesses 4 --dist=loadfile
# ----------------------------------
- # Run unit tests in Python 3.6-3.9
+ # Run unit tests in Python 3.7-3.9
# ----------------------------------
- test_36:
- docker:
- - image: python:3.6
- auth:
- username: $DOCKER_HUB_USER
- password: $DOCKER_HUB_PW
- steps:
- - *attach_workspace
- - checkout
- - setup_remote_docker
- - run:
- name: Install zsh for tests
- command: apt-get update && apt-get install -y zsh
-
- - run:
- name: Install graphviz
- command: apt-get update && apt-get install -y graphviz
-
- - run:
- name: Upgrade pip
- command: pip install "pip==20.2.4"
-
- - run:
- name: Install Prefect
- command: pip install ".[base_library_ci]"
-
- - run:
- name: Run tests
- command: pytest tests --ignore=tests/tasks -vvrfEsx --numprocesses 4 --dist=loadfile
-
test_37:
docker:
- image: python:3.7
@@ -437,6 +407,7 @@ jobs:
command: |
docker login --username $DOCKER_HUB_USER --password $DOCKER_HUB_PW
docker push prefecthq/prefect:master
+
build_core_docker_image:
docker:
- image: docker
@@ -493,49 +464,6 @@ jobs:
docker login --username $DOCKER_HUB_USER --password $DOCKER_HUB_PW
docker push prefecthq/prefect:core
- build_release_candidate_docker_image:
- docker:
- - image: docker
- auth:
- username: $DOCKER_HUB_USER
- password: $DOCKER_HUB_PW
- parameters:
- python_version:
- type: string
- extras:
- type: string
- environment:
- PYTHON_VERSION: << parameters.python_version >>
- EXTRAS: << parameters.extras >>
- steps:
- - checkout
- - run:
- name: 1.0rc branch check
- command: |
- apk add git
- if [[ $(git branch --contains $CIRCLE_SHA1 --points-at 1.0rc | grep 1.0rc | wc -l) -ne 1 ]]; then
- echo "commit $CIRCLE_SHA1 is not a member of the 1.0rc branch"
- exit 1
- fi
- - setup_remote_docker:
- docker_layer_caching: true
- - run:
- name: Build image
- command: |
- set -u
- docker build \
- --build-arg GIT_SHA=$CIRCLE_SHA1 \
- --build-arg BUILD_DATE=$(date -u +'%Y-%m-%dT%H:%M:%SZ') \
- --build-arg PREFECT_VERSION=$CIRCLE_SHA1 \
- --build-arg PYTHON_VERSION=$PYTHON_VERSION \
- --build-arg EXTRAS=$EXTRAS \
- -t prefecthq/prefect:1.0rc0 \
- .
- - run:
- name: Push 1.0rc0 tag
- command: |
- docker login --username $DOCKER_HUB_USER --password $DOCKER_HUB_PW
- docker push prefecthq/prefect:1.0rc0
promote_server_artifacts:
docker:
- image: docker
@@ -600,7 +528,6 @@ workflows:
'Run tests':
jobs:
- - test_36
- test_37
- test_38
- test_39
@@ -622,23 +549,8 @@ workflows:
branches:
only: master
- - build_release_candidate_docker_image:
- python_version: '3.7'
- extras: 'all_orchestration_extras'
- filters:
- branches:
- only: 1.0rc
-
'Build and publish release artifacts':
jobs:
- - build_docker_image:
- python_version: '3.6'
- extras: 'all_orchestration_extras'
- filters:
- branches:
- ignore: /.*/
- tags:
- only: /^[0-9]+\.[0-9]+\.[0-9]+$/
- build_docker_image:
python_version: '3.7'
extras: 'all_orchestration_extras'
@@ -684,3 +596,43 @@ workflows:
ignore: /.*/
tags:
only: /^[0-9]+\.[0-9]+\.[0-9]+$/
+
+ # RC -----
+
+ - build_docker_image:
+ python_version: '3.7'
+ extras: 'all_orchestration_extras'
+ filters:
+ branches:
+ ignore: /.*/
+ tags:
+          only: /^1\.0rc[0-9]$/
+ - build_docker_image:
+ python_version: '3.8'
+ extras: 'all_orchestration_extras'
+ filters:
+ branches:
+ ignore: /.*/
+ tags:
+          only: /^1\.0rc[0-9]$/
+ - build_docker_image:
+ python_version: '3.9'
+ extras: 'all_orchestration_extras'
+ filters:
+ branches:
+ ignore: /.*/
+ tags:
+          only: /^1\.0rc[0-9]$/
+ - release_to_pypi:
+ filters:
+ branches:
+ ignore: /.*/
+ tags:
+          only: /^1\.0rc[0-9]$/
+ - build_core_docker_image:
+ python_version: '3.9'
+ filters:
+ branches:
+ ignore: /.*/
+ tags:
+          only: /^1\.0rc[0-9]$/
\ No newline at end of file
diff --git a/CHANGELOG.md b/CHANGELOG.md
index 458f68749f5c..11d811d345a4 100644
--- a/CHANGELOG.md
+++ b/CHANGELOG.md
@@ -1,5 +1,33 @@
# Changelog
+## 1.0.0
+
+### Highlights
+
+- Authentication with tokens has been removed; use API keys instead. - [#4643](https://github.com/PrefectHQ/prefect/pull/4643)
+- Python 3.6 is no longer supported. Use Python 3.7+ instead. - [#5136](https://github.com/PrefectHQ/prefect/pull/5136)
+- Flow `Environment`s have been removed; use `RunConfig`s instead. - [#5072](https://github.com/PrefectHQ/prefect/pull/5072), [docs](https://docs.prefect.io/orchestration/flow_config/upgrade.html)
+
+### Breaking Changes
+
+- The AWS Fargate agent has been removed; use the ECS agent instead. - [#3812](https://github.com/PrefectHQ/prefect/pull/3812)
+- The `DockerAgent` no longer accepts a `docker_interface` argument; passing it now raises an exception. - [#4446](https://github.com/PrefectHQ/prefect/pull/4446)
+- Agents will no longer check for authentication at the `prefect.cloud.agent.auth_token` config key. - [#5140](https://github.com/PrefectHQ/prefect/pull/5140)
+
+- Executors can no longer be imported from `prefect.engine.executors`; use `prefect.executors` instead. - [#3798](https://github.com/PrefectHQ/prefect/pull/3798)
+- `Parameter` is no longer importable from `prefect.core.tasks`; use `prefect.Parameter` instead (see the migration sketch below this list).
+- Exceptions are no longer importable from `prefect.utilities.exceptions`; use `prefect.exceptions` instead. - [#4664](https://github.com/PrefectHQ/prefect/pull/4664)
+- `Client.login_to_tenant` has been renamed to `Client.switch_tenant`.
+
+- The `prefect register flow` command has been removed; use `prefect register` instead. - [#4256](https://github.com/PrefectHQ/prefect/pull/4256)
+- The `prefect run flow` command has been removed; use `prefect run` instead. - [#4463](https://github.com/PrefectHQ/prefect/pull/4463)
+- Authentication token CLI commands `create-token`, `revoke-token`, `list-tokens` have been removed; use API keys instead. - [#4643](https://github.com/PrefectHQ/prefect/pull/4643)
+- `prefect auth login` no longer accepts authentication tokens. - [#5140](https://github.com/PrefectHQ/prefect/pull/5140)
+- `prefect auth purge-tokens` has been added to delete the Prefect-managed tokens directory. - [#5140](https://github.com/PrefectHQ/prefect/pull/5140)
+
+- The `log_to_cloud` setting is now ignored; use `send_flow_run_logs` instead. - [#4487](https://github.com/PrefectHQ/prefect/pull/4487)
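+
+For reference, a minimal migration sketch covering the import and configuration moves above (the flow itself is illustrative):
+
+```python
+from prefect import Flow, Parameter  # was: from prefect.core.tasks import Parameter
+from prefect.executors import LocalDaskExecutor  # was: prefect.engine.executors
+from prefect.exceptions import ClientError  # was: prefect.utilities.exceptions
+from prefect.run_configs import UniversalRun  # RunConfigs replace the removed Environments
+
+with Flow("example") as flow:
+    x = Parameter("x", default=1)
+
+flow.executor = LocalDaskExecutor()
+flow.run_config = UniversalRun()  # replaces flow.environment = ...
+```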
+
## 0.15.13
Released on January 25, 2022.
diff --git a/MANIFEST.in b/MANIFEST.in
index ed0d48b00319..89ed2064d470 100644
--- a/MANIFEST.in
+++ b/MANIFEST.in
@@ -8,6 +8,5 @@ include test-requirements.txt
include src/prefect/_version.py
include src/prefect/_siginfo.py
include src/prefect/_sig29/*.txt
-graft src/prefect/environments
graft src/prefect/agent
graft src/prefect/tasks/aws/waiters
diff --git a/README.md b/README.md
index bc5f05892ea0..422a01fc2d4a 100644
--- a/README.md
+++ b/README.md
@@ -219,7 +219,7 @@ Read about Prefect's [community](https://docs.prefect.io/core/community.html) or
### Requirements
-Prefect requires Python 3.6+. If you're new to Python, we recommend installing the [Anaconda distribution](https://www.anaconda.com/distribution/).
+Prefect requires Python 3.7+. If you're new to Python, we recommend installing the [Anaconda distribution](https://www.anaconda.com/distribution/).
### Latest Release
diff --git a/changes/pr5279.yaml b/changes/pr5279.yaml
new file mode 100644
index 000000000000..c2149f368b8f
--- /dev/null
+++ b/changes/pr5279.yaml
@@ -0,0 +1,2 @@
+enhancement:
+  - "Switched to a futures-based implementation for LocalDaskExecutor - [#5046](https://github.com/PrefectHQ/prefect/issues/5046)"
diff --git a/changes/pr5293.yaml b/changes/pr5293.yaml
new file mode 100644
index 000000000000..636072d7ae53
--- /dev/null
+++ b/changes/pr5293.yaml
@@ -0,0 +1,5 @@
+fix:
+  - "Declare Prefect's public API - [#5293](https://github.com/PrefectHQ/prefect/pull/5293)"
+
+contributor:
+ - "[Oliver Mannion](https://github.com/tekumara)"
diff --git a/docs/.vuepress/config.js b/docs/.vuepress/config.js
index 04fef6882776..432266d58217 100644
--- a/docs/.vuepress/config.js
+++ b/docs/.vuepress/config.js
@@ -129,11 +129,6 @@ module.exports = {
collapsable: true,
children: getChildren('docs/api/latest', 'engine')
},
- {
- title: 'prefect.environments',
- collapsable: true,
- children: getChildren('docs/api/latest', 'environments')
- },
{
title: 'prefect.executors',
collapsable: true,
@@ -238,7 +233,6 @@ module.exports = {
'agents/kubernetes',
'agents/vertex',
'agents/ecs',
- 'agents/fargate'
]
},
{
diff --git a/docs/core/advanced_tutorials/README.md b/docs/core/advanced_tutorials/README.md
index d760321efcd9..e278304397e3 100644
--- a/docs/core/advanced_tutorials/README.md
+++ b/docs/core/advanced_tutorials/README.md
@@ -5,7 +5,7 @@ sidebarDepth: 0
# Contents
These tutorials are intended to help the reader get acquainted with the many features of Prefect and its vocabulary. All code examples
-are locally executable in any Python version supported by Prefect (3.6+). Note that all features presented here are run without
+are locally executable in any Python version supported by Prefect (3.7+). Note that all features presented here are run without
the Prefect server.
## [ETL](etl.md)
diff --git a/docs/core/advanced_tutorials/local-debugging.md b/docs/core/advanced_tutorials/local-debugging.md
index 2e06605543f2..4cd8ad80837c 100644
--- a/docs/core/advanced_tutorials/local-debugging.md
+++ b/docs/core/advanced_tutorials/local-debugging.md
@@ -300,7 +300,7 @@ def whoami():
return reddit.user.me()
-storage = Docker(base_image="python:3.6", registry_url="http://my.personal.registry")
+storage = Docker(base_image="python:3.7", registry_url="http://my.personal.registry")
flow = Flow("reddit-flow", storage=storage, tasks=[whoami])
```
@@ -347,7 +347,7 @@ Which will result in a very explicit traceback!
```
Traceback (most recent call last):
flow = cloudpickle.loads(decrypted_pickle)
- File "/usr/local/lib/python3.6/site-packages/cloudpickle/cloudpickle.py", line 944, in subimport
+ File "/usr/local/lib/python3.7/site-packages/cloudpickle/cloudpickle.py", line 944, in subimport
__import__(name)
ModuleNotFoundError: No module named 'praw'
```
diff --git a/docs/core/concepts/execution.md b/docs/core/concepts/execution.md
index 7c8d05dc9e26..14a1001c2b95 100644
--- a/docs/core/concepts/execution.md
+++ b/docs/core/concepts/execution.md
@@ -30,7 +30,7 @@ from prefect import task, Flow
@task(name="Task A")
def task_a():
if random.random() > 0.5:
- raise ValueError("Non-deterministic error has occured.")
+ raise ValueError("Non-deterministic error has occurred.")
@task(name="Task B", trigger=all_successful)
def task_b():
diff --git a/docs/core/concepts/persistence.md b/docs/core/concepts/persistence.md
index fc6618740f7d..a04af66de476 100644
--- a/docs/core/concepts/persistence.md
+++ b/docs/core/concepts/persistence.md
@@ -3,7 +3,7 @@
Prefect provides a few ways to work with cached data between tasks or flows. In-memory caching of task **inputs** is automatically applied by the Prefect pipeline to optimize retries or other times when Prefect can anticipate rerunning the same task in the future. Users can also configure to cache the **output** of a prior run of a task and use it as the output of a future run of that task or even as the output of a run of a different task.
Out of the box, Prefect Core does not persist cached data in a permanent fashion. All data, results, _and_ cached states are only stored in memory within the
-Python process running the flow. However, Prefect Core provides all of the necessary hooks for configuring your data to be persisted and retrieved from external locations. When combined with a compatible state persistence layer, such as Prefect Core's server or [Prefect Cloud](../../orchestration/faq.html#what-is-the-difference-between-prefect-core-and-prefect-cloud), this means flows can pick up exactly where they left off if the in-memory cache is lost.
+Python process running the flow. However, Prefect Core provides all of the necessary hooks for configuring your data to be persisted and retrieved from external locations. When combined with a compatible state persistence layer, such as Prefect Core's server or [Prefect Cloud](/orchestration/getting-started/set-up.html), this means flows can pick up exactly where they left off if the in-memory cache is lost.
[[toc]]
diff --git a/docs/core/development/style.md b/docs/core/development/style.md
index e8343040df0b..aa2d643fcbe7 100644
--- a/docs/core/development/style.md
+++ b/docs/core/development/style.md
@@ -11,8 +11,6 @@ cd prefect
black .
```
-Please note that black requires Python 3.6+ (though Prefect does not).
-
Formatting can be easy to forget when developing, so you may choose to install a pre-push hook for black, as follows:
```
diff --git a/docs/core/development/tests.md b/docs/core/development/tests.md
index f238190b5b8c..f4ee55165f9a 100644
--- a/docs/core/development/tests.md
+++ b/docs/core/development/tests.md
@@ -76,7 +76,7 @@ The `--sw` flag will exit `pytest` the first time it encounters an error; subseq
CI will run automatically against any PR you open. Please run your tests locally first to avoid "debugging in CI", as this takes up resources that could be used by other contributors.
-In CI, Prefect's unit tests are run against Python 3.6, 3.7, and 3.8. A separate "formatting" CI job is also run. Since formatting errors are common in PRs, we have found this to be a useful early-warning during development.
+In CI, Prefect's unit tests are run against Python 3.7, 3.8, and 3.9. A separate "formatting" CI job is also run. Since formatting errors are common in PRs, we have found this to be a useful early-warning during development.
## Documentation
diff --git a/docs/core/examples/overview.md b/docs/core/examples/overview.md
index 2e69d7b2c02c..319dacd60499 100644
--- a/docs/core/examples/overview.md
+++ b/docs/core/examples/overview.md
@@ -1,16 +1,16 @@
-# Overview
+# Prefect Tutorial Examples
-Prefect includes a number of examples covering different features. These can be
-viewed live in the docs, or accessed from the GitHub repo
+Prefect includes a number of examples covering different features. Some are
+covered in the tutorials, and all can be accessed from the GitHub repo
[here](https://github.com/PrefectHQ/prefect/tree/master/examples).
-## Running with Prefect Cloud or Server
+## Running examples with Prefect Cloud or Server
When running with Prefect Cloud or Prefect Server, you can register the
examples in a new project with the Prefect CLI. You can either register all the
examples at once, or select specific examples by name.
-```
+```bash
# Create a new project named "Prefect Examples"
$ prefect create project "Prefect Examples"
@@ -32,9 +32,9 @@ make sure the Github extra is installed so the agent can pull the flows by doing
Then to start a local agent for running the examples:
-```
+```bash
$ prefect agent local start -l prefect-examples
```
-If you haven't already, we recommend going through the [Orchestration
-Tutorial](/orchestration/tutorial/overview.md) beforehand.
+If you haven't already, we recommend going through the Getting Started
+[Orchestration Layer](/orchestration/getting-started/set-up.html) topics beforehand.
diff --git a/docs/core/getting_started/install.md b/docs/core/getting_started/install.md
index e1056921e316..23eaa7f97cc6 100644
--- a/docs/core/getting_started/install.md
+++ b/docs/core/getting_started/install.md
@@ -2,7 +2,7 @@
## Basic installation
-Prefect requires Python 3.6+. If you're new to Python, we recommend installing the [Anaconda distribution](https://www.anaconda.com/distribution/).
+Prefect requires Python 3.7+. If you're new to Python, we recommend installing the [Anaconda distribution](https://www.anaconda.com/distribution/).
To install Prefect, run:
diff --git a/docs/core/tutorial/07-next-steps.md b/docs/core/tutorial/07-next-steps.md
index 85f60286d401..2773ce3a56d3 100644
--- a/docs/core/tutorial/07-next-steps.md
+++ b/docs/core/tutorial/07-next-steps.md
@@ -83,4 +83,4 @@ with Flow("Simple Pipeline") as flow:
flow.run()
```
-The Task library includes integrations with Kubernetes, GitHub, Slack, Docker, AWS, GCP, [and more](/core/task_library/)!
+The Task library includes integrations with Kubernetes, GitHub, Slack, Docker, AWS, GCP, [and more](/core/task_library/overview.html)!
diff --git a/docs/orchestration/agents/ecs.md b/docs/orchestration/agents/ecs.md
index f892147accf9..cba451022d30 100644
--- a/docs/orchestration/agents/ecs.md
+++ b/docs/orchestration/agents/ecs.md
@@ -378,7 +378,7 @@ When starting an ECS agent from the command line, you can configure retry behavi
the ECS agent by setting [AWS CLI retry modes](https://docs.aws.amazon.com/cli/latest/userguide/cli-configure-retries.html).
For example, the following example specifies the AWS Adaptive retry mode and up to 10
-retry attemps, then starts the ECS agent:
+retry attempts, then starts the ECS agent:
For example:
diff --git a/docs/orchestration/agents/fargate.md b/docs/orchestration/agents/fargate.md
deleted file mode 100644
index b7757d2d6388..000000000000
--- a/docs/orchestration/agents/fargate.md
+++ /dev/null
@@ -1,523 +0,0 @@
-# Fargate Agent (Deprecated)
-
-::: warning
-The Fargate Agent is deprecated, we recommend users transition to using the new
-[ECS Agent](/orchestration/agents/ecs.md) instead. Note that the ECS agent only
-supports [RunConfig](/orchestration/flow_config/overview.md#run-configuration)
-based flows. Flows using the legacy `Environment` classes will need to be
-transitioned before moving off the fargate agent.
-:::
-
-The Fargate Agent is an agent designed to deploy flows as Tasks using AWS Fargate. This agent can be run anywhere so long as the proper AWS configuration credentials are provided.
-
-[[toc]]
-
-::: warning Core server
-In order to use this agent with Prefect Core's server the server's GraphQL API endpoint must be accessible.
-:::
-
-### Requirements
-
-When running the Fargate you may optionally provide `AWS_ACCESS_KEY_ID`, `AWS_SECRET_ACCESS_KEY`, and `AWS_SESSION_TOKEN` (specific to temporary credentials). If these three items are not explicitly defined, boto3 will default to environment variables or your credentials file. Having the `REGION_NAME` defined along with the appropriate credentials stored per aws expectations are required to initialize the [boto3 client](https://boto3.amazonaws.com/v1/documentation/api/latest/reference/services/ecs.html#client). For more information on properly setting your credentials, check out the boto3 documentation [here](https://boto3.amazonaws.com/v1/documentation/api/latest/guide/configuration.html).
-
-### Usage
-
-```
-$ prefect agent fargate start
-
- ____ __ _ _ _
-| _ \ _ __ ___ / _| ___ ___| |_ / \ __ _ ___ _ __ | |_
-| |_) | '__/ _ \ |_ / _ \/ __| __| / _ \ / _` |/ _ \ '_ \| __|
-| __/| | | __/ _| __/ (__| |_ / ___ \ (_| | __/ | | | |_
-|_| |_| \___|_| \___|\___|\__| /_/ \_\__, |\___|_| |_|\__|
- |___/
-
-2019-08-27 14:33:39,772 - agent - INFO - Starting FargateAgent
-2019-08-27 14:33:39,772 - agent - INFO - Agent documentation can be found at https://docs.prefect.io/orchestration/
-2019-08-27 14:33:40,932 - agent - INFO - Agent successfully connected to Prefect Cloud
-2019-08-27 14:33:40,932 - agent - INFO - Waiting for flow runs...
-```
-
-The Fargate Agent can be started either through the Prefect CLI or by importing the `FargateAgent` class from the core library. Starting the agent from the CLI will require that the required AWS configuration arguments are set at the environment level while importing the agent class in a Python process will allow you to specify them at initialization.
-
-::: tip API Keys
-You can specify a service account API key via the CLI with
-
-```bash
-$ prefect agent fargate start -k SERVICE_ACCOUNT_API_KEY
-```
-
-For additional methods of specifying API keys, see the [API key documentation](../concepts/api_keys.md).
-:::
-
-### Installation
-
-Unlike the Kubernetes Agent, the Fargate Agent is not generally installed to run on Fargate itself and instead it can be spun up anywhere with the correct variables set.
-
-Through the Prefect CLI:
-
-```
-$ export AWS_ACCESS_KEY_ID=MY_ACCESS
-$ export AWS_SECRET_ACCESS_KEY=MY_SECRET
-$ export AWS_SESSION_TOKEN=MY_SESSION
-$ export REGION_NAME=MY_REGION
-$ prefect agent fargate start
-```
-
-In a Python process:
-
-```python
-from prefect.agent.fargate import FargateAgent
-
-agent = FargateAgent(
- aws_access_key_id="MY_ACCESS",
- aws_secret_access_key="MY_SECRET",
- aws_session_token="MY_SESSION",
- region_name="MY_REGION",
- )
-
-agent.start()
-```
-
-You are now ready to run some flows!
-
-### Process
-
-The Fargate Agent periodically polls for new flow runs to execute. When a flow run is retrieved from Prefect Cloud the agent checks to make sure that the flow was registered with a Docker storage option. If so, the agent then creates a Task using the `storage` attribute of that flow, and runs `prefect execute flow-run`.
-
-If it is the first run of a particular flow then a Task Definition will be registered. Each new run of that flow will run using that same Task Definition and it will override some of the environment variables in order to specify which flow run is occurring.
-
-When the flow run is found and the Task is run the logs of the agent should reflect that:
-
-```
-2019-09-01 19:00:30,532 - agent - INFO - Starting FargateAgent
-2019-09-01 19:00:30,533 - agent - INFO - Agent documentation can be found at https://docs.prefect.io/orchestration/
-2019-09-01 19:00:30,655 - agent - INFO - Agent successfully connected to Prefect Cloud
-2019-09-01 19:00:30,733 - agent - INFO - Waiting for flow runs...
-2019-09-01 19:01:08,835 - agent - INFO - Found 1 flow run(s) to submit for execution.
-2019-09-01 19:01:09,158 - agent - INFO - Submitted 1 flow run(s) for execution.
-```
-
-The Fargate Task run should be created and it will start in a `PENDING` state. Once the resources are provisioned it will enter a `RUNNING` state and on completion it will finish as `COMPLETED`.
-
-### Configuration
-
-The Fargate Agent allows for a set of AWS configuration options to be set or provided in order to initialize the boto3 client. All of these options can be provided at initialization of the `FargateAgent` class or through an environment variable:
-
-- aws_access_key_id (str, optional): AWS access key id for connecting the boto3 client. Defaults to the value set in the environment variable `AWS_ACCESS_KEY_ID`.
-- aws_secret_access_key (str, optional): AWS secret access key for connecting the boto3 client. Defaults to the value set in the environment variable `AWS_SECRET_ACCESS_KEY`.
-- aws_session_token (str, optional): AWS session key for connecting the boto3 client. Defaults to the value set in the environment variable `AWS_SESSION_TOKEN`.
-- region_name (str, optional): AWS region name for connecting the boto3 client. Defaults to the value set in the environment variable `REGION_NAME`.
-- botocore_config (dict, optional): [botocore configuration](https://botocore.amazonaws.com/v1/documentation/api/latest/reference/config.html) options to be passed to the boto3 client.
-
-- enable_task_revisions (bool, optional): Enable registration of task definitions using revisions.
- When enabled, task definitions will use flow name as opposed to flow id and each new version will be a
- task definition revision. Each revision will be registered with a tag called `PrefectFlowId`
- and `PrefectFlowVersion` to enable proper lookup for existing revisions. Flow name is reformatted
- to support task definition naming rules by converting all non-alphanumeric characters to '\*'.
- Defaults to False.
-- use_external_kwargs (bool, optional): When enabled, the agent will check for the existence of an
- external json file containing kwargs to pass into the run_flow process.
- Defaults to False.
-- external_kwargs_s3_bucket (str, optional): S3 bucket containing external kwargs.
-- external_kwargs_s3_key (str, optional): S3 key prefix for the location of `/.json`.
-- \*\*kwargs (dict, optional): additional keyword arguments to pass to boto3 for
- `register_task_definition` and `run_task`
-
-While the above configuration options allow for the initialization of the boto3 client, you may also need to specify the arguments that allow for the registering and running of Fargate task definitions. The Fargate Agent makes no assumptions on how your particular AWS configuration is set up and instead has a `kwargs` argument which will accept any arguments for boto3's `register_task_definition` and `run_task` functions.
-
-::: tip Validating Configuration
-The Fargate Agent has a utility function [`validate_configuration`](/api/latest/agent/fargate.html#fargateagent) which can be used to test the configuration options set on the agent to ensure is it able to register the task definition and run the task.
-:::
-
-Accepted kwargs for [`register_task_definition`](https://boto3.amazonaws.com/v1/documentation/api/latest/reference/services/ecs.html#ECS.Client.register_task_definition):
-
-```
-taskRoleArn string
-executionRoleArn string
-networkMode string
-volumes list
-placementConstraints list
-cpu string
-memory string
-tags list
-pidMode string
-ipcMode string
-proxyConfiguration dict
-inferenceAccelerators list
-```
-
-We have also added the ability to select items to the `containerDefinitions` kwarg of `register_task_definition`:
-
-```
-environments list
-secrets list
-mountPoints list
-logConfiguration dict
-repositoryCredentials dict
-```
-
-Environment was added to support adding flow level environment variables via the `use_external_kwargs` described later on in the documentation.
-You should continue to use the `env_vars` kwarg to pass agent level environment variables to your tasks.
-
-This adds support for Native AWS Secrets Manager and/or Parameter Store in your flows.
-
-Given that you running your Fargate tasks on `platformVersion` 1.4.0 or higher, you can also leverage `volumes` and `mountPoints` to attach an EFS backed volume on to your tasks.
-In order to use `mountPoints` you will need to include the proper `volumes` kwarg as shown below.
-
-Here is an example of what kwargs would look like with `containerDefinitions` via Python:
-
-```python
-from prefect.agent.fargate import FargateAgent
-
-agent = FargateAgent(
- launch_type="FARGATE",
- aws_access_key_id="MY_ACCESS",
- aws_secret_access_key="MY_SECRET",
- aws_session_token="MY_SESSION",
- region_name="MY_REGION",
- networkConfiguration={
- "awsvpcConfiguration": {
- "assignPublicIp": "ENABLED",
- "subnets": ["my_subnet_id"],
- "securityGroups": []
- }
- },
- cpu="256",
- memory="512",
- platformVersion="1.4.0",
- containerDefinitions=[{
- "environment": [{
- "name": "TEST_ENV",
- "value": "Success!"
- }],
- "secrets": [{
- "name": "TEST_SECRET",
- "valueFrom": "arn:aws:ssm:us-east-1:123456789101:parameter/test/test"
- }],
- "mountPoints": [{
- "sourceVolume": "myEfsVolume",
- "containerPath": "/data",
- "readOnly": False
- }],
- "logConfiguration": {
- "logDriver": "awslogs",
- "options": {
- "awslogs-group": "/my/log/group",
- "awslogs-region": "us-east-1",
- "awslogs-stream-prefix": "prefect-flow-runs",
- "awslogs-create-group": "true",
- },
- },
- }],
- volumes=[
- {
- "name": "myEfsVolume",
- "efsVolumeConfiguration": {
- "fileSystemId": "my_efs_id",
- "transitEncryption": "ENABLED",
- "authorizationConfig": {
- "accessPointId": "my_efs_access_point",
- "iam": "ENABLED"
- }
- }
- }
- ]
- ),
-
-agent.start()
-```
-
-You can also pass these in using environment variables with the format of `containerDefinitions_`, for example:
-
-```
-containerDefinitions_environment
-containerDefinitions_secrets
-containerDefinitions_mountPoints
-```
-
-Accepted kwargs for [`run_task`](https://boto3.amazonaws.com/v1/documentation/api/latest/reference/services/ecs.html#ECS.Client.run_task):
-
-```
-cluster string
-count integer
-startedBy string
-group string
-placementConstraints list
-placementStrategy list
-platformVersion string
-networkConfiguration dict
-tags list
-enableECSManagedTags boolean
-propagateTags string
-```
-
-:::tip boto3 kwargs
-For more information on using Fargate with boto3 and to see the list of supported configuration options please visit the [relevant API documentation.](https://boto3.amazonaws.com/v1/documentation/api/latest/reference/services/ecs.html) Most importantly the functions [register_task_definition()](https://boto3.amazonaws.com/v1/documentation/api/latest/reference/services/ecs.html#ECS.Client.register_task_definition)and [run_task()](https://boto3.amazonaws.com/v1/documentation/api/latest/reference/services/ecs.html#ECS.Client.run_task).
-:::
-
-All of these options can be provided at initialization of the `FargateAgent` class or through an environment variable. This means that the environment variables will need to be string representations of the values they represent.
-
-For example, the `networkConfiguration` kwarg accepts a dictionary and if provided through an environment variable it will need to be a string representation of that dictionary.
-
-```python
-networkConfiguration={
- "awsvpcConfiguration": {
- "assignPublicIp": "ENABLED",
- "subnets": ["my_subnet_id"],
- "securityGroups": []
- }
-}
-```
-
-```bash
-networkConfiguration="{'awsvpcConfiguration': {'assignPublicIp': 'ENABLED', 'subnets': ['my_subnet_id'], 'securityGroups': []}}"
-```
-
-:::warning Case Sensitive Environment Variables
-Please note that when setting the boto3 configuration for the `register_task_definition` and `run_task` the keys are case sensitive. For example: if setting placement constraints through an environment variable it must match boto3's case sensitive `placementConstraints`.
-:::
-
-#### External Kwargs
-
-By default, all of the kwargs mentioned above are passed in to the Agent configuration, which means that every flow inherits from them. There are use cases where you will want to use different attributes for different flows and that is supported through enabling `use_external_kwargs`.
-
-When enabled, the Agent will check for the existence of an external kwargs file from a bucket in S3. In order to use this feature you must also provide `external_kwargs_s3_bucket` and `external_kwargs_s3_key` to your Agent. If a file exists matching a set S3 key path, the Agent will apply these kwargs to the boto3 `register_task_definition` and `run_task` functions.
-
-External kwargs must be in `json` format.
-
-The S3 key path that will be used when fetching files is:
-
-```
-/slugified_flow_name>/.json
-```
-
-For example if the `external_kwargs_s3_key` is `prefect`, the flow name is `flow #1` and the flow ID is `a718df81-3376-4039-a1e6-cf5b79baa7d4` then your full s3 key path will be:
-
-```
-prefect/flow-1/a718df81.json
-```
-
-Below is an example S3 key patching to a particular flow:
-
-```python
-import os
-from slugify import slugify
-
-flow_id = flow.register(project_name="")
-s3_key = os.path.join('prefect-artifacts', slugify(flow.name), '{}.json'.format(flow_id[:8]))
-```
-
-This functionality requires the agent have a proper IAM policy for fetching objects from S3, here is an example:
-
-```
-{
- "Version": "2012-10-17",
- "Statement": [
- {
- "Sid": "AllowListWorkBucket",
- "Effect": "Allow",
- "Action": [
- "s3:ListBucket",
- "s3:GetBucketLocation",
- "s3:GetBucketAcl"
- ],
- "Resource": ""
- },
- {
- "Sid": "AllowGetPutDeleteWorkObject",
- "Effect": "Allow",
- "Action": [
- "s3:GetObjectVersion",
- "s3:GetObject"
- ],
- "Resource": ""
- }
- ]
-}
-```
-
-External kwargs also support `containerDefinitions` mentioned above, which makes it easier support different environment variables, secrets, and mounted EFS volumes for different flows.
-
-Here is an example of the external kwargs json:
-
-```json
-{
- "networkConfiguration": {
- "awsvpcConfiguration": {
- "assignPublicIp": "ENABLED",
- "subnets": ["my_subnet_id"],
- "securityGroups": []
- }
- },
- "cpu": "256",
- "memory"": "512",
- "platformVersion": "1.4.0",
- "containerDefinitions": [{
- "environment": [{
- "name": "TEST_ENV",
- "value": "Success!"
- }],
- "secrets": [{
- "name": "TEST_SECRET",
- "valueFrom": "arn:aws:ssm:us-east-1:123456789101:parameter/test/test"
- }],
- "mountPoints": [{
- "sourceVolume": "myEfsVolume",
- "containerPath": "/data",
- "readOnly": false
- }]
- }],
- "volumes": [
- {
- "name": "myEfsVolume",
- "efsVolumeConfiguration": {
- "fileSystemId": "my_efs_id",
- "transitEncryption": "ENABLED",
- "authorizationConfig": {
- "accessPointId": "my_efs_access_point",
- "iam": "ENABLED"
- }
- }
- }
- ]
-}
-```
-
-#### Task Revisions
-
-By default, a new task definition is created each time there is a new flow version executed. However, ECS does offer the ability to apply changes through the use of revisions. The `enable_task_revisions` flag will enable using revisions by doing the following:
-
-- Use a slugified flow name for the task definition family name.
- For example, `flow #1` becomes `flow-1`.
-- Add a tag called `PrefectFlowId` and `PrefectFlowVersion` to enable proper lookup for existing revisions.
-
-This means that for each flow, the proper task definition, based on flow ID and version, will be used. If a new flow version is run, a new revision is added to the flow's task definition family. Your task definitions will now have this hierarchy:
-
-```
-
- - :
- - :
- - :
-```
-
-This functionality requires the agent have a proper IAM policy for creating task definition revisions and using the resource tagging API. Here is an example IAM policy:
-
-```
-{
- "Version": "2012-10-17",
- "Statement": [
- {
- "Sid": "ResourceAllItems",
- "Effect": "Allow",
- "Action": [
- "tag:Get*",
- "logs:PutLogEvents",
- "logs:CreateLogStream",
- "logs:CreateLogGroup",
- "events:PutTargets",
- "events:PutRule",
- "events:DescribeRule",
- "ecs:StopTask",
- "ecs:RegisterTaskDefinition",
- "ecs:Describe*",
- "ecr:GetDownloadUrlForLayer",
- "ecr:GetAuthorizationToken",
- "ecr:BatchGetImage",
- "ecr:BatchCheckLayerAvailability",
- "ec2:DescribeSubnets"
- ],
- "Resource": "*"
- },
- {
- "Sid": "EcsTaskRun",
- "Effect": "Allow",
- "Action": "ecs:RunTask",
- "Resource": "arn:aws:ecs:::task-definition/*",
- "Condition": {
- "ForAllValues:StringEquals": {
- "aws:TagKeys": [
- "PrefectFlowVersion",
- "PrefectFlowId"
- ]
- }
- }
- },
- {
- "Sid": "IamPassRole",
- "Effect": "Allow",
- "Action": "iam:PassRole",
- "Resource": "*"
- }
- ]
-}
-```
-
-### Configuration Examples
-
-Below are two minimal examples which specify information for connecting to boto3 as well as the task's resource requests and network configuration. The first example initializes a `FargateAgent` with kwargs passed in and the second example uses the Prefect CLI to start the Fargate Agent with kwargs being loaded from environment variables.
-
-#### Python Script
-
-```python
-from prefect.agent.fargate import FargateAgent
-
-agent = FargateAgent(
- aws_access_key_id="...",
- aws_secret_access_key="...",
- region_name="us-east-1",
- cpu="256",
- memory="512",
- networkConfiguration={
- "awsvpcConfiguration": {
- "assignPublicIp": "ENABLED",
- "subnets": ["my_subnet_id"],
- "securityGroups": []
- }
- }
-)
-
-agent.start()
-```
-
-#### Prefect CLI
-
-```bash
-$ export AWS_ACCESS_KEY_ID=...
-$ export AWS_SECRET_ACCESS_KEY=...
-$ export REGION_NAME=us-east-1
-$ export cpu=256
-$ export memory=512
-$ export networkConfiguration="{'awsvpcConfiguration': {'assignPublicIp': 'ENABLED', 'subnets': ['my_subnet_id'], 'securityGroups': []}}"
-
-$ prefect agent fargate start
-```
-
-:::warning Outbound Traffic
-If you encounter issues with Fargate raising errors in cases of client timeouts or inability to pull containers then you may need to adjust your `networkConfiguration`. Visit [this discussion thread](https://github.com/aws/amazon-ecs-agent/issues/1128#issuecomment-351545461) for more information on configuring AWS security groups.
-:::
-
-#### Prefect CLI Using Kwargs
-
-All configuration options for the Fargate Agent can also be provided to the `prefect agent fargate start` CLI command. They must match the camel casing used by boto3 but both the single kwarg as well as with the standard prefix of `--` are accepted. This means that `taskRoleArn=""` is the same as `--taskRoleArn=""`.
-
-```bash
-$ export AWS_ACCESS_KEY_ID=...
-$ export AWS_SECRET_ACCESS_KEY=...
-$ export REGION_NAME=us-east-1
-
-$ prefect agent fargate start cpu=256 memory=512 networkConfiguration="{'awsvpcConfiguration': {'assignPublicIp': 'ENABLED', 'subnets': ['my_subnet_id'], 'securityGroups': []}}"
-```
-
-Kwarg values can also be provided through environment variables. This is useful in situations where case sensitive environment variables are desired or when using templating tools like Terraform to deploy your Agent.
-
-```bash
-$ export AWS_ACCESS_KEY_ID=...
-$ export AWS_SECRET_ACCESS_KEY=...
-$ export REGION_NAME=us-east-1
-$ export CPU=256
-$ export MEMORY=512
-$ export NETWORK_CONFIGURATION="{'awsvpcConfiguration': {'assignPublicIp': 'ENABLED', 'subnets': ['my_subnet_id'], 'securityGroups': []}}"
-
-$ prefect agent fargate start cpu=$CPU memory=$MEMORY networkConfiguration=$NETWORK_CONFIGURATION
-```
diff --git a/docs/orchestration/concepts/api_keys.md b/docs/orchestration/concepts/api_keys.md
index 5d4006edfcae..4a5675c6ad15 100644
--- a/docs/orchestration/concepts/api_keys.md
+++ b/docs/orchestration/concepts/api_keys.md
@@ -61,9 +61,9 @@ You may also provide your key with an environment variable or the config. This i
::: tab Environment
```bash
-export PREFECT__CLOUD__API_KEY=""
+$ export PREFECT__CLOUD__API_KEY=""
# Optional
-export PREFECT__CLOUD__TENANT_ID=""
+$ export PREFECT__CLOUD__TENANT_ID=""
```
:::
@@ -88,13 +88,13 @@ tenant_id = ""
Agents will load keys from these default locations as described above, but you can also pass an override directly to the agent when you start it. For example:
```bash
-prefect agent local start --key ""
+$ prefect agent local start --key ""
```
:::
## Querying for API key metadata
-Your API key metadata can be viewed in serveral ways. Note that we _do not store_ your API keys and you will not be able to view the value of the key after creation. When querying for keys, you will only be able to see metadata for keys created by your user or, if the you are a tenant admin, metadata for the all service account API keys in the tenant.
+Your API key metadata can be viewed in several ways. Note that we _do not store_ your API keys and you will not be able to view the value of the key after creation. When querying for keys, you will only be able to see metadata for keys created by your user or, if you are a tenant admin, metadata for all service account API keys in the tenant.
:::: tabs
@@ -176,7 +176,7 @@ To revoke an API key in the UI navigate to Team Settings > Service Accounts or A
 To revoke an API key from the Prefect CLI, use the `prefect auth revoke-key` command. You will likely need to retrieve the ID of the key with `prefect auth list-keys` first.
```bash
-prefect auth revoke-key --id API_KEY_ID
+$ prefect auth revoke-key --id API_KEY_ID
```
:::
@@ -199,19 +199,33 @@ mutation {
## Using API keys with older versions of Prefect
-The `prefect auth login` command will not work with API keys and the `PREFECT__CLOUD__API_KEY` setting will be ignored before version 0.15.0. In older versions, there were authentication tokens. Keys can be used in-place in older versions by setting them in the config or the environment in the `PREFECT__CLOUD__AUTH_TOKEN` setting.
+::: warning
+As of version 1.0.0, API tokens are no longer supported as an authentication method.
-Using an API key as a token for registering flows
+This section describes how to use API keys for authentication where you may previously have used tokens.
+
+Note that if you have logged in with an API key but a token still exists on your machine, the API key will be used and the token will be ignored.
+:::
+
+If you are running a version of Prefect older than 0.15.0, note that:
+
+- The `prefect auth login` CLI command will not work with API keys.
+- The `PREFECT__CLOUD__API_KEY` setting will be ignored.
+
+In most cases, you can use an API key anywhere you previously used a token. Here are a few examples.
+
+Using an API key as a token for registering flows:
```bash
-export PREFECT__CLOUD__AUTH_TOKEN=""
+$ export PREFECT__CLOUD__AUTH_TOKEN=""
```
-Using an API key as a token for starting an agent by CLI
+Using an API key as a token for starting an agent by CLI:
```bash
$ prefect agent local start -k ""
```
-Using an API key as a token for starting an agent by environment
+Using an API key as a token for starting an agent by environment:
```bash
$ export PREFECT__CLOUD__AGENT__AUTH_TOKEN=""
$ prefect agent local start
@@ -219,14 +233,22 @@ $ prefect agent local start
## Removing API tokens
-If you've used `prefect auth login` with an API token or have set an API token in your config or environment, you will receieve warnings starting with version 0.15.0 that tokens have been deprecated. API keys are more secure and simpler to use, we urge you to switch over.
+As of version 1.0.0, API tokens are no longer supported.
-If you logged in with `prefect auth login`, you can remove your token with `prefect auth logout --token` or `rm -r ~/.prefect/client`.
+If you used `prefect auth login` with an API token or set an API token in your config or environment, you would have received warnings starting with version 0.15.0 that tokens have been deprecated.
+
+`prefect auth status` will warn about existing authentication tokens and advise on removal.
+
+If you logged in with `prefect auth login`, you can remove your token with the CLI command:
+
+```bash
+$ prefect auth purge-tokens
+```
+
+Alternatively, you can remove the tokens manually with `rm -r ~/.prefect/client`.
If you set your token in the environment, you can unset it with `unset PREFECT__CLOUD__AUTH_TOKEN`.
If you set your token in the config, you will have to modify `~/.prefect/config.toml` to remove it.
-::: warning
-If you have logged in with an API key but a token still exists on your machine, the API key will be used and the token will be ignored.
-:::
+If you have logged in with an API key but a token still exists on your machine, the API key will be used and the token will be ignored.
diff --git a/docs/orchestration/concepts/cloud_hooks.md b/docs/orchestration/concepts/cloud_hooks.md
index 4273e8dd7214..9018900d67a1 100644
--- a/docs/orchestration/concepts/cloud_hooks.md
+++ b/docs/orchestration/concepts/cloud_hooks.md
@@ -69,7 +69,7 @@ The Messaging Service SID requires you to have a messaging service set up. These
5. Give your project a name and check the settings.
6. Click on 'Numbers' and add a number to your account. (This is not the number your messages will get sent to so you don't need to enter this in the Prefect Cloud Hooks form.)
-7. Your Mesaging Service SID is the Service SID in the Settings page.
+7. Your Messaging Service SID is the Service SID in the Settings page.
### Pager Duty Cloud Hook
diff --git a/docs/orchestration/concepts/secrets.md b/docs/orchestration/concepts/secrets.md
index 7687f21ea797..86dbc5c813d0 100644
--- a/docs/orchestration/concepts/secrets.md
+++ b/docs/orchestration/concepts/secrets.md
@@ -77,7 +77,7 @@ $ export PREFECT__CONTEXT__SECRETS__MYSECRET="MY SECRET VALUE"
Note that this configuration only affects the environment in which it's
configured. So if you set values locally, they'll affect flows run locally or
-via a [local agent](/orchetration/agents/local.md), but _not_ flows deployed
+via a [local agent](/orchestration/agents/local.md), but _not_ flows deployed
via other agents (since those flow runs happen in a different environment). To
set local secrets on flow runs deployed by an agent, you can use the `--env`
flag to forward environment variables into the flow run environment.
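+
+For example, a minimal sketch of forwarding a local secret into flow runs through a local agent (the secret name and value are illustrative):
+
+```bash
+$ prefect agent local start --env PREFECT__CONTEXT__SECRETS__MYSECRET="MY SECRET VALUE"
+```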
diff --git a/docs/orchestration/execution/custom_environment.md b/docs/orchestration/execution/custom_environment.md
index 238ee3212bb1..2816f70f3bae 100644
--- a/docs/orchestration/execution/custom_environment.md
+++ b/docs/orchestration/execution/custom_environment.md
@@ -1,10 +1,7 @@
# Custom Environment
::: warning
-Flows configured with environments are being deprecated - we recommend users
-transition to using "Run Configs" instead. See [flow
-configuration](/orchestration/flow_config/overview.md) and [upgrading
-tips](/orchestration/flow_config/upgrade.md) for more information.
+Flows configured with environments are no longer supported. We recommend users transition to using [RunConfig](/orchestration/flow_config/run_configs.html) instead. See the [Flow Configuration](/orchestration/flow_config/overview.md) and [Upgrading](/orchestration/flow_config/upgrade.md) documentation for more information.
:::
[[toc]]
diff --git a/docs/orchestration/execution/dask_cloud_provider_environment.md b/docs/orchestration/execution/dask_cloud_provider_environment.md
index effc9f398429..99c28a60868d 100644
--- a/docs/orchestration/execution/dask_cloud_provider_environment.md
+++ b/docs/orchestration/execution/dask_cloud_provider_environment.md
@@ -1,10 +1,7 @@
# Dask Cloud Provider Environment
::: warning
-Flows configured with environments are being deprecated - we recommend users
-transition to using "Run Configs" instead. See [flow
-configuration](/orchestration/flow_config/overview.md) and [upgrading
-tips](/orchestration/flow_config/upgrade.md) for more information.
+Flows configured with environments are no longer supported. We recommend users transition to using [RunConfig](/orchestration/flow_config/run_configs.html) instead. See the [Flow Configuration](/orchestration/flow_config/overview.md) and [Upgrading](/orchestration/flow_config/upgrade.md) documentation for more information.
:::
[[toc]]
diff --git a/docs/orchestration/execution/dask_k8s_environment.md b/docs/orchestration/execution/dask_k8s_environment.md
index ea1c31dca44c..e7b6ac369fed 100644
--- a/docs/orchestration/execution/dask_k8s_environment.md
+++ b/docs/orchestration/execution/dask_k8s_environment.md
@@ -1,10 +1,7 @@
# Dask Kubernetes Environment
::: warning
-Flows configured with environments are being deprecated - we recommend users
-transition to using "Run Configs" instead. See [flow
-configuration](/orchestration/flow_config/overview.md) and [upgrading
-tips](/orchestration/flow_config/upgrade.md) for more information.
+Flows configured with environments are no longer supported. We recommend users transition to using [RunConfig](/orchestration/flow_config/run_configs.html) instead. See the [Flow Configuration](/orchestration/flow_config/overview.md) and [Upgrading](/orchestration/flow_config/upgrade.md) documentation for more information.
:::
[[toc]]
diff --git a/docs/orchestration/execution/fargate_task_environment.md b/docs/orchestration/execution/fargate_task_environment.md
index 8001b4d405a0..08aa3e093fa6 100644
--- a/docs/orchestration/execution/fargate_task_environment.md
+++ b/docs/orchestration/execution/fargate_task_environment.md
@@ -1,10 +1,7 @@
# Fargate Task Environment
::: warning
-Flows configured with environments are being deprecated - we recommend users
-transition to using "Run Configs" instead. See [flow
-configuration](/orchestration/flow_config/overview.md) and [upgrading
-tips](/orchestration/flow_config/upgrade.md) for more information.
+Flows configured with environments are no longer supported. We recommend users transition to using [RunConfig](/orchestration/flow_config/run_configs.html) instead. See the [Flow Configuration](/orchestration/flow_config/overview.md) and [Upgrading](/orchestration/flow_config/upgrade.md) documentation for more information.
:::
[[toc]]
@@ -19,9 +16,9 @@ _For more information on the Fargate Task Environment visit the relevant [API do
#### Initialization
-The `FargateTaskEnvironment` has two groups of keyword arguments: boto3-related arguments and task-related arguments. All of this configuration revolves around how the [boto3]() library communicates with AWS. The design of this Environment is meant to be open to all access methodologies for AWS instead of adhering to a single mode of authentication.
+The `FargateTaskEnvironment` has two groups of keyword arguments: boto3-related arguments and task-related arguments. All of this configuration revolves around how the boto3 library communicates with AWS. The design of this Environment is meant to be open to all access methodologies for AWS instead of adhering to a single mode of authentication.
-This Environment accepts similar arguments to how boto3 authenticates with AWS: `aws_access_key_id`, `aws_secret_access_key`, `aws_session_token`, and `region_name`. These arguments are directly passed to the [boto3 client]() which means you should initialize this Environment in the same way you would normally use boto3.
+This Environment accepts similar arguments to how boto3 authenticates with AWS: `aws_access_key_id`, `aws_secret_access_key`, `aws_session_token`, and `region_name`. These arguments are directly passed to the boto3 client which means you should initialize this Environment in the same way you would normally use boto3.
The other group of kwargs are those you would pass into boto3 for [registering](https://boto3.amazonaws.com/v1/documentation/api/latest/reference/services/ecs.html#ECS.Client.register_task_definition) a task definition and [running](https://boto3.amazonaws.com/v1/documentation/api/latest/reference/services/ecs.html#ECS.Client.run_task) that task.
diff --git a/docs/orchestration/execution/k8s_job_environment.md b/docs/orchestration/execution/k8s_job_environment.md
index cba3d1c2d26f..905db92a3d79 100644
--- a/docs/orchestration/execution/k8s_job_environment.md
+++ b/docs/orchestration/execution/k8s_job_environment.md
@@ -1,10 +1,7 @@
# Kubernetes Job Environment
::: warning
-Flows configured with environments are being deprecated - we recommend users
-transition to using "Run Configs" instead. See [flow
-configuration](/orchestration/flow_config/overview.md) and [upgrading
-tips](/orchestration/flow_config/upgrade.md) for more information.
+Flows configured with environments are no longer supported. We recommend users transition to using [RunConfig](/orchestration/flow_config/run_configs.html) instead. See the [Flow Configuration](/orchestration/flow_config/overview.md) and [Upgrading](/orchestration/flow_config/upgrade.md) documentation for more information.
:::
[[toc]]
diff --git a/docs/orchestration/execution/local_environment.md b/docs/orchestration/execution/local_environment.md
index a8d369023121..9e5dd8f58260 100644
--- a/docs/orchestration/execution/local_environment.md
+++ b/docs/orchestration/execution/local_environment.md
@@ -1,10 +1,7 @@
# Local Environment
::: warning
-Flows configured with environments are being deprecated - we recommend users
-transition to using "Run Configs" instead. See [flow
-configuration](/orchestration/flow_config/overview.md) and [upgrading
-tips](/orchestration/flow_config/upgrade.md) for more information.
+Flows configured with environments are no longer supported. We recommend users transition to using [RunConfig](/orchestration/flow_config/run_configs.html) instead. See the [Flow Configuration](/orchestration/flow_config/overview.md) and [Upgrading](/orchestration/flow_config/upgrade.md) documentation for more information.
:::
[[toc]]
diff --git a/docs/orchestration/execution/overview.md b/docs/orchestration/execution/overview.md
index ea3638976a12..a534e10621e0 100644
--- a/docs/orchestration/execution/overview.md
+++ b/docs/orchestration/execution/overview.md
@@ -1,10 +1,9 @@
-# Execution Overview
+# Environments Overview
::: warning
-Flows configured with environments are being deprecated - we recommend users
-transition to using "Run Configs" instead. See [flow
-configuration](/orchestration/flow_config/overview.md) and [upgrading
-tips](/orchestration/flow_config/upgrade.md) for more information.
+Flows configured with environments are no longer supported. We recommend users transition to using [RunConfig](/orchestration/flow_config/run_configs.html) instead. See the [Flow Configuration](/orchestration/flow_config/overview.md) and [Upgrading](/orchestration/flow_config/upgrade.md) documentation for more information.
+
+See [Storage](/orchestration/flow_config/storage.html) for current Flow definition storage capabilities.
:::
Executing flows using the Prefect API is accomplished through two powerful abstractions — storage and environments. By combining these two abstractions, flows can be saved, shared, and executed across various platforms.
@@ -13,7 +12,7 @@ Executing flows using the Prefect API is accomplished through two powerful abstr
## Storage
-[Storage](https://docs.prefect.io/api/latest/storage.html) objects are pieces of functionality which define how and where a Flow should be stored. Prefect supports storage options ranging from ephemeral in-memory storage to Docker images which can be stored in registries.
+[Storage](/api/latest/storage.html) objects define how and where a Flow should be stored. Prefect supports many [storage types](/orchestration/flow_config/storage.html#storage-types) ranging from local storage to Docker containers, code repositories including GitHub, and cloud storage with AWS, Azure, and Google Cloud.
### How Storage is Used
@@ -25,7 +24,7 @@ from prefect.storage import Docker
f = Flow("example-storage", storage=Docker(registry_url="prefecthq/storage-example"))
```
-or assign it directly:
+Or assign it directly:
```python
from prefect.storage import Docker
@@ -67,7 +66,7 @@ f.register("My First Project", build=False)
## Environments
-While Storage objects provide a way to save and retrieve Flows, [Environments](https://docs.prefect.io/api/latest/environments/execution.html) specify _how your Flow should be run_ e.g., which executor to use and whether there are any auxiliary infrastructure requirements for your Flow's execution. For example, if you want to run your Flow on Kubernetes using an auto-scaling Dask cluster then you're going to want to use an environment for that!
+While Storage objects provide a way to save and retrieve Flows, [Environments](/api/latest/environments/execution.html) specify _how your Flow should be run_ e.g., which executor to use and whether there are any auxiliary infrastructure requirements for your Flow's execution. For example, if you want to run your Flow on Kubernetes using an auto-scaling Dask cluster then you're going to want to use an environment for that!
### How Environments are Used
@@ -94,7 +93,7 @@ f.environment = LocalEnvironment(executor=DaskExecutor())
### Setup & Execute
-The two main environment functions are `setup` and `execute`. The `setup` function is responsible for creating or prepping any infrastructure requirements before the Flow is executed e.g., spinning up a Dask cluster or checking available platform resources. The `execute` function is responsible for actually telling the Flow where and how it needs to run e.g., running the Flow in process, as per the [`LocalEnvironment`](https://docs.prefect.io/api/latest/environments/execution.html##localenvironment), or registering a new Fargate task, as per the [`FargateTaskEnvironment`](https://docs.prefect.io/api/latest/environments/execution.html#fargatetaskenvironment).
+The two main environment functions are `setup` and `execute`. The `setup` function is responsible for creating or prepping any infrastructure requirements before the Flow is executed, e.g., spinning up a Dask cluster or checking available platform resources. The `execute` function is responsible for actually telling the Flow where and how it needs to run, e.g., running the Flow in process, as per the [`LocalEnvironment`](/api/latest/environments/execution.html#localenvironment), or registering a new Fargate task, as per the [`FargateTaskEnvironment`](/api/latest/environments/execution.html#fargatetaskenvironment).
### Environment Callbacks
diff --git a/docs/orchestration/execution/storage_options.md b/docs/orchestration/execution/storage_options.md
index fdbf972710d4..0c2a839ba6ac 100644
--- a/docs/orchestration/execution/storage_options.md
+++ b/docs/orchestration/execution/storage_options.md
@@ -1,10 +1,9 @@
# Storage Options
::: warning
-Flows configured with environments are being deprecated - we recommend users
-transition to using "Run Configs" instead. See [flow
-configuration](/orchestration/flow_config/overview.md) and [upgrading
-tips](/orchestration/flow_config/upgrade.md) for more information.
+Flows configured with environments are no longer supported. We recommend users transition to using [RunConfig](/orchestration/flow_config/run_configs.html) instead. See the [Flow Configuration](/orchestration/flow_config/overview.md) and [Upgrading](/orchestration/flow_config/upgrade.md) documentation for more information.
+
+See [Storage](/orchestration/flow_config/storage.html) for current Flow definition storage capabilities.
:::
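+As a sketch of the recommended replacement, a run config is attached directly to the flow (`LocalRun` is shown as one option; the environment variable is a placeholder):
+
+```python
+from prefect import Flow
+from prefect.run_configs import LocalRun
+
+flow = Flow("example")
+# Run-based configuration replaces the removed `Environment` objects
+flow.run_config = LocalRun(env={"SOME_VAR": "value"})  # placeholder variable
+```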
Prefect includes a variety of `Storage` options for saving flows.
@@ -61,7 +60,7 @@ In more recent releases of Core your flow will default to using a `AzureResult`
:::
:::tip Azure Credentials
-Azure Storage uses an Azure [connection string](https://docs.microsoft.com/en-us/azure/storage/common/storage-configure-connection-string) for Azure authentication in aim to upload (build) or download flows, so make sure to provide a valid connection string for your Azure account. A connection string can be set as a [secret](https://docs.prefect.io/orchestration/concepts/secrets.html#secrets) or an environment variable `AZURE_STORAGE_CONNECTION_STRING` in run configuration if it is not passed as `connection_string_secret`.
+Azure Storage uses an Azure [connection string](https://docs.microsoft.com/en-us/azure/storage/common/storage-configure-connection-string) to authenticate when uploading (building) or downloading flows, so make sure to provide a valid connection string for your Azure account. If it is not passed as `connection_string_secret`, the connection string can be set as a [secret](/orchestration/concepts/secrets.html#secrets) or as the environment variable `AZURE_STORAGE_CONNECTION_STRING` in the run configuration.
:::
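+For example, a flow could be configured with Azure storage along these lines (a sketch; the container and secret names are placeholders):
+
+```python
+from prefect import Flow
+from prefect.storage import Azure
+
+flow = Flow(
+    "azure-example",
+    storage=Azure(
+        container="my-flows",  # placeholder container name
+        connection_string_secret="AZURE_CONNECTION_STRING",  # Secret holding the connection string
+    ),
+)
+```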
## AWS S3
diff --git a/docs/orchestration/faq/config.md b/docs/orchestration/faq/config.md
index 318716962166..b592b4056c1c 100644
--- a/docs/orchestration/faq/config.md
+++ b/docs/orchestration/faq/config.md
@@ -1,6 +1,6 @@
# Configuration Options
-A full list of configuation options can be seen in the Prefect [config.toml](https://github.com/PrefectHQ/prefect/blob/master/src/prefect/config.toml). To update configuration settings you can update them in `./prefect/config.toml` or by setting [environment variables](/core/concepts/configuration.html#environment-variables).
+A full list of configuration options can be seen in the Prefect [config.toml](https://github.com/PrefectHQ/prefect/blob/master/src/prefect/config.toml). To update configuration settings, edit them in `~/.prefect/config.toml` or set [environment variables](/core/concepts/configuration.html#environment-variables).
For more on configuration, you can also see the [Prefect Core configuration docs](/core/concepts/configuration.html).
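+Settings resolved from `config.toml` and any `PREFECT__*` environment variables are merged into the `prefect.config` object, so a value can be checked from Python (a quick sketch):
+
+```python
+import prefect
+
+# Reflects config.toml plus any PREFECT__SERVER__HOST override
+print(prefect.config.server.host)
+```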
@@ -20,7 +20,7 @@ PREFECT__SERVER__HOST
PREFECT__SERVER__PORT
```
-## Running Prefect with a pre-exisiting postgres database
+## Running Prefect with a pre-existing postgres database
If you are running Prefect Server and have a postgres instance set up elsewhere, then providing a `server.database.connection_url` or `server.database.host` will allow you to connect to it:
```
diff --git a/docs/orchestration/flow-runs/concurrency-limits.md b/docs/orchestration/flow-runs/concurrency-limits.md
index d2ddaa933ba9..753c6c59ef85 100644
--- a/docs/orchestration/flow-runs/concurrency-limits.md
+++ b/docs/orchestration/flow-runs/concurrency-limits.md
@@ -6,7 +6,7 @@ Setting global concurrency limits is a feature of Prefect Cloud's Standard Tier.
## Flow run limits
-Sometimes, you want to limit the number of flow runs executing simulatneously. For example, you may have an agent on a machine that cannot handle the load of many flow runs.
+Sometimes, you want to limit the number of flow runs executing simultaneously. For example, you may have an agent on a machine that cannot handle the load of many flow runs.
Prefect Cloud provides functionality to limit the number of simultaneous flow runs. This limit is based on [flow run labels](../agents/overview.md#labels). Flow runs can be given as many labels as you wish, and each label can be provided a concurrency limit. If a flow has multiple labels, it will only run if _all_ the labels have available concurrency. Flow run label concurrency limits are enforced globally across your entire team, and labels without explicit limits are considered to have unlimited concurrency.
@@ -93,7 +93,7 @@ query {
}
```
-You can query for specific labels, as shown above, or retrieve _all_ of your flow conurrency limits:
+You can query for specific labels, as shown above, or retrieve _all_ of your flow concurrency limits:
```graphql
query {
@@ -165,7 +165,7 @@ You can edit and remove the concurrency limit of tags at any time. Select the bl
::: tab Python client
-To update your tag concurrency limits programatically, use the Prefect library client:
+To update your tag concurrency limits programmatically, use the Prefect library client:
```python
from prefect import Client
diff --git a/docs/orchestration/flow-runs/inspection.md b/docs/orchestration/flow-runs/inspection.md
index d42d3b2f8a54..643a3e7cb9e4 100644
--- a/docs/orchestration/flow-runs/inspection.md
+++ b/docs/orchestration/flow-runs/inspection.md
@@ -4,28 +4,6 @@ For monitoring flow runs from the UI, see the [UI documentation on flow runs](..
## Python
-::: warning Experimental
-
-
-
-
-The functionality here is experimental, and may change between versions without notice. Use at your own risk.
-
-
-:::
-
The Prefect Core library provides an object for inspecting flow runs without writing queries at `prefect.backend.FlowRunView`.
### Creating a `FlowRunView`
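+For example, a flow run can be loaded and inspected like this (a minimal sketch; substitute a real flow run ID for the placeholder):
+
+```python
+from prefect.backend import FlowRunView
+
+flow_run = FlowRunView.from_flow_run_id("<flow-run-id>")  # placeholder ID
+print(flow_run.state)  # e.g. <Success: "All reference tasks succeeded.">
+```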
diff --git a/docs/orchestration/flow-runs/overview.md b/docs/orchestration/flow-runs/overview.md
index 7cc9cbb8fc20..f0cd87f6de45 100644
--- a/docs/orchestration/flow-runs/overview.md
+++ b/docs/orchestration/flow-runs/overview.md
@@ -33,7 +33,7 @@ Or... keep reading for an overview of how to inspect flow runs.
Flow run information is sent to Prefect's backend during the run and persists there after the run completes. The Prefect GraphQL API allows you to craft queries that retrieve exactly the information you need about any flow run. We also provide tooling in the Prefect Core library to simplify common access patterns.
-- For programatic inspection of flow runs, see the [Python flow run documentation](./inspection#prefect-library).
+- For programmatic inspection of flow runs, see the [Python flow run documentation](./inspection#prefect-library).
- For customized GraphQL queries for flow run data, see [the documentation on querying for flow runs](./inspection#graphql).
- For monitoring flow runs from the UI, see the [UI documentation on flow runs](../ui/flow-run.md).
@@ -45,7 +45,7 @@ Each flow contains tasks which actually do the _work_ of your flow. The state of
Similar to flow runs, task runs can be inspected with various methods:
-- For programatic inspection of task runs, see the [Python task run documentation](./task-runs.md#prefect-libary).
+- For programmatic inspection of task runs, see the [Python task run documentation](./task-runs.md#prefect-library).
- For passing data from one flow to another flow, see the [`get_task_run_result` task documentation](./task-runs.md#task).
- For customized GraphQL queries for task run data, see [the documentation on querying for task runs](./task-runs.md#graphql).
- For monitoring task runs from the UI, see the [UI documentation on task runs](../ui/task-run.md).
diff --git a/docs/orchestration/flow-runs/task-runs.md b/docs/orchestration/flow-runs/task-runs.md
index f93375eced54..048b2eed64ef 100644
--- a/docs/orchestration/flow-runs/task-runs.md
+++ b/docs/orchestration/flow-runs/task-runs.md
@@ -10,28 +10,6 @@ Prefect does not store the _results_ of your task runs. The data that your task
### Python
-::: warning Experimental
-
-
-
-
-The functionality here is experimental, and may change between versions without notice. Use at your own risk.
-
-
-:::
-
The Prefect Core library provides an object for inspecting task runs without writing queries at `prefect.backend.TaskRunView`.
#### Creating a `TaskRunView`
@@ -83,7 +61,7 @@ task_run = TaskRunView.from_task_run_id("c8751f34-9d5e-4ea7-aead-8b50978dabb7")
#### Task run results
-Results from task runs are persisted to the location you've specified in the task's `result` attribute. The `Result` type allows you to store task results in many locations on your own infrastrucuture. See the [results documentation](/core/concepts/results.md) for more details on configuring results.
+Results from task runs are persisted to the location you've specified in the task's `result` attribute. The `Result` type allows you to store task results in many locations on your own infrastructure. See the [results documentation](/core/concepts/results.md) for more details on configuring results.
`TaskRunView` provides a `get_result` method which will load and cache the return value of your task from the result location.
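+For example, reusing the `task_run` loaded above (a sketch; the value returned depends on your task and its result configuration):
+
+```python
+# Loads the return value from the configured result location and caches it
+value = task_run.get_result()
+```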
diff --git a/docs/orchestration/flow_config/docker.md b/docs/orchestration/flow_config/docker.md
index 58be1677136c..7e9a55fa2016 100644
--- a/docs/orchestration/flow_config/docker.md
+++ b/docs/orchestration/flow_config/docker.md
@@ -62,13 +62,13 @@ a few different tag options:
| ---------------- | :----------------------: | -------------: |
| latest | most recent PyPi version | 3.7 |
| master | master build | 3.7 |
+| latest-python3.9 | most recent PyPi version | 3.9 |
| latest-python3.8 | most recent PyPi version | 3.8 |
| latest-python3.7 | most recent PyPi version | 3.7 |
-| latest-python3.6 | most recent PyPi version | 3.6 |
| X.Y.Z | X.Y.Z | 3.7 |
+| X.Y.Z-python3.9 | X.Y.Z | 3.9 |
| X.Y.Z-python3.8 | X.Y.Z | 3.8 |
| X.Y.Z-python3.7 | X.Y.Z | 3.7 |
-| X.Y.Z-python3.6 | X.Y.Z | 3.6 |
| core | most recent PyPi version | 3.8 |
| core-X.Y.Z | X.Y.Z | 3.8 |
diff --git a/docs/orchestration/flow_config/executors.md b/docs/orchestration/flow_config/executors.md
index c01423b94c2b..a6bf1b46fa55 100644
--- a/docs/orchestration/flow_config/executors.md
+++ b/docs/orchestration/flow_config/executors.md
@@ -29,7 +29,7 @@ Prefect's different executors have different performance (and complexity)
characteristics. Choosing a good configuration can greatly improve your flow's
performance. Here are some general recommendations:
-- If your flow already runs "fast enough", or doesn't have opportunites for
+- If your flow already runs "fast enough", or doesn't have opportunities for
parallelism (e.g. mapped tasks) you should use the
[LocalExecutor](#localexecutor). It's the simplest option, and will be the
easiest to manage.
diff --git a/docs/orchestration/flow_config/storage.md b/docs/orchestration/flow_config/storage.md
index 015e306f844d..f7e11956eec7 100644
--- a/docs/orchestration/flow_config/storage.md
+++ b/docs/orchestration/flow_config/storage.md
@@ -260,7 +260,7 @@ GCS Storage.
[Git Storage](/api/latest/storage.md#git) is a storage option for referencing flows
stored in a git repository as `.py` files.
-This storage class uses underlying git protocol instead of specific client libaries (e.g. `PyGithub` for GitHub), superseding other git based storages.
+This storage class uses the underlying git protocol instead of provider-specific client libraries (e.g. `PyGithub` for GitHub), superseding other git-based storage options.
```python
from prefect import Flow
diff --git a/docs/orchestration/flow_config/upgrade.md b/docs/orchestration/flow_config/upgrade.md
index d941c54db914..5b36fc18b427 100644
--- a/docs/orchestration/flow_config/upgrade.md
+++ b/docs/orchestration/flow_config/upgrade.md
@@ -2,8 +2,8 @@
Prefect 0.14.0 included a new Flow configuration system based on
[RunConfig](./run_configs.md) objects. This replaces the previous system based
-on [Environment](/orchestration/execution/overview.md) objects, with
-`Environment` based configuration being deprecated.
+on `Environment` objects; `Environment`-based configuration was
+deprecated in 0.14.0 and removed in 1.0.0.
If you never configured `flow.environment` explicitly on your flow, your
upgrade process should be seamless. Your flows will automatically transition to
@@ -13,10 +13,6 @@ If you did set an `Environment` explicitly on a flow, you'll want to transition
your flows to use an equivalent `RunConfig`. Below we'll outline a few common
environment setups, and their equivalents using run-configs.
-*Note that while `Environment` based configuration is deprecated, support for
-environments will stick around for several versions. Your old flows should
-continue to run fine, giving you time to figure out a good transition plan.*
-
## LocalEnvironment
`LocalEnvironment` was the default environment in previous versions of Prefect.
@@ -27,7 +23,7 @@ using the `LocalAgent` (it worked with any agent). This also meant that the
`LocalEnvironment` couldn't easily contain any platform-specific configuration.
In contrast, [RunConfig](./run_configs.md) objects correspond to a specific
-agent type (e.g. `LocalRun` for `LocalAgent`, `KuberenetesRun` for
+agent type (e.g. `LocalRun` for `LocalAgent`, `KubernetesRun` for
`KubernetesAgent`, ...), and contain platform-specific configuration options
(e.g. `image`, ...). The exception to this is
[UniversalRun](./run_configs.md#universalrun), which works with any agent (but
@@ -143,8 +139,7 @@ ECS tasks. There are also options for common settings (e.g. `image`, `cpu`,
for more information.
Note that use of `ECSRun` requires running an [ECS
-Agent](/orchestration/agents/ecs.md), not the deprecated [Fargate
-Agent](/orchestration/agents/fargate.md).
+Agent](/orchestration/agents/ecs.md), not the removed Fargate Agent.
- If you configured an `Executor` on your `FargateTaskEnvironment`, move that
setting to the flow itself, as sketched below.
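+A sketch of the resulting setup, assuming `flow` is your flow object and using a placeholder image name:
+
+```python
+from prefect.executors import LocalDaskExecutor
+from prefect.run_configs import ECSRun
+
+flow.run_config = ECSRun(image="my-registry/my-flow:latest")  # placeholder image
+flow.executor = LocalDaskExecutor()  # the executor now lives on the flow itself
+```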
diff --git a/docs/orchestration/getting-started/install.md b/docs/orchestration/getting-started/install.md
index 863b63b5fdc2..c65b32e5492c 100644
--- a/docs/orchestration/getting-started/install.md
+++ b/docs/orchestration/getting-started/install.md
@@ -2,7 +2,7 @@
## Basic installation
-Prefect requires Python 3.6+. If you're new to Python, we recommend installing the [Anaconda distribution](https://www.anaconda.com/distribution/).
+Prefect requires Python 3.7+. If you're new to Python, we recommend installing the [Anaconda distribution](https://www.anaconda.com/distribution/).
To install Prefect, run:
@@ -33,10 +33,6 @@ pipenv install --pre prefect
::::
-:::warning Python 3.9
-Prefect support for Python 3.9 is experimental and extras are not expected to work yet as we wait for required packages to be updated.
-:::
-
## Installing optional dependencies
Prefect ships with a number of optional dependencies, which can be installed using "extras" syntax:
diff --git a/docs/orchestration/getting-started/next-steps.md b/docs/orchestration/getting-started/next-steps.md
index c6cadf502dd3..71c56d218f88 100644
--- a/docs/orchestration/getting-started/next-steps.md
+++ b/docs/orchestration/getting-started/next-steps.md
@@ -43,7 +43,7 @@ old version and register a new version using the new code.
## Execute a Flow Run
-As in the [previous section](./registerings-and-running-a-flow.md#execute-a-flow-run), you can execute a
+As in the [previous section](./registering-and-running-a-flow.md#execute-a-flow-run), you can execute a
flow run using the "Quick Run" button in the UI. Make sure you still have your
Agent running [from before](./registering-and-running-a-flow.md#start-an-agent).
diff --git a/docs/orchestration/rbac/overview.md b/docs/orchestration/rbac/overview.md
index c7b7c1029ec0..1aef2ee6f406 100644
--- a/docs/orchestration/rbac/overview.md
+++ b/docs/orchestration/rbac/overview.md
@@ -76,7 +76,7 @@ mutation {
Basic and custom roles can be assigned in the Prefect UI to [users](https://cloud.prefect.io/team/members) and [service accounts](https://cloud.prefect.io/team/service-accounts). When inviting a new user to your team, you can specify a role to assign them.
-Roles can also be assigned programatically via the GraphQL API.
+Roles can also be assigned programmatically via the GraphQL API.
```graphql
mutation {
diff --git a/docs/orchestration/recipes/configuring_storage.md b/docs/orchestration/recipes/configuring_storage.md
index 92ac86f667da..f07e88442e07 100644
--- a/docs/orchestration/recipes/configuring_storage.md
+++ b/docs/orchestration/recipes/configuring_storage.md
@@ -36,7 +36,7 @@ If we attempt a dry-run build of this docker image by calling `storage.build()`,
Without going into unnecessary detail, this is because the default base image for Prefect Flows is minimal and doesn't include whatever non-Python bindings the `pyodbc` package requires. To add such dependencies, we will need to configure an appropriate base image to use for our Flow. For both reference and completeness, the following [Dockerfile](https://docs.docker.com/engine/reference/builder/) will build a base image that allows our Flow to connect to Microsoft SQL Server through `pyodbc`:
```
-FROM prefecthq/prefect:0.7.1-python3.6
+FROM prefecthq/prefect:latest-python3.7
# install some base utilities
RUN apt update && apt install -y build-essential unixodbc-dev && rm -rf /var/lib/apt/lists/*
@@ -57,7 +57,7 @@ RUN sed -i 's/TLSv1\.2/TLSv1.0/g' /etc/ssl/openssl.cnf
RUN sed -i 's/DEFAULT@SECLEVEL=2/DEFAULT@SECLEVEL=1/g' /etc/ssl/openssl.cnf
```
-Note that we used `python3.6` above, but you should attempt to match the version of Python you used in building your flow.
+Note that we used `python3.7` above, but you should match the version of Python you used to build your flow.
::: tip What types of Docker images are allowed as base images?
Note that the _only_ universal requirement for your Flow's Docker images is that the Prefect python package can be installed into them (Prefect will attempt to install itself at build time if your base image doesn't already have it installed).
:::
@@ -87,7 +87,7 @@ storage = Docker(registry_url="gcr.io/dev/",
### Including other Python scripts
-Another common situation is when your Flow imports objects or functions from other Python files that are not included in a publicly available Python package. Unsurprisingly, your Flow will need to be able to make the same imports within your Docker image. In order to accomodate this, you generally have two options:
+Another common situation is when your Flow imports objects or functions from other Python files that are not included in a publicly available Python package. Unsurprisingly, your Flow will need to be able to make the same imports within your Docker image. In order to accommodate this, you generally have two options:
1. Package your scripts up into a true [Python package](https://realpython.com/python-modules-packages/). You will most likely need to use the `COPY` instruction to put your package into the image, and then the `RUN` instruction to install it. This pattern will generally require using an intermediate base image so that you have full control over your [docker build context](https://docs.docker.com/develop/develop-images/dockerfile_best-practices/).
2. Use the `files` keyword argument to Prefect's Docker storage object to copy individual files into your image, and then add the directory containing these files to your image's `PYTHONPATH` environment variable (either through the `env_vars` keyword argument or by building a base image and using the `ENV` docker instruction). This ensures these scripts can be imported regardless of the present working directory of your Flow (a sketch of this option follows).
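+Here is that sketch, with placeholder paths:
+
+```python
+from prefect.storage import Docker
+
+storage = Docker(
+    files={"/local/path/utils.py": "/app/utils.py"},  # copy the script into the image
+    env_vars={"PYTHONPATH": "$PYTHONPATH:/app"},  # make it importable from anywhere
+)
+```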
diff --git a/docs/orchestration/tutorial/first.md b/docs/orchestration/tutorial/first.md
deleted file mode 100644
index 75e51bcefc34..000000000000
--- a/docs/orchestration/tutorial/first.md
+++ /dev/null
@@ -1,138 +0,0 @@
-# First Flow
-
-Now that your environment is setup, it's time to deploy your first Flow.
-
-## Creating a Project
-
-Before we can register a flow with the Prefect Backend, we first need to create
-a _Project_. Similar to a directory in a filesystem, Prefect organizes flows
-into projects, where each flow belongs to exactly one project.
-
-Projects can be created using the UI through either the project filter on the
-[dashboard](/orchestration/ui/dashboard) page, or in the [project settings
-page](/orchestration/ui/team-settings.md#projects).
-
-Here we'll create a new project called "tutorial".
-
-![](/orchestration/tutorial/create-project.png)
-
-Alternatively you can use the Prefect CLI:
-
-```
-$ prefect create project "tutorial"
-```
-
-For more information, see the [projects documentation](/orchestration/concepts/projects.md).
-
-## Register a Flow
-
-In order for your flow to be managed by a Prefect Backend (either Cloud or
-Server) it must first be _registered_.
-
-The easiest way to register a created flow is to call `flow.register` with the
-name of the project you wish to register it under.
-
-Here's the example flow we'll be using:
-
-```python
-import prefect
-from prefect import task, Flow
-
-@task
-def say_hello():
- logger = prefect.context.get("logger")
- logger.info("Hello, Cloud!")
-
-with Flow("hello-flow") as flow:
- say_hello()
-
-# Register the flow under the "tutorial" project
-flow.register(project_name="tutorial")
-```
-
-When a flow is registered, the following steps happen:
-
-- The flow is validated to catch common errors
-- The flow's source is serialized and stored in the flow's
- [Storage](/orchestration/flow_config/storage.md) on your infrastructure.
- What this entails depends on the type of Storage used. Examples include building a
- [docker image](/orchestration/flow_config/storage.md#docker), saving the code
- to an [S3 bucket](/orchestration/flow_config/storage.md#aws-s3), or
- referencing a [GitHub](/orchestration/flow_config/storage.md#github)
- repository.
-- The flow's metadata is packaged up and sent to the Prefect Backend.
-
-Note that the the Prefect Backend only receives the flow metadata (name,
-structure, etc...) _and not_ the actual source for the flow. Your flow code
-itself remains safe and secure on your infrastructure.
-
-For more information on flow registration, see the [registration
-docs](/orchestration/concepts/flows.md#registration).
-
-Running the above should output some details about your flow:
-
-```bash
-$ python hello_flow.py
-Result check: OK
-Flow URL: https://cloud.prefect.io/jim-prefectio/flow/fc5e630d-9154-489d-98d4-ea6ffabb9ca0
- └── ID: 90f9f57b-bff6-4d34-85be-8696d9982306
- └── Project: tutorial
- └── Labels: ['Jims-MBP']
-```
-
-After registering your flow, you should see it in the UI on the tutorial
-project [dashboard](/orchestration/ui/dashboard.md). Clicking on the flow
-will bring you to the [flow](/orchestration/ui/flow.md) page.
-
-![](/orchestration/tutorial/hello-flow-page.png)
-
-Your flow has been successfully registered!
-
-## Start an Agent
-
-You're almost ready to start scheduling flow runs using the Prefect Backend.
-The last thing you need to do is start a [Prefect
-Agent](/orchestration/agents/overview.md). Agents watch for any scheduled flow
-runs and execute them accordingly on your infrastructure.
-
-Prefect has many different kinds of Agents for deploying on different platforms
-(Kubernetes, ECS, Docker, etc...). Here we'll start a [Local
-Agent](/orchestration/agents/local.md) for deploying flows locally on a single
-machine.
-
-In a new terminal session, run the following to start a local agent.
-
-```bash
-prefect agent local start
-```
-
-This should output some initial logs, then sit idle waiting for scheduled flow
-runs. If you need to shutdown the agent at any point, you can stop it with a
-`Ctrl-C`. For now, you'll want to leave it running for the rest of the
-tutorial.
-
-::: tip Service Account API Key
-If you're using Prefect Cloud, the Local Agent will need access to the service account's API key [you created
-earlier](/orchestration/tutorial/overview.html#create-a-service-account-key).
-:::
-
-## Execute a Flow Run
-
-You're now ready to execute your first flow run!
-
-Flow runs can be created in a few different ways - here we'll use the UI. On
-the [flow page](/orchestration/ui/flow.md) page click "Quick Run" in the
-upper-right corner.
-
-This should take you to a new page for the flow run. Here you can track
-activity for a specific flow run, view the state of individual tasks, and see
-flow run logs as they come in. For more details on the information presented
-here, see the [UI docs](/orchestration/ui/flow-run.md).
-
-Eventually the flow run should complete in a `Success` state, with all tasks in
-green.
-
-![](/orchestration/tutorial/hello-flow-run-page.png)
-
-You've now executed your first flow run! In the next section we'll expand this
-flow to cover additional features.
diff --git a/docs/orchestration/tutorial/flow_config.md b/docs/orchestration/tutorial/flow_config.md
deleted file mode 100644
index 8b5161b10924..000000000000
--- a/docs/orchestration/tutorial/flow_config.md
+++ /dev/null
@@ -1,142 +0,0 @@
-# Flow Configuration
-
-So far we've been using the default [flow
-configuration](/orchestration/flow_config/overview.md). When using a Prefect Backend,
-each flow is configured with:
-
-- **Storage**: describes where the flow should be stored to and loaded from
- during execution. By default this uses
- [Local](/orchestration/flow_config/storage.md#local) storage, which stores
- your flow locally as a file on your machine.
-
-- **Run Configuration**: describes where and how a flow run should be executed.
- By default this is a
- [UniversalRun](/orchestration/flow_config/run_configs.md#universalrun), which
- works with any Agent.
-
-- **Executor**: describes where and how *tasks* in a flow run should be
- executed. By default this is a
- [LocalExecutor](/orchestration/flow_config/executors.md#localexecutor), which
- executes tasks serially in a single thread.
-
-To demonstrate these, we'll add two more requirements to our `hello-flow`:
-
-- The greeting used should be provided by an environment variable `GREETING`
-- We need to be able to greet lots of people in parallel
-
-## Configure Environment Variables
-
-We'll handle the environment variable requirement first. Environment variables
-in a flow run can be configured in several places. Two common locations:
-
-- On the agent, by passing `--env KEY=VALUE` when starting the agent. All
- flows run by the agent will then have that environment variable set.
-
-- On the flow, through the flow's
- [RunConfig](/orchestration/flow_config/run_configs.md). All runs of the flow
- will then have that environment variable set.
-
-Here we'll use a [LocalRun](/orchestration/flow_config/run_configs.md#localrun),
-since we're running a local agent.
-
-Our new flow code might look like this:
-
-```python
-import os
-
-import prefect
-from prefect import task, Flow, Parameter
-from prefect.run_configs import LocalRun
-
-
-@task
-def say_hello(name):
- # Load the greeting to use from an environment variable
- greeting = os.environ.get("GREETING")
- logger = prefect.context.get("logger")
- logger.info(f"{greeting}, {name}!")
-
-
-with Flow("hello-flow") as flow:
- people = Parameter("people", default=["Arthur", "Ford", "Marvin"])
- say_hello.map(people)
-
-# Configure the `GREETING` environment variable for this flow
-flow.run_config = LocalRun(env={"GREETING": "Hello"})
-
-# Register the flow under the "tutorial" project
-flow.register(project_name="tutorial")
-```
-
-Try registering and running the above flow - you should see that the `GREETING`
-environment variable is properly forwarded and used.
-
-Changing or configuring a flow's `run_config` is a useful way to customize the
-environment in which a flow runs. There are different types for deploying on
-different platforms (`KubernetesRun` for kubernetes, `DockerRun` for docker,
-...), each with different options. See the [run configuration
-docs](/orchestration/flow_config/run_configs.md) for more information.
-
-## Enable Parallel Execution
-
-Sometimes flows can benefit from parallel execution. This is especially useful
-when combined with [mapped tasks](/core/concepts/mapping.md), where there are
-lots of opportunities for parallelism.
-
-The simplest way to enable parallel execution for a flow is to swap out the
-default [LocalExecutor](/orchestration/flow_config/executors.md#localexecutor)
-for a
-[LocalDaskExecutor](/orchestration/flow_config/executors.md#localdaskexecutor).
-This will run your tasks in parallel using a pool of threads (or processes).
-
-Since our `say_hello` task runs far too quickly to benefit from parallel
-execution, we'll add a `time.sleep` to provide a better demo.
-
-```python
-import os
-import time
-
-import prefect
-from prefect import task, Flow, Parameter
-from prefect.run_configs import LocalRun
-from prefect.executors import LocalDaskExecutor
-
-
-@task
-def say_hello(name):
- # Add a sleep to simulate some long-running task
- time.sleep(10)
- # Load the greeting to use from an environment variable
- greeting = os.environ.get("GREETING")
- logger = prefect.context.get("logger")
- logger.info(f"{greeting}, {name}!")
-
-
-with Flow("hello-flow") as flow:
- people = Parameter("people", default=["Arthur", "Ford", "Marvin"])
- say_hello.map(people)
-
-# Configure the `GREETING` environment variable for this flow
-flow.run_config = LocalRun(env={"GREETING": "Hello"})
-
-# Use a `LocalDaskExecutor` to run this flow
-# This will run tasks in a thread pool, allowing for parallel execution
-flow.executor = LocalDaskExecutor()
-
-# Register the flow under the "tutorial" project
-flow.register(project_name="tutorial")
-```
-
-Register and run the flow.
-
-![](/orchestration/tutorial/hello-flow-run-parallel.png)
-
-You should see multiple `say_hello` tasks running in parallel, providing a
-noticeable speedup (this ran in 13 seconds, when serially it would have taken
-30 seconds). If you look through the logs you can also see logs from each task
-interleaved, showing they ran concurrently.
-
-Not every flow will require or benefit from parallelism, but when needed
-swapping out the executor can result in large performance improvements. Prefect
-supports several executors not discussed here, for more information see the
-[executors documentation](/orchestration/flow_config/executors.md).
diff --git a/docs/orchestration/tutorial/next-steps.md b/docs/orchestration/tutorial/next-steps.md
deleted file mode 100644
index 1b829c14331d..000000000000
--- a/docs/orchestration/tutorial/next-steps.md
+++ /dev/null
@@ -1,59 +0,0 @@
-# Conclusion and Next Steps
-
-In this tutorial we covered:
-
-- Creating a project
-- Registering and running flows
-- Providing flow parameters at runtime
-- Altering a flow's execution environment through it's
- [run-config](/orchestration/flow_config/run_configs.md)
-- Enabling parallelism by swapping out a flow's
- [executor](/orchestration/flow_config/executors.md).
-
-This is just the beginning; the Prefect API has many more features to explore!
-
-## Examples
-
-Prefect provides a number of [examples](/core/examples/overview.md) that illustrate
-different aspects of developing and running flows. These examples can all be run
-locally or through Prefect Cloud/Server. To create a new project and register all
-examples, run the following:
-
-You can register all the examples in a new project by running the following:
-
-```
-# Create a new "Prefect Examples" project
-$ prefect create project "Prefect Examples"
-
-# Register all the examples into the "Prefect Examples" project
-$ prefect register --json https://docs.prefect.io/examples.json --project "Prefect Examples"
-```
-
-See the [examples](/core/examples/overview.md) page for more information.
-
-## Concepts
-
-Visit the [Concept](/orchestration/concepts/api.html) docs for actions such as
-working directly with Prefect's [GraphQL
-API](/orchestration/concepts/graphql.html), diving into the
-[CLI](/orchestration/concepts/cli.html), setting [concurrency
-limits](/orchestration/concepts/task-concurrency-limiting.html) on your Cloud runs,
-and more.
-
-## Agents
-
-To learn more about Prefect agents, [flow
-affinity](/orchestration/agents/overview.html#labels) via labels, or find
-information on platform specific agents visit the
-[agent](/orchestration/agents/overview.html) documentation.
-
-## Flow Configuration
-
-For information on all the options for configuring a flow for deployment, see
-the [Flow Configuration](/orchestration/flow_config/overview.html) documentation.
-
-## Deployment Recipes
-
-Check out some of the [deployment
-recipes](/orchestration/recipes/configuring_storage.html) that are written
-for some example flow deployment patterns.
diff --git a/docs/orchestration/tutorial/overview.md b/docs/orchestration/tutorial/overview.md
deleted file mode 100644
index 799c7c9fe116..000000000000
--- a/docs/orchestration/tutorial/overview.md
+++ /dev/null
@@ -1,80 +0,0 @@
----
-title: Overview and Setup
----
-
-
-
-
-
-# Overview and Setup
-
-Welcome to the Prefect Deployment Tutorial! This tutorial will cover:
-
-- Setting up your environment to use either [Prefect
- Cloud](https://cloud.prefect.io) or [Prefect
- Server](/orchestration/server/overview.md)
-- Configuring and registering your first Flow
-- Using a [Prefect Agent](/orchestration/agents/overview.md) to run that Flow
-
-If you haven't yet, you might want to go through the [Prefect Core
-Tutorial](/core/tutorial/01-etl-before-prefect.html),
-which covers in greater detail how to write Prefect Flows.
-
-## Install Prefect
-
-Before starting the tutorial, you'll need a working install of the core Prefect
-library.
-
-You can find installation instructions [here](/core/getting_started/install.html).
-
-## Select an Orchestration Backend
-
-Prefect supports two different orchestration backends:
-
-- `cloud` - our [hosted service](https://cloud.prefect.io)
-- `server` - the [open source backend](/orchestration/server/overview.md),
- deployed on your infrastructure
-
-To use Prefect with either backend, you must first select that backend via
-the CLI:
-
-:::: tabs
-::: tab Cloud
-
-```bash
-$ prefect backend cloud
-```
-
-:::
-
-::: tab Server
-
-```bash
-$ prefect backend server
-```
-
-:::
-::::
-
-Note that you can change backends at any time by rerunning the `prefect backend ...` command.
-
-## Authenticating with Prefect Cloud
-
-If you're using Prefect Cloud, you'll also need to authenticate with the
-backend before you can proceed further.
-
-### Create an API key and login
-
-To authenticate, you'll need to create an [API key](/orchestration/concepts/api_keys.md) and save it.
-
-- Login to [https://cloud.prefect.io](https://cloud.prefect.io)
-- Navigate to the [API Keys page](https://cloud.prefect.io/user/keys). In the User menu in the top right corner go to **Account Settings** -> **API Keys** -> **Create An API Key**.
-- Copy the created key
-- Login with the Prefect CLI `prefect auth login --key `
-
-
-::: tip Authentication for agents
-
-When running deployed Flows with an [Agent](/orchestration/agents/overview.html) we recommend creating an API key associated with a service account instead of your user. See the [API keys documentation](/orchestration/concepts/api_keys.md) for details.
-
-:::
\ No newline at end of file
diff --git a/docs/orchestration/tutorial/parameters.md b/docs/orchestration/tutorial/parameters.md
deleted file mode 100644
index 49e532dd68dc..000000000000
--- a/docs/orchestration/tutorial/parameters.md
+++ /dev/null
@@ -1,80 +0,0 @@
-# Parameters & Mapped Tasks
-
-Writing a flow to greet one person is all good, but now the requirements have
-changed.
-
-- You now need to greet multiple people
-- You need the list of people to be configurable
-
-In [the core tutorial](/core/tutorial/03-parameterized-flow.md) we learned how
-to do this using [parameters](/core/concepts/parameters.md) and [mapped
-tasks](/core/concepts/mapping.md). These concepts work equally well when run
-using a Prefect Backend.
-
-## Update Your Flow
-
-After a few minutes of editing, you might come up with a flow that looks
-something like this:
-
-```python
-import prefect
-from prefect import task, Flow, Parameter
-
-@task
-def say_hello(name):
- logger = prefect.context.get("logger")
- logger.info(f"Hello, {name}!")
-
-with Flow("hello-flow") as flow:
- # An optional parameter "people", with a default list of names
- people = Parameter("people", default=["Arthur", "Ford", "Marvin"])
- # Map `say_hello` across the list of names
- say_hello.map(people)
-
-# Register the flow under the "tutorial" project
-flow.register(project_name="tutorial")
-```
-
-This flow has an optional parameter `people` that takes in a list of names to
-greet (with a default list provided). It then maps the `say_hello` task over
-the list of names.
-
-Run the above to register a new version of `hello-flow`. This will archive the
-old version and register a new version using the new code.
-
-## Execute a Flow Run
-
-As in the [previous section](./first.md#execute-a-flow-run), you can execute a
-flow run using the "Quick Run" button in the UI. Make sure you still have your
-Agent running [from before](./first.md#start-an-agent).
-
-After a few seconds, you should see your flow run complete successfully.
-
-![](/orchestration/tutorial/hello-flow-run-mapped1.png)
-
-This run has a few more tasks than before (one `people` parameter task, and
-several mapped `say_hello` tasks). Since we used the parameter defaults, we
-should see 3 mapped `say_hello` tasks, one for each name.
-
-Click through the logs tab to see the logs for each name.
-
-## Specify New Parameters
-
-To start a flow run with non-default values for a parameter, you can click the
-`"Run"` button (middle of the flow page) instead of the `"Quick Run"` button.
-This brings you to a [run
-page](http://localhost:8080/orchestration/ui/flow.html#run) where you can
-configure more details for a specific flow run. Here we'll set the flow run
-name to `"custom-names"`, and provide new values for the `"people"` parameter.
-
-![](/orchestration/tutorial/hello-flow-run-parameter-config.png)
-
-When you're happy with the flow run settings, click `"Run"` to create a new
-flow run using the new settings.
-
-Once the flow run starts, check the logs to see that your settings took effect.
-
-![](/orchestration/tutorial/hello-flow-run-mapped2.png)
-
-Custom parameters for a flow run can also be specified programmatically, see
-the [flow run docs](/orchestration/concepts/flow_runs.md) for more information.
diff --git a/docs/orchestration/ui/interactive-api.md b/docs/orchestration/ui/interactive-api.md
index 17ebed2a9818..1c9dd3a7e35a 100644
--- a/docs/orchestration/ui/interactive-api.md
+++ b/docs/orchestration/ui/interactive-api.md
@@ -44,7 +44,7 @@ query {
We've nested `tasks` within the `flow` object, which tells the server to retrieve tasks only within the context of each flow that it returns.
### Limits and Offset
-To limit the number of items that are returned, you can use the Limit selector at the top of the Interactive API page. The default limit is 10 and the maximum is 100. Inline limit arguments are overriden by the value set in the Limit selector.
+To limit the number of items that are returned, you can use the Limit selector at the top of the Interactive API page. The default limit is 10 and the maximum is 100. Inline limit arguments are overridden by the value set in the Limit selector.
To the left of the Limit selector there is also an Offset selector. The Offset selector tells the server at which index your queries should start. For example, if your unlimited query would return 5 flows and you set the limit to 2, an offset of 0 would return the first two items in the set. To get the next two items with no overlap, you would set the offset to 2.
diff --git a/docs/outline.toml b/docs/outline.toml
index dfbdc772c383..3d72eb3b909c 100644
--- a/docs/outline.toml
+++ b/docs/outline.toml
@@ -32,26 +32,22 @@ functions = [
title = "Flow"
module = "prefect.backend.flow"
classes = ["FlowView"]
-experimental = true
[pages.backend.flow_run]
title = "Flow Run"
module = "prefect.backend.flow_run"
classes = ["FlowRunView"]
functions = ["watch_flow_run"]
-experimental = true
[pages.backend.task_run]
title = "Task Run"
module = "prefect.backend.task_run"
classes = ["TaskRunView"]
-experimental = true
[pages.backend.tenant]
title = "Tenant"
module = "prefect.backend.tenant"
classes = ["TenantView"]
-experimental = true
[pages.backend.kv_store]
title = "KV Store"
@@ -71,12 +67,12 @@ classes = ["Secret"]
[pages.cli.agent]
title = "agent"
module = "prefect.cli.agent"
-commands = ["local", "docker", "kubernetes", "ecs", "fargate"]
+commands = ["local", "docker", "kubernetes", "ecs"]
[pages.cli.auth]
title = "auth"
module = "prefect.cli.auth"
-commands = ["login", "logout", "list_tenants", "switch_tenants", "create_key", "list_keys", "revoke_key", "status", "create_token", "list_tokens", "revoke_token"]
+commands = ["login", "logout", "list_tenants", "switch_tenants", "create_key", "list_keys", "revoke_key", "status"]
[pages.cli.create]
title = "create"
@@ -116,7 +112,7 @@ commands = ["set_command", "get_command", "delete_command", "list_command"]
[pages.cli.run]
title = "run"
module = "prefect.cli.run"
-commands = ["run", "run_flow"]
+commands = ["run"]
[pages.cli.server]
title = "server"
@@ -267,11 +263,6 @@ title = "Cloud"
module = "prefect.engine.cloud"
classes = ["CloudFlowRunner", "CloudTaskRunner"]
-[pages.environments.execution]
-title = "Execution Environments"
-module = "prefect.environments.execution"
-classes = ["DaskKubernetesEnvironment", "DaskCloudProviderEnvironment", "FargateTaskEnvironment", "KubernetesJobEnvironment", "LocalEnvironment"]
-
[pages.executors]
title = "Executors"
module = "prefect.executors"
@@ -670,11 +661,6 @@ title = "Kubernetes Agent"
module = "prefect.agent.kubernetes"
classes = {KubernetesAgent = ["start"]}
-[pages.agent.fargate]
-title = "Fargate Agent"
-module = "prefect.agent.fargate"
-classes = {FargateAgent = ["start"]}
-
[pages.agent.ecs]
title = "ECS Agent"
module = "prefect.agent.ecs"
diff --git a/requirements.txt b/requirements.txt
index ff3d4b4b787a..be2035657f54 100644
--- a/requirements.txt
+++ b/requirements.txt
@@ -1,13 +1,10 @@
click >= 7.0
cloudpickle >=1.3.0
croniter >= 0.3.24
-dask >= 2.17.0; python_version > '3.6'
-dask >= 2.17.0, <2021.06.0 ; python_version < '3.7'
-distributed >= 2.17.0; python_version > '3.6'
-distributed >= 2.17.0, <2021.06.0 ; python_version < '3.7'
+dask >= 2021.06.0
+distributed >= 2.17.0
docker >=3.4.1
-importlib_resources >= 3.0.0; python_version < '3.7'
-dataclasses >= 0.7, < 1.0; python_version < '3.7'
+importlib_resources >= 3.0.0
marshmallow >= 3.0.0b19
marshmallow-oneofschema >= 2.0.0b2
msgpack >= 0.6.0
diff --git a/setup.cfg b/setup.cfg
index dbd10f098f2c..386eecdf1660 100644
--- a/setup.cfg
+++ b/setup.cfg
@@ -6,9 +6,7 @@ env =
PREFECT__USER_CONFIG_PATH=""
PREFECT__BACKEND="cloud"
PREFECT__CLOUD__HEARTBEAT_MODE="off"
-filterwarnings =
- ignore:`Environment` based flow configuration is deprecated:UserWarning
-
+
[isort]
skip = __init__.py,/engine/executors/dask.py
multi_line_output = 3
diff --git a/setup.py b/setup.py
index 9eca1b457d1b..f0386636f63c 100644
--- a/setup.py
+++ b/setup.py
@@ -152,9 +152,9 @@ def run(self):
"Intended Audience :: System Administrators",
"License :: OSI Approved :: Apache Software License",
"Programming Language :: Python :: 3 :: Only",
- "Programming Language :: Python :: 3.6",
"Programming Language :: Python :: 3.7",
"Programming Language :: Python :: 3.8",
+ "Programming Language :: Python :: 3.9",
"Topic :: Software Development :: Libraries",
"Topic :: System :: Monitoring",
],
diff --git a/src/prefect/__init__.py b/src/prefect/__init__.py
index 6ad9dd2cf0db..aeefd684f5cd 100644
--- a/src/prefect/__init__.py
+++ b/src/prefect/__init__.py
@@ -7,10 +7,8 @@
from prefect.client import Client
import prefect.schedules
import prefect.triggers
-import prefect.environments
import prefect.storage
import prefect.executors
-import prefect.engine.executors # deprecated
from prefect.core import Task, Flow, Parameter
import prefect.engine
@@ -26,10 +24,10 @@
import prefect.backend
import prefect.artifacts
-from ._version import get_versions
+from ._version import get_versions as _get_versions
-__version__ = get_versions()["version"] # type: ignore
-del get_versions
+__version__ = _get_versions()["version"] # type: ignore
+del _get_versions
try:
import signal as _signal
@@ -38,3 +36,23 @@
_signal.signal(29, _sig_handler)
except:
pass
+
+__all__ = [
+ "Client",
+ "Flow",
+ "Parameter",
+ "Task",
+ "api",
+ "apply_map",
+ "case",
+ "config",
+ "context",
+ "flatten",
+ "mapped",
+ "models",
+ "plugins",
+ "resource_manager",
+ "tags",
+ "task",
+ "unmapped",
+]
diff --git a/src/prefect/agent/__init__.py b/src/prefect/agent/__init__.py
index 614ed6620e51..ad4d258d874d 100644
--- a/src/prefect/agent/__init__.py
+++ b/src/prefect/agent/__init__.py
@@ -3,8 +3,9 @@
from prefect.agent.agent import Agent
import prefect.agent.docker
-import prefect.agent.fargate
import prefect.agent.kubernetes
import prefect.agent.local
import prefect.agent.ecs
import prefect.agent.vertex
+
+__all__ = ["Agent"]
diff --git a/src/prefect/agent/agent.py b/src/prefect/agent/agent.py
index 1fb20224146f..edb7084883ca 100644
--- a/src/prefect/agent/agent.py
+++ b/src/prefect/agent/agent.py
@@ -23,7 +23,6 @@
from prefect.serialization.state import StateSchema
from prefect.serialization.run_config import RunConfigSchema
from prefect.utilities.context import context
-from prefect.exceptions import AuthorizationError
from prefect.utilities.graphql import GraphQLResult, with_args
ascii_name = r"""
@@ -128,10 +127,8 @@ def __init__(
agent_address: str = None,
no_cloud_logs: bool = None,
) -> None:
- # Load token for backwards compatibility
- token = config.cloud.agent.get("auth_token")
# Auth with an API key will be loaded from the config or disk by the Client
- self.client = Client(api_server=config.cloud.api, api_token=token)
+ self.client = Client(api_server=config.cloud.api)
self.agent_config_id = agent_config_id
self._agent_config: Optional[dict] = None
@@ -758,17 +755,17 @@ def _mark_flow_as_failed(self, flow_run: GraphQLResult, message: str) -> None:
def _get_run_config(
self, flow_run: GraphQLResult, run_config_cls: Type[RunConfig]
- ) -> Optional[RunConfig]:
+ ) -> RunConfig:
"""
- Get a run_config for the flow, if present.
+ Get a run_config for the flow run. Returns the flow run's run config if present,
+ otherwise a default; the returned run config is always of type `run_config_cls`.
Args:
- flow_run (GraphQLResult): A GraphQLResult flow run object
- run_config_cls (Callable): The expected run-config class
Returns:
- - RunConfig: The flow run's run-config. Returns None if an
- environment-based flow.
+ - RunConfig: The flow run's run-config or an instance of `run_config_cls`
"""
# If the flow is using a run_config, load it
if getattr(flow_run, "run_config", None) is not None:
@@ -784,11 +781,9 @@ def _get_run_config(
self.logger.error(msg)
raise TypeError(msg)
return run_config
- elif getattr(flow_run.flow, "environment", None) is None:
- # No environment, use default run_config
- return run_config_cls()
- return None
+ # Otherwise, return the default run_config
+ return run_config_cls()
def _safe_write_run_log(
self, flow_run: GraphQLResult, message: str, level: str
@@ -816,28 +811,6 @@ def _safe_write_run_log(
# Backend API connection -----------------------------------------------------------
- def _verify_token(self, token: str) -> None:
- """
- Checks whether a token with a `RUNNER` scope was provided
-
- DEPRECATED: API Keys do not have different scope
-
- Args:
- - token (str): The provided agent token to verify
- Raises:
- - AuthorizationError: if token is empty or does not have a RUNNER role
- """
- if not token:
- raise AuthorizationError("No agent API token provided.")
-
- # Check if RUNNER role
- result = self.client.graphql(query="query { auth_info { api_token_scope } }")
- if (
- not result.data # type: ignore
- or result.data.auth_info.api_token_scope != "RUNNER" # type: ignore
- ):
- raise AuthorizationError("Provided token does not have a RUNNER scope.")
-
def _register_agent(self) -> str:
"""
Register this agent with a backend API and retrieve the ID
@@ -886,7 +859,6 @@ def _setup_api_connection(self) -> None:
"""
Sets up the agent's connection to Cloud
- - Verifies token with Cloud
- Gets an agent_id and attaches it to the headers
- Runs a test query to check for a good setup
@@ -894,18 +866,6 @@ def _setup_api_connection(self) -> None:
RuntimeError: On failed test query
"""
- # Verify API tokens -- API keys do not need a type-check
- if config.backend == "cloud" and not self.client.api_key:
- self.logger.debug("Verifying authentication with Prefect Cloud...")
- try:
- self._verify_token(self.client.get_auth_token())
- self.logger.debug("Authentication successful!")
- except Exception as exc:
- self.logger.error("Failed to verify authentication.")
- raise RuntimeError(
- f"Error while contacting API at {config.cloud.api}",
- ) from exc
-
# Register agent with backend API
self.client.attach_headers({"X-PREFECT-AGENT-ID": self._register_agent()})
diff --git a/src/prefect/agent/docker/__init__.py b/src/prefect/agent/docker/__init__.py
index 047d5194c616..64877f0498b4 100644
--- a/src/prefect/agent/docker/__init__.py
+++ b/src/prefect/agent/docker/__init__.py
@@ -1 +1,3 @@
from prefect.agent.docker.agent import DockerAgent
+
+__all__ = ["DockerAgent"]
diff --git a/src/prefect/agent/docker/agent.py b/src/prefect/agent/docker/agent.py
index 153735001305..47c205b2c23b 100644
--- a/src/prefect/agent/docker/agent.py
+++ b/src/prefect/agent/docker/agent.py
@@ -91,7 +91,6 @@ class DockerAgent(Agent):
from the listed registries.
- docker_client_timeout (int, optional): The timeout to use for docker
API calls, defaults to 60 seconds.
- - docker_interface: This option has been deprecated and has no effect.
"""
def __init__(
@@ -110,7 +109,6 @@ def __init__(
networks: List[str] = None,
reg_allow_list: List[str] = None,
docker_client_timeout: int = None,
- docker_interface: bool = None, # Deprecated in 0.14.18
) -> None:
super().__init__(
agent_config_id=agent_config_id,
@@ -144,13 +142,6 @@ def __init__(
self.host_spec,
) = self._parse_volume_spec(volumes or [])
- if docker_interface is not None:
- warnings.warn(
- "DockerAgent `docker_interface` argument is deprecated and will be "
- "removed from Prefect. Setting it has no effect.",
- UserWarning,
- )
-
# Add containers to the given Docker networks
self.networks = networks
@@ -587,16 +578,9 @@ def populate_env_vars(
env.update(
{
"PREFECT__BACKEND": config.backend,
- "PREFECT__CLOUD__AUTH_TOKEN": (
- # Pull an auth token if it exists but fall back to an API key so
- # flows in pre-0.15.0 containers still authenticate correctly
- config.cloud.agent.get("auth_token")
- or self.flow_run_api_key
- or ""
- ),
"PREFECT__CLOUD__API_KEY": self.flow_run_api_key or "",
"PREFECT__CLOUD__TENANT_ID": (
- # Providing a tenant id is only necessary for API keys (not tokens)
+ # A tenant id is only required when authenticating with an API key
self.client.tenant_id
if self.flow_run_api_key
else ""
@@ -611,6 +595,8 @@ def populate_env_vars(
"PREFECT__ENGINE__TASK_RUNNER__DEFAULT_CLASS": "prefect.engine.cloud.CloudTaskRunner",
# Backwards compatibility variable for containers on Prefect <0.15.0
"PREFECT__LOGGING__LOG_TO_CLOUD": str(self.log_to_cloud).lower(),
+ # Backwards compatibility variable for containers on Prefect <1.0.0
+ "PREFECT__CLOUD__AUTH_TOKEN": self.flow_run_api_key or "",
}
)
return env
diff --git a/src/prefect/agent/ecs/__init__.py b/src/prefect/agent/ecs/__init__.py
index fa06c222b274..07481a963ae4 100644
--- a/src/prefect/agent/ecs/__init__.py
+++ b/src/prefect/agent/ecs/__init__.py
@@ -1 +1,3 @@
from prefect.agent.ecs.agent import ECSAgent
+
+__all__ = ["ECSAgent"]
diff --git a/src/prefect/agent/ecs/agent.py b/src/prefect/agent/ecs/agent.py
index 6159da302607..24218b1885d1 100644
--- a/src/prefect/agent/ecs/agent.py
+++ b/src/prefect/agent/ecs/agent.py
@@ -498,16 +498,9 @@ def get_run_task_kwargs(
"PREFECT__CONTEXT__FLOW_RUN_ID": flow_run.id,
"PREFECT__CONTEXT__FLOW_ID": flow_run.flow.id,
"PREFECT__CLOUD__SEND_FLOW_RUN_LOGS": str(self.log_to_cloud).lower(),
- "PREFECT__CLOUD__AUTH_TOKEN": (
- # Pull an auth token if it exists but fall back to an API key so
- # flows in pre-0.15.0 containers still authenticate correctly
- config.cloud.agent.get("auth_token")
- or self.flow_run_api_key
- or ""
- ),
"PREFECT__CLOUD__API_KEY": self.flow_run_api_key or "",
"PREFECT__CLOUD__TENANT_ID": (
- # Providing a tenant id is only necessary for API keys (not tokens)
+ # Providing a tenant id is only necessary when authenticating with an API key
self.client.tenant_id
if self.flow_run_api_key
else ""
@@ -515,6 +508,8 @@ def get_run_task_kwargs(
"PREFECT__CLOUD__AGENT__LABELS": str(self.labels),
# Backwards compatibility variable for containers on Prefect <0.15.0
"PREFECT__LOGGING__LOG_TO_CLOUD": str(self.log_to_cloud).lower(),
+ # Backwards compatibility variable for containers on Prefect <1.0.0
+ "PREFECT__CLOUD__AUTH_TOKEN": self.flow_run_api_key or "",
}
)
container_env = [{"name": k, "value": v} for k, v in env.items()]
diff --git a/src/prefect/agent/fargate/__init__.py b/src/prefect/agent/fargate/__init__.py
deleted file mode 100644
index 254f097c628e..000000000000
--- a/src/prefect/agent/fargate/__init__.py
+++ /dev/null
@@ -1 +0,0 @@
-from prefect.agent.fargate.agent import FargateAgent
diff --git a/src/prefect/agent/fargate/agent.py b/src/prefect/agent/fargate/agent.py
deleted file mode 100644
index f88e78383a04..000000000000
--- a/src/prefect/agent/fargate/agent.py
+++ /dev/null
@@ -1,854 +0,0 @@
-import copy
-import json
-import os
-import warnings
-from ast import literal_eval
-from typing import Iterable
-import uuid
-
-from slugify import slugify
-
-from prefect import config
-from prefect.agent import Agent
-from prefect.utilities.agent import get_flow_image, get_flow_run_command
-from prefect.utilities.graphql import GraphQLResult
-
-
-class FargateAgent(Agent):
- """
- Agent which deploys flow runs as tasks using Fargate.
-
- DEPRECATED: The Fargate agent is deprecated, please transition to using the
- ECS agent instead.
-
- This agent can run anywhere as long as the proper access configuration
- variables are set. Information on using the Fargate Agent can be found at
- https://docs.prefect.io/orchestration/agents/fargate.html
-
- All `kwargs` are accepted that one would normally pass to boto3 for `register_task_definition`
- and `run_task`. For information on the kwargs supported visit the following links:
-
- https://boto3.amazonaws.com/v1/documentation/api/latest/reference/services/ecs.html#ECS.Client.register_task_definition
-
- https://boto3.amazonaws.com/v1/documentation/api/latest/reference/services/ecs.html#ECS.Client.run_task
-
- **Note**: if AWS authentication kwargs such as `aws_access_key_id` and `aws_session_token`
- are not provided they will be read from the environment.
-
- Environment variables may be set on the agent to be provided to each flow run's Fargate task:
- ```
- prefect agent fargate start --env MY_SECRET_KEY=secret --env OTHER_VAR=$OTHER_VAR
- ```
-
- boto3 kwargs being provided to the Fargate Agent:
- ```
- prefect agent fargate start \\
- networkConfiguration="{\\
- 'awsvpcConfiguration': {\\
- 'assignPublicIp': 'ENABLED',\\
- 'subnets': ['my_subnet_id'],\\
- 'securityGroups': []\\
- }\\
- }"
- ```
-
- botocore configuration options can be provided to the Fargate Agent:
- ```
- FargateAgent(botocore_config={"retries": {"max_attempts": 10}})
- ```
-
- Args:
- - agent_config_id (str, optional): An optional agent configuration ID that can be used to set
- configuration based on an agent from a backend API. If set all configuration values will be
- pulled from backend agent configuration.
- - name (str, optional): An optional name to give this agent. Can also be set through
- the environment variable `PREFECT__CLOUD__AGENT__NAME`. Defaults to "agent"
- - labels (List[str], optional): a list of labels, which are arbitrary string
- identifiers used by Prefect Agents when polling for work
- - env_vars (dict, optional): a dictionary of environment variables and values that will
- be set on each flow run that this agent submits for execution
- - max_polls (int, optional): maximum number of times the agent will poll Prefect Cloud
- for flow runs; defaults to infinite
- - agent_address (str, optional): Address to serve internal api at. Currently this is
- just health checks for use by an orchestration layer. Leave blank for no api server
- (default).
- - no_cloud_logs (bool, optional): Disable logging to a Prefect backend for this agent
- and all deployed flow runs
- - launch_type (str, optional): either FARGATE or EC2, defaults to FARGATE
- - aws_access_key_id (str, optional): AWS access key id for connecting the boto3
- client. Defaults to the value set in the environment variable
- `AWS_ACCESS_KEY_ID` or `None`
- - aws_secret_access_key (str, optional): AWS secret access key for connecting
- the boto3 client. Defaults to the value set in the environment variable
- `AWS_SECRET_ACCESS_KEY` or `None`
- - aws_session_token (str, optional): AWS session key for connecting the boto3
- client. Defaults to the value set in the environment variable
- `AWS_SESSION_TOKEN` or `None`
- - region_name (str, optional): AWS region name for connecting the boto3 client.
- Defaults to the value set in the environment variable `REGION_NAME` or `None`
- - botocore_config (dict, optional): botocore configuration options to be passed to the
- boto3 client.
- https://botocore.amazonaws.com/v1/documentation/api/latest/reference/config.html
- - enable_task_revisions (bool, optional): Enable registration of task definitions using
- revisions. When enabled, task definitions will use the flow name as opposed to the
- flow id and each new version will be a task definition revision. Each revision will be
- registered with 'PrefectFlowId' and 'PrefectFlowVersion' tags to enable
- proper lookup for existing revisions. Flow name is reformatted to support task
- definition naming rules by converting all non-alphanumeric characters to '_'.
- Defaults to False.
- - use_external_kwargs (bool, optional): When enabled, the agent will check for the
- existence of an external JSON file containing kwargs to pass into the run_flow
- process. Defaults to False.
- - external_kwargs_s3_bucket (str, optional): S3 bucket containing external kwargs.
- - external_kwargs_s3_key (str, optional): S3 key prefix for the location of the
- external kwargs file, `<slugified-flow-name>/<first-8-of-flow-id>.json`.
- - **kwargs (dict, optional): additional keyword arguments to pass to boto3 for
- `register_task_definition` and `run_task`
- """
-
- def __init__( # type: ignore
- self,
- agent_config_id: str = None,
- name: str = None,
- labels: Iterable[str] = None,
- env_vars: dict = None,
- max_polls: int = None,
- agent_address: str = None,
- no_cloud_logs: bool = None,
- launch_type: str = "FARGATE",
- aws_access_key_id: str = None,
- aws_secret_access_key: str = None,
- aws_session_token: str = None,
- region_name: str = None,
- botocore_config: dict = None,
- enable_task_revisions: bool = False,
- use_external_kwargs: bool = False,
- external_kwargs_s3_bucket: str = None,
- external_kwargs_s3_key: str = None,
- **kwargs,
- ) -> None:
- super().__init__(
- agent_config_id=agent_config_id,
- name=name,
- labels=labels,
- env_vars=env_vars,
- max_polls=max_polls,
- agent_address=agent_address,
- no_cloud_logs=no_cloud_logs,
- )
-
- if not kwargs.pop("_called_from_cli", False):
- warnings.warn(
- "`FargateAgent` is deprecated, please transition to using `ECSAgent` instead"
- )
-
- from boto3 import client as boto3_client
- from boto3 import resource as boto3_resource
- from botocore.config import Config
-
- # Config used for boto3 client initialization
- aws_access_key_id = aws_access_key_id or os.getenv("AWS_ACCESS_KEY_ID")
- aws_secret_access_key = aws_secret_access_key or os.getenv(
- "AWS_SECRET_ACCESS_KEY"
- )
- aws_session_token = aws_session_token or os.getenv("AWS_SESSION_TOKEN")
- region_name = region_name or os.getenv("REGION_NAME")
- botocore_config = botocore_config or {}
-
- # revisions and kwargs configurations
- self.enable_task_revisions = enable_task_revisions
- self.use_external_kwargs = use_external_kwargs
- self.external_kwargs_s3_bucket = external_kwargs_s3_bucket
- self.external_kwargs_s3_key = external_kwargs_s3_key
- self.launch_type = launch_type
-
- # Parse accepted kwargs for task definition, run, and container definitions key of task
- # definition
- (
- self.task_definition_kwargs,
- self.task_run_kwargs,
- self.container_definitions_kwargs,
- ) = self._parse_kwargs(kwargs, True)
-
- # Client initialization
- self.boto3_client = boto3_client(
- "ecs",
- aws_access_key_id=aws_access_key_id,
- aws_secret_access_key=aws_secret_access_key,
- aws_session_token=aws_session_token,
- region_name=region_name,
- config=Config(**botocore_config),
- )
- # fetch external kwargs from s3 if needed
- if self.use_external_kwargs:
- self.logger.info("Use of external S3 kwargs enabled.")
- self.s3_resource = boto3_resource(
- "s3",
- aws_access_key_id=aws_access_key_id,
- aws_secret_access_key=aws_secret_access_key,
- aws_session_token=aws_session_token,
- region_name=region_name,
- )
-
- # get boto3 client for resource groups tagging api
- if self.enable_task_revisions:
- self.logger.info("Native ECS task revisions enabled.")
- self.boto3_client_tags = boto3_client(
- "resourcegroupstaggingapi",
- aws_access_key_id=aws_access_key_id,
- aws_secret_access_key=aws_secret_access_key,
- aws_session_token=aws_session_token,
- region_name=region_name,
- config=Config(**botocore_config),
- )
-
- self.logger.debug(f"Launch type: {self.launch_type}")
- self.logger.debug(f"Enable task revisions: {self.enable_task_revisions}")
- self.logger.debug(f"Use external kwargs: {self.use_external_kwargs}")
- self.logger.debug(
- f"External kwargs S3 bucket: {self.external_kwargs_s3_bucket}"
- )
- self.logger.debug(f"External kwargs S3 key: {self.external_kwargs_s3_key}")
-
- def _override_kwargs(
- self,
- flow_run: GraphQLResult,
- flow_task_definition_kwargs: dict,
- flow_task_run_kwargs: dict,
- flow_container_definitions_kwargs: dict,
- ) -> None:
- """
- Update the given kwargs dicts in place with values from an external kwargs file in S3.
-
- Args:
- - flow_run (GraphQLResult): A GraphQLResult flow run object
- - flow_task_definition_kwargs (dict): task_definition_kwargs to update
- - flow_task_run_kwargs (dict): task_run_kwargs to update
- - flow_container_definitions_kwargs (dict): container_definitions_kwargs to update
- """
- from botocore.exceptions import ClientError
-
- # get external kwargs from S3
- try:
- self.logger.info("Fetching external kwargs from S3")
- obj = self.s3_resource.Object(
- self.external_kwargs_s3_bucket,
- os.path.join( # type: ignore
- self.external_kwargs_s3_key, # type: ignore
- slugify(flow_run.flow.name), # type: ignore
- "{}.json".format(flow_run.flow.id[:8]), # type: ignore
- ), # type: ignore
- )
- body = obj.get()["Body"].read().decode("utf-8")
- except ClientError:
- self.logger.info(
- "Flow id {} does not have external kwargs.".format(flow_run.flow.id[:8])
- )
- body = "{}"
- self.logger.debug("External kwargs:\n{}".format(body))
-
- # update kwargs with external kwargs
- self.logger.info("Updating default kwargs with external")
- external_kwargs = json.loads(body)
- # parse external kwargs
- (
- ext_task_definition_kwargs,
- ext_task_run_kwargs,
- ext_container_definitions_kwargs,
- ) = self._parse_kwargs(external_kwargs)
- self.logger.debug(
- "External task definition kwargs:\n{}".format(ext_task_definition_kwargs)
- )
- self.logger.debug("External task run kwargs:\n{}".format(ext_task_run_kwargs))
- self.logger.debug(
- "External container definitions kwargs:\n{}".format(
- ext_container_definitions_kwargs
- )
- )
- # update flow_task_* kwargs
- flow_task_definition_kwargs.update(ext_task_definition_kwargs)
- flow_task_run_kwargs.update(ext_task_run_kwargs)
- flow_container_definitions_kwargs.update(ext_container_definitions_kwargs)
-
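For reference, a sketch of how the external kwargs object key is derived, mirroring the S3 lookup in `_override_kwargs` above (the bucket prefix and flow values are placeholders):
```
import os
from slugify import slugify

external_kwargs_s3_key = "prefect/kwargs"       # configured on the agent
flow_name, flow_id = "My Flow", "abcd1234-..."  # from the flow run

# The agent reads <key-prefix>/<slugified-flow-name>/<first-8-of-flow-id>.json
key = os.path.join(
    external_kwargs_s3_key,
    slugify(flow_name),
    "{}.json".format(flow_id[:8]),
)
print(key)  # prefect/kwargs/my-flow/abcd1234.json
```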
- def _add_flow_tags(
- self, flow_run: GraphQLResult, flow_task_definition_kwargs: dict
- ) -> None:
- """
- Add flow id and version tags to the task definition kwargs.
-
- Args:
- - flow_run (GraphQLResult): A GraphQLResult flow run object
- - flow_task_definition_kwargs (dict): task_definition_kwargs to add tags to
- """
- # add flow id and version to definition tags
- self.logger.info("Adding tags for flow_id and version.")
- if not flow_task_definition_kwargs.get("tags"):
- flow_task_definition_kwargs["tags"] = []
- else:
- flow_task_definition_kwargs["tags"] = copy.deepcopy(
- flow_task_definition_kwargs["tags"]
- )
- append_tag = True
- for i in flow_task_definition_kwargs["tags"]:
- if i["key"] == "PrefectFlowId":
- i["value"] = flow_run.flow.id[:8]
- append_tag = False
- if append_tag:
- flow_task_definition_kwargs["tags"].append(
- {"key": "PrefectFlowId", "value": flow_run.flow.id[:8]}
- )
- append_tag = True
- for i in flow_task_definition_kwargs["tags"]:
- if i["key"] == "PrefectFlowVersion":
- i["value"] = str(flow_run.flow.version)
- append_tag = False
- if append_tag:
- flow_task_definition_kwargs["tags"].append(
- {"key": "PrefectFlowVersion", "value": str(flow_run.flow.version)}
- )
-
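The two loops above implement the same update-or-append pattern for each tag; an equivalent helper, shown only to clarify the logic:
```
def upsert_tag(tags: list, key: str, value: str) -> None:
    """Update an existing tag in place, or append it if absent."""
    for tag in tags:
        if tag["key"] == key:
            tag["value"] = value
            return
    tags.append({"key": key, "value": value})

# upsert_tag(kwargs["tags"], "PrefectFlowId", flow_run.flow.id[:8])
# upsert_tag(kwargs["tags"], "PrefectFlowVersion", str(flow_run.flow.version))
```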
- def _parse_kwargs(self, user_kwargs: dict, check_envars: bool = False) -> tuple:
- """
- Parse the kwargs passed in and separate them out for `register_task_definition`
- and `run_task`. This is required because boto3 does not allow extra kwargs;
- if any are provided it raises `botocore.exceptions.ParamValidationError`.
-
- Args:
- - user_kwargs (dict): The kwargs passed to the initialization of the agent
- - check_envars (bool): Whether to check environment variables for kwargs
-
- Returns:
- tuple: a tuple of three dictionaries (task_definition_kwargs, task_run_kwargs,
- container_definitions_kwargs)
- """
- definition_kwarg_list = [
- "taskRoleArn",
- "executionRoleArn",
- "networkMode",
- "volumes",
- "placementConstraints",
- "cpu",
- "memory",
- "tags",
- "pidMode",
- "ipcMode",
- "proxyConfiguration",
- "inferenceAccelerators",
- ]
-
- definition_kwarg_list_no_eval = ["cpu", "memory"]
-
- run_kwarg_list = [
- "cluster",
- "count",
- "startedBy",
- "group",
- "placementConstraints",
- "placementStrategy",
- "platformVersion",
- "networkConfiguration",
- "tags",
- "enableECSManagedTags",
- "propagateTags",
- ]
-
- container_definitions_kwarg_list = [
- "mountPoints",
- "secrets",
- "environment",
- "logConfiguration",
- "repositoryCredentials",
- ]
-
- task_definition_kwargs = {}
- definition_kwarg_list_eval = {
- i: (i not in definition_kwarg_list_no_eval) for i in definition_kwarg_list
- }
- for key, item in user_kwargs.items():
- if key in definition_kwarg_list:
- if definition_kwarg_list_eval.get(key):
- try:
- # Parse kwarg if needed
- item = literal_eval(item)
- except (ValueError, SyntaxError):
- pass
- task_definition_kwargs.update({key: item})
- self.logger.debug("{} = {}".format(key, item))
-
- # Special case for int provided cpu and memory
- for key in definition_kwarg_list_no_eval:
- if isinstance(task_definition_kwargs.get(key, ""), int):
- task_definition_kwargs[key] = str(task_definition_kwargs[key])
-
- task_run_kwargs = {}
- for key, item in user_kwargs.items():
- if key in run_kwarg_list:
- try:
- # Parse kwarg if needed
- item = literal_eval(item)
- except (ValueError, SyntaxError):
- pass
- task_run_kwargs.update({key: item})
- self.logger.debug("{} = {}".format(key, item))
-
- container_definitions_kwargs = {}
- container_defs = user_kwargs.get("containerDefinitions", [{}])
- try:
- container_defs = literal_eval(container_defs)
- except (ValueError, SyntaxError):
- pass
-
- if len(container_defs) != 1:
- raise ValueError(
- "Fargate agent only accepts configuration for a single container definition."
- )
-
- for key, item in container_defs[0].items():
- if key in container_definitions_kwarg_list:
- try:
- # Parse kwarg if needed
- item = literal_eval(item)
- except (ValueError, SyntaxError):
- pass
- container_definitions_kwargs.update({key: item})
- self.logger.debug("{} = {}".format(key, item))
-
- # Check environment if keys were not provided
- if check_envars:
- for key in definition_kwarg_list:
- if not task_definition_kwargs.get(key) and os.getenv(key):
- self.logger.debug("{} from environment variable".format(key))
- def_env_value = os.getenv(key)
- if definition_kwarg_list_eval.get(key):
- try:
- # Parse env var if needed
- def_env_value = literal_eval(def_env_value) # type: ignore
- except (ValueError, SyntaxError):
- pass
- task_definition_kwargs.update({key: def_env_value})
-
- for key in run_kwarg_list:
- if not task_run_kwargs.get(key) and os.getenv(key):
- self.logger.debug("{} from environment variable".format(key))
- run_env_value = os.getenv(key)
- try:
- # Parse env var if needed
- run_env_value = literal_eval(run_env_value) # type: ignore
- except (ValueError, SyntaxError):
- pass
- task_run_kwargs.update({key: run_env_value})
-
- for key in container_definitions_kwarg_list:
- if not container_definitions_kwargs.get(key) and os.getenv(
- "containerDefinitions_{}".format(key)
- ):
- self.logger.debug(
- "Container definition: {} from environment variable".format(key)
- )
- cd_env_value = os.getenv("containerDefinitions_{}".format(key))
- try:
- # Parse env var if needed
- cd_env_value = literal_eval(cd_env_value) # type: ignore
- except (ValueError, SyntaxError):
- pass
- container_definitions_kwargs.update({key: cd_env_value})
-
- return task_definition_kwargs, task_run_kwargs, container_definitions_kwargs
-
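A standalone sketch of the parsing idea used throughout `_parse_kwargs`: strings that look like Python literals (for example values passed on the command line or via environment variables) are materialized with `literal_eval`, while anything that fails to parse passes through unchanged:
```
from ast import literal_eval

def maybe_eval(value):
    """Best-effort parse of a string kwarg into a Python literal."""
    try:
        return literal_eval(value)
    except (ValueError, SyntaxError):
        return value

print(maybe_eval("{'assignPublicIp': 'ENABLED'}"))  # parsed into a dict
print(maybe_eval("my-cluster"))                     # returned unchanged
```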
- def deploy_flow(self, flow_run: GraphQLResult) -> str:
- """
- Deploy flow runs to Fargate
-
- Args:
- - flow_run (GraphQLResult): A GraphQLResult flow run object
-
- Returns:
- - str: Information about the deployment
- """
- # create copies of kwargs to apply overrides as needed
- flow_task_definition_kwargs = copy.deepcopy(self.task_definition_kwargs)
- flow_task_run_kwargs = copy.deepcopy(self.task_run_kwargs)
- flow_container_definitions_kwargs = copy.deepcopy(
- self.container_definitions_kwargs
- )
-
- # create task_definition_name dict for passing into verify method
- task_definition_dict = {}
-
- if self.use_external_kwargs:
- # override from external kwargs
- self._override_kwargs(
- flow_run,
- flow_task_definition_kwargs,
- flow_task_run_kwargs,
- flow_container_definitions_kwargs,
- )
-
- # set proper task_definition_name and tags based on enable_task_revisions flag
- if self.enable_task_revisions:
- # set task definition name
- task_definition_dict["task_definition_name"] = slugify(flow_run.flow.name)
- self._add_flow_tags(flow_run, flow_task_definition_kwargs)
-
- else:
- task_definition_dict["task_definition_name"] = "prefect-task-{}".format( # type: ignore
- flow_run.flow.id[:8] # type: ignore
- ) # type: ignore
-
- image = get_flow_image(flow_run=flow_run)
- flow_run_command = get_flow_run_command(flow_run=flow_run)
-
- # check if task definition exists
- self.logger.debug("Checking for task definition")
- if not self._verify_task_definition_exists(flow_run, task_definition_dict):
- self.logger.debug("No task definition found")
- self._create_task_definition(
- image=image,
- flow_task_definition_kwargs=flow_task_definition_kwargs,
- container_definitions_kwargs=flow_container_definitions_kwargs,
- task_definition_name=task_definition_dict["task_definition_name"],
- flow_run_command=flow_run_command,
- )
-
- # run task
- task_arn = self._run_task(
- flow_run, flow_task_run_kwargs, task_definition_dict["task_definition_name"]
- )
-
- self.logger.debug("Run created for task {}".format(task_arn))
-
- return "Task ARN: {}".format(task_arn)
-
- def _verify_task_definition_exists(
- self, flow_run: GraphQLResult, task_definition_dict: dict
- ) -> bool:
- """
- Check if a task definition already exists for the flow
-
- Args:
- - flow_run (GraphQLResult): A GraphQLResult representing a flow run object
- - task_definition_dict (dict): Dictionary containing the task definition name to update
- if needed.
-
- Returns:
- - bool: whether or not a preexisting task definition is found for this flow
- """
- from botocore.exceptions import ClientError
-
- try:
- definition_exists = True
- task_definition_name = task_definition_dict["task_definition_name"]
- definition_response = self.boto3_client.describe_task_definition(
- taskDefinition=task_definition_name, include=["TAGS"]
- )
- # if current active task definition has current flow id, then exists
- if self.enable_task_revisions:
- definition_exists = False
- tag_dict = {x["key"]: x["value"] for x in definition_response["tags"]}
- current_flow_id = tag_dict.get("PrefectFlowId")
- current_flow_version = int(tag_dict.get("PrefectFlowVersion", 0))
- if current_flow_id == flow_run.flow.id[:8]:
- self.logger.debug(
- "Active task definition for {} already exists".format(
- flow_run.flow.id[:8]
- ) # type: ignore
- )
- definition_exists = True
- elif flow_run.flow.version < current_flow_version:
- tag_search = self.boto3_client_tags.get_resources(
- TagFilters=[
- {"Key": "PrefectFlowId", "Values": [flow_run.flow.id[:8]]}
- ],
- ResourceTypeFilters=["ecs:task-definition"],
- )
- if tag_search["ResourceTagMappingList"]:
- task_definition_dict["task_definition_name"] = [
- x.get("ResourceARN")
- for x in tag_search["ResourceTagMappingList"]
- ][-1]
- self.logger.debug(
- "Active task definition for {} already exists".format(
- flow_run.flow.id[:8]
- ) # type: ignore
- )
- definition_exists = True
- else:
- self.logger.debug(
- "Task definition {} found".format(
- task_definition_name
- ) # type: ignore
- )
- except ClientError:
- return False
- return definition_exists
-
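Revision lookup relies on the AWS Resource Groups Tagging API; a minimal sketch of the query used above (region and credentials are assumed to come from the environment, and the flow id is a placeholder):
```
import boto3

tag_client = boto3.client("resourcegroupstaggingapi")

response = tag_client.get_resources(
    # First 8 characters of the flow id, as tagged by the agent
    TagFilters=[{"Key": "PrefectFlowId", "Values": ["abcd1234"]}],
    ResourceTypeFilters=["ecs:task-definition"],
)
# The agent uses the last matching ARN as the task definition to run
arns = [m["ResourceARN"] for m in response["ResourceTagMappingList"]]
```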
- def _create_task_definition(
- self,
- image: str,
- flow_task_definition_kwargs: dict,
- container_definitions_kwargs: dict,
- task_definition_name: str,
- flow_run_command: str,
- ) -> None:
- """
- Create a task definition for the flow that each flow run will use. This function
- is only called when a flow is run for the first time.
-
- Args:
- - image (str): The full name of an image to use for this task definition
- - flow_task_definition_kwargs (dict): kwargs to use for registration
- - container_definitions_kwargs (dict): container definitions kwargs to use for
- registration
- - task_definition_name (str): task definition name to use
- - flow_run_command (str): the flow run command to execute
- """
- self.logger.debug("Using image {} for task definition".format(image))
- container_definitions = [
- {
- "name": "flow",
- "image": image,
- "command": ["/bin/sh", "-c", flow_run_command],
- "environment": [
- {
- "name": "PREFECT__BACKEND",
- "value": config.backend,
- },
- {
- "name": "PREFECT__CLOUD__API",
- "value": config.cloud.api or "https://api.prefect.io",
- },
- {
- "name": "PREFECT__CLOUD__AGENT__LABELS",
- "value": str(self.labels),
- },
- {"name": "PREFECT__CLOUD__USE_LOCAL_SECRETS", "value": "false"},
- {
- "name": "PREFECT__CLOUD__SEND_FLOW_RUN_LOGS",
- "value": str(self.log_to_cloud).lower(),
- },
- {"name": "PREFECT__LOGGING__LEVEL", "value": config.logging.level},
- {
- "name": "PREFECT__ENGINE__FLOW_RUNNER__DEFAULT_CLASS",
- "value": "prefect.engine.cloud.CloudFlowRunner",
- },
- {
- "name": "PREFECT__ENGINE__TASK_RUNNER__DEFAULT_CLASS",
- "value": "prefect.engine.cloud.CloudTaskRunner",
- },
- {
- "name": "PREFECT__LOGGING__LOG_TO_CLOUD",
- "value": str(self.log_to_cloud).lower(),
- },
- ],
- "essential": True,
- }
- ]
-
- for key, value in self.env_vars.items():
- container_definitions[0]["environment"].append( # type: ignore
- dict(name=key, value=value)
- )
-
- # apply container definitions to "containerDefinitions" key of task definition
- # do not allow override of static envars from Prefect base task definition, which may
- # include self.env_vars
-
- base_envar_keys = [
- x["name"] for x in container_definitions[0]["environment"] # type: ignore
- ]
- self.logger.debug(
- "Removing static Prefect envars from container_definitions_kwargs if exists"
- )
- container_definitions_environment = [
- x
- for x in container_definitions_kwargs.get("environment", [])
- if x["name"] not in base_envar_keys
- ]
-
- container_definitions[0]["environment"].extend( # type: ignore
- container_definitions_environment
- )
-
- # Set container definition values if provided
- if container_definitions_kwargs.get("secrets"):
- container_definitions[0]["secrets"] = container_definitions_kwargs.get(
- "secrets", []
- )
-
- if container_definitions_kwargs.get("mountPoints"):
- container_definitions[0]["mountPoints"] = container_definitions_kwargs.get(
- "mountPoints", []
- )
- if container_definitions_kwargs.get("logConfiguration"):
- container_definitions[0][
- "logConfiguration"
- ] = container_definitions_kwargs.get("logConfiguration", {})
-
- if container_definitions_kwargs.get("repositoryCredentials"):
- container_definitions[0][
- "repositoryCredentials"
- ] = container_definitions_kwargs.get("repositoryCredentials", {})
-
- # If networkMode is not provided, default to awsvpc
- networkMode = flow_task_definition_kwargs.pop("networkMode", "awsvpc")
-
- self.logger.debug(f"Task definition networkMode: {networkMode}")
-
- # Register task definition
- self.logger.debug(
- "Registering task definition {}".format(
- task_definition_name # type: ignore
- )
- )
- if self.launch_type:
- flow_task_definition_kwargs["requiresCompatibilities"] = [self.launch_type]
-
- self.boto3_client.register_task_definition(
- family=task_definition_name, # type: ignore
- networkMode=networkMode,
- containerDefinitions=container_definitions,
- **flow_task_definition_kwargs,
- )
-
- def _run_task(
- self,
- flow_run: GraphQLResult,
- flow_task_run_kwargs: dict,
- task_definition_name: str,
- ) -> str:
- """
- Run a task using the flow run.
-
- Args:
- - flow_run (GraphQLResult): A GraphQLResult flow run object
- - flow_task_run_kwargs (dict): kwargs to use for task run
- - task_definition_name (str): task definition name to use
-
- Returns:
- - str: the ARN of the ECS task that was started
- """
- container_overrides = [
- {
- "name": "flow",
- "environment": [
- {
- "name": "PREFECT__CLOUD__AUTH_TOKEN",
- "value": config.cloud.agent.auth_token,
- },
- {
- "name": "PREFECT__CONTEXT__FLOW_RUN_ID",
- "value": flow_run.id, # type: ignore
- },
- {
- "name": "PREFECT__CONTEXT__FLOW_ID",
- "value": flow_run.flow.id, # type: ignore
- },
- ],
- }
- ]
-
- # Run task
- self.logger.debug(
- "Running task using task definition {}".format(
- task_definition_name # type: ignore
- )
- )
-
- if self.launch_type:
- flow_task_run_kwargs["launchType"] = self.launch_type
- task = self.boto3_client.run_task(
- taskDefinition=task_definition_name,
- overrides={"containerOverrides": container_overrides},
- **flow_task_run_kwargs,
- )
-
- return task["tasks"][0].get("taskArn")
-
- def validate_configuration(self) -> None:
- """
- Utility function for testing Agent's configuration. This function is helpful in
- determining whether the provided configuration for the Agent is able to register a
- task definition and then subsequently run the task.
- """
- task_name = f"prefect-test-task-{str(uuid.uuid4())[:8]}"
-
- # Populate container definition with provided kwargs
- flow_container_definitions_kwargs = copy.deepcopy(
- self.container_definitions_kwargs
- )
-
- container_definitions = [
- {
- "name": "test-container",
- "image": "busybox",
- "command": ["/bin/sh", "-c", "echo 'I am alive!'"],
- "environment": [],
- "essential": True,
- }
- ]
-
- base_envar_keys = [
- x["name"] for x in container_definitions[0]["environment"] # type: ignore
- ]
- container_definitions_environment = [
- x
- for x in flow_container_definitions_kwargs.get("environment", [])
- if x["name"] not in base_envar_keys
- ]
- container_definitions[0]["environment"].extend( # type: ignore
- container_definitions_environment
- )
-
- # Assign user-provided container definition options
- if flow_container_definitions_kwargs.get("secrets"):
- container_definitions[0]["secrets"] = flow_container_definitions_kwargs.get(
- "secrets", []
- )
-
- if flow_container_definitions_kwargs.get("mountPoints"):
- container_definitions[0][
- "mountPoints"
- ] = flow_container_definitions_kwargs.get("mountPoints", [])
-
- if flow_container_definitions_kwargs.get("logConfiguration"):
- container_definitions[0][
- "logConfiguration"
- ] = flow_container_definitions_kwargs.get("logConfiguration", {})
-
- if flow_container_definitions_kwargs.get("repositoryCredentials"):
- container_definitions[0][
- "repositoryCredentials"
- ] = flow_container_definitions_kwargs.get("repositoryCredentials", {})
-
- # Register task definition
- flow_task_definition_kwargs = copy.deepcopy(self.task_definition_kwargs)
-
- # If networkMode is not provided, default to awsvpc
- networkMode = flow_task_definition_kwargs.pop("networkMode", "awsvpc")
-
- if self.launch_type:
- flow_task_definition_kwargs["requiresCompatibilities"] = [self.launch_type]
-
- self.logger.info("Testing task definition registration...")
- self.boto3_client.register_task_definition(
- family=task_name,
- networkMode=networkMode,
- containerDefinitions=container_definitions,
- **flow_task_definition_kwargs,
- )
- self.logger.info("Task definition registration successful")
-
- # Run task
- flow_task_run_kwargs = copy.deepcopy(self.task_run_kwargs)
-
- if self.launch_type:
- flow_task_run_kwargs["launchType"] = self.launch_type
-
- self.logger.info("Testing task run...")
- task = self.boto3_client.run_task(
- taskDefinition=task_name,
- overrides={"containerOverrides": []},
- **flow_task_run_kwargs,
- )
- self.logger.info(f"Task run {task['tasks'][0].get('taskArn')} successful")
-
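A minimal usage sketch: the method registers a throwaway busybox task definition and runs it with the agent's configured kwargs, so AWS credentials and any required network configuration must already be in place (the cluster and subnet values are placeholders):
```
from prefect.agent.fargate import FargateAgent

agent = FargateAgent(
    cluster="test-cluster",
    networkConfiguration={
        "awsvpcConfiguration": {
            "assignPublicIp": "ENABLED",
            "subnets": ["my_subnet_id"],
            "securityGroups": [],
        }
    },
)
agent.validate_configuration()
```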
-
-if __name__ == "__main__":
- FargateAgent().start()
diff --git a/src/prefect/agent/kubernetes/__init__.py b/src/prefect/agent/kubernetes/__init__.py
index 31148e290288..32761cb81ab2 100644
--- a/src/prefect/agent/kubernetes/__init__.py
+++ b/src/prefect/agent/kubernetes/__init__.py
@@ -1 +1,3 @@
from prefect.agent.kubernetes.agent import KubernetesAgent
+
+__all__ = ["KubernetesAgent"]
diff --git a/src/prefect/agent/kubernetes/agent.py b/src/prefect/agent/kubernetes/agent.py
index 6a234a4fa911..b19e06ba86f9 100644
--- a/src/prefect/agent/kubernetes/agent.py
+++ b/src/prefect/agent/kubernetes/agent.py
@@ -468,162 +468,7 @@ def generate_job_spec(self, flow_run: GraphQLResult) -> dict:
"""
run_config = self._get_run_config(flow_run, KubernetesRun)
assert run_config is None or isinstance(run_config, KubernetesRun) # mypy
- if run_config is not None:
- return self.generate_job_spec_from_run_config(flow_run, run_config)
- else:
- return self.generate_job_spec_from_environment(flow_run)
-
- def generate_job_spec_from_environment(
- self, flow_run: GraphQLResult, image: str = None
- ) -> dict:
- """
- Populate a k8s job spec. This spec defines a k8s job that handles
- executing a flow. This method runs each time the agent receives
- a flow to run.
-
- That job spec can optionally be customized by setting the
- following environment variables on the agent.
-
- - `NAMESPACE`: the k8s namespace the job will run in, defaults to `"default"`
- - `JOB_MEM_REQUEST`: memory requested, for example, `256Mi` for 256 MB. If this
- environment variable is not set, the cluster's defaults will be used.
- `JOB_MEM_LIMIT`: memory limit, for example, `512Mi` for 512 MB. If this
- environment variable is not set, the cluster's defaults will be used.
- - `JOB_CPU_REQUEST`: CPU requested, defaults to `"100m"`
- - `JOB_CPU_LIMIT`: CPU limit, defaults to `"100m"`
- - `IMAGE_PULL_POLICY`: policy for pulling images. Defaults to `"IfNotPresent"`.
- - `IMAGE_PULL_SECRETS`: name of an existing k8s secret that can be used to pull
- images. This is necessary if your flow uses an image that is in a non-public
- container registry, such as Amazon ECR, or in a public registry that requires
- authentication to avoid hitting rate limits. To specify multiple image pull
- secrets, provide a comma-delimited string with no spaces, like
- `"some-secret,other-secret"`.
- - `SERVICE_ACCOUNT_NAME`: name of a service account to run the job as.
- By default, none is specified.
- `YAML_TEMPLATE`: the path to load the YAML template from. Defaults
- to the embedded `job_spec.yaml`.
-
- Args:
- - flow_run (GraphQLResult): A flow run object
- - image (str, optional): The full name of an image to use for the job
-
- Returns:
- - dict: a dictionary representation of a k8s job for flow execution
- """
- identifier = str(uuid.uuid4())[:8]
- yaml_path = os.getenv(
- "YAML_TEMPLATE", os.path.join(os.path.dirname(__file__), "job_spec.yaml")
- )
- with open(yaml_path, "r") as job_file:
- job = yaml.safe_load(job_file)
-
- job_name = "prefect-job-{}".format(identifier)
-
- # Populate job metadata for identification
- k8s_labels = {
- "prefect.io/identifier": identifier,
- "prefect.io/flow_run_id": flow_run.id, # type: ignore
- "prefect.io/flow_id": flow_run.flow.id, # type: ignore
- }
- job["metadata"]["name"] = job_name
- job["metadata"]["labels"].update(**k8s_labels)
- job["spec"]["template"]["metadata"]["labels"].update(**k8s_labels)
-
- # Use provided image for job
- if image is None:
- image = get_flow_image(flow_run=flow_run)
- job["spec"]["template"]["spec"]["containers"][0]["image"] = image
- self.logger.debug("Using image {} for job".format(image))
-
- # Determine flow run command
- job["spec"]["template"]["spec"]["containers"][0]["args"] = [
- get_flow_run_command(flow_run)
- ]
-
- # Populate environment variables for flow run execution
- env = job["spec"]["template"]["spec"]["containers"][0]["env"]
-
- env[0]["value"] = config.cloud.api or "https://api.prefect.io"
- env[1]["value"] = (
- # Pull an auth token if it exists but fall back to an API key so
- # flows in pre-0.15.0 containers still authenticate correctly
- config.cloud.agent.get("auth_token")
- or self.flow_run_api_key
- )
- env[2]["value"] = flow_run.id # type: ignore
- env[3]["value"] = flow_run.flow.id # type: ignore
- env[4]["value"] = self.namespace
- env[5]["value"] = str(self.labels)
- env[6]["value"] = str(self.log_to_cloud).lower()
- env[7]["value"] = self.env_vars.get(
- "PREFECT__LOGGING__LEVEL", config.logging.level
- )
-
- # append all user provided values
- for key, value in self.env_vars.items():
- env.append(dict(name=key, value=value))
-
- # Use image pull secrets if provided
- if self.image_pull_secrets:
- for idx, secret_name in enumerate(self.image_pull_secrets):
- # this check preserves behavior from previous releases,
- # where prefect would only overwrite the first entry in
- # imagePullSecrets
- if idx == 0:
- job["spec"]["template"]["spec"]["imagePullSecrets"][0] = {
- "name": secret_name
- }
- else:
- job["spec"]["template"]["spec"]["imagePullSecrets"].append(
- {"name": secret_name}
- )
- else:
- del job["spec"]["template"]["spec"]["imagePullSecrets"]
-
- # Set resource requirements if provided
- resources = job["spec"]["template"]["spec"]["containers"][0]["resources"]
- if os.getenv("JOB_MEM_REQUEST"):
- resources["requests"]["memory"] = os.getenv("JOB_MEM_REQUEST")
- if os.getenv("JOB_MEM_LIMIT"):
- resources["limits"]["memory"] = os.getenv("JOB_MEM_LIMIT")
- if os.getenv("JOB_CPU_REQUEST"):
- resources["requests"]["cpu"] = os.getenv("JOB_CPU_REQUEST")
- if os.getenv("JOB_CPU_LIMIT"):
- resources["limits"]["cpu"] = os.getenv("JOB_CPU_LIMIT")
- if self.volume_mounts:
- job["spec"]["template"]["spec"]["containers"][0][
- "volumeMounts"
- ] = self.volume_mounts
- else:
- del job["spec"]["template"]["spec"]["containers"][0]["volumeMounts"]
- if self.volumes:
- job["spec"]["template"]["spec"]["volumes"] = self.volumes
- else:
- del job["spec"]["template"]["spec"]["volumes"]
- if os.getenv("IMAGE_PULL_POLICY"):
- job["spec"]["template"]["spec"]["containers"][0][
- "imagePullPolicy"
- ] = os.getenv("IMAGE_PULL_POLICY")
- if self.service_account_name:
- job["spec"]["template"]["spec"][
- "serviceAccountName"
- ] = self.service_account_name
-
- return job
-
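The customization knobs listed in the docstring above are plain environment variables read at job-spec generation time; a sketch of setting a few of them before starting the agent (the values are examples only):
```
import os

# Must be set in the agent's environment before flow runs are submitted
os.environ["NAMESPACE"] = "prefect"
os.environ["JOB_MEM_REQUEST"] = "256Mi"
os.environ["JOB_MEM_LIMIT"] = "512Mi"
os.environ["JOB_CPU_REQUEST"] = "250m"
os.environ["JOB_CPU_LIMIT"] = "500m"
os.environ["IMAGE_PULL_SECRETS"] = "some-secret,other-secret"
```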
- def generate_job_spec_from_run_config(
- self, flow_run: GraphQLResult, run_config: KubernetesRun
- ) -> dict:
- """Generate a k8s job spec for a flow run.
-
- Args:
- - flow_run (GraphQLResult): A flow run object
- - run_config (KubernetesRun): The flow run's run_config
-
- Returns:
- - dict: a dictionary representation of a k8s job for flow execution
- """
if run_config.job_template:
job = run_config.job_template
else:
@@ -719,16 +564,9 @@ def generate_job_spec_from_run_config(
"PREFECT__BACKEND": config.backend,
"PREFECT__CLOUD__AGENT__LABELS": str(self.labels),
"PREFECT__CLOUD__API": config.cloud.api,
- "PREFECT__CLOUD__AUTH_TOKEN": (
- # Pull an auth token if it exists but fall back to an API key so
- # flows in pre-0.15.0 containers still authenticate correctly
- config.cloud.agent.get("auth_token")
- or self.flow_run_api_key
- or ""
- ),
"PREFECT__CLOUD__API_KEY": self.flow_run_api_key or "",
"PREFECT__CLOUD__TENANT_ID": (
- # Providing a tenant id is only necessary for API keys (not tokens)
+ # A tenant id is only required when authenticating with an API key
self.client.tenant_id
if self.flow_run_api_key
else ""
@@ -742,6 +580,8 @@ def generate_job_spec_from_run_config(
"PREFECT__ENGINE__TASK_RUNNER__DEFAULT_CLASS": "prefect.engine.cloud.CloudTaskRunner",
# Backwards compatibility variable for containers on Prefect <0.15.0
"PREFECT__LOGGING__LOG_TO_CLOUD": str(self.log_to_cloud).lower(),
+ # Backwards compatibility variable for containers on Prefect <1.0.0
+ "PREFECT__CLOUD__AUTH_TOKEN": self.flow_run_api_key or "",
}
)
container_env = [{"name": k, "value": v} for k, v in env.items()]
@@ -767,7 +607,6 @@ def generate_job_spec_from_run_config(
@staticmethod
def generate_deployment_yaml(
- token: str = None,
api: str = None,
namespace: str = None,
image_pull_secrets: str = None,
@@ -790,7 +629,6 @@ def generate_deployment_yaml(
Generate and output an installable YAML spec for the agent.
Args:
- - token (str, optional): A `RUNNER` token to give the agent
- api (str, optional): A URL pointing to the Prefect API. Defaults to
`https://api.prefect.io`
- namespace (str, optional): The namespace to create Prefect jobs in. Defaults
@@ -827,7 +665,6 @@ def generate_deployment_yaml(
"""
# Use defaults if not provided
- token = token or ""
key = key or ""
tenant_id = tenant_id or ""
api = api or "https://api.prefect.io"
@@ -843,7 +680,7 @@ def generate_deployment_yaml(
version = prefect.__version__.split("+")
image_version = (
- "latest" if len(version) > 1 or latest else (version[0] + "-python3.6")
+ "latest" if len(version) > 1 or latest else (version[0] + "-python3.7")
)
with open(
@@ -858,7 +695,7 @@ def generate_deployment_yaml(
agent_env = deployment["spec"]["template"]["spec"]["containers"][0]["env"]
# Populate env vars
- agent_env[0]["value"] = token
+ agent_env[0]["value"] = key # Pass API keys as auth tokens for backwards compat
agent_env[1]["value"] = api
agent_env[2]["value"] = namespace
agent_env[3]["value"] = image_pull_secrets or ""
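For context, a sketch of the image-tag selection in the hunk above: any local or dev build (a version containing `+`) or an explicit latest flag maps to the `latest` tag, otherwise the release tag is pinned to a Python version:
```
def agent_image_tag(prefect_version: str, latest: bool = False) -> str:
    """Mirror of the version-to-tag logic in generate_deployment_yaml."""
    parts = prefect_version.split("+")
    if len(parts) > 1 or latest:
        return "latest"
    return parts[0] + "-python3.7"

print(agent_image_tag("0.15.5"))          # 0.15.5-python3.7
print(agent_image_tag("0.15.5+12.gabc"))  # latest
```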
diff --git a/src/prefect/agent/local/__init__.py b/src/prefect/agent/local/__init__.py
index 5af9f495b2b4..09ac5b647431 100644
--- a/src/prefect/agent/local/__init__.py
+++ b/src/prefect/agent/local/__init__.py
@@ -1 +1,3 @@
from prefect.agent.local.agent import LocalAgent
+
+__all__ = ["LocalAgent"]
diff --git a/src/prefect/agent/local/agent.py b/src/prefect/agent/local/agent.py
index 2cc43a48de3e..c4e9b054a864 100644
--- a/src/prefect/agent/local/agent.py
+++ b/src/prefect/agent/local/agent.py
@@ -1,7 +1,6 @@
import os
import socket
import sys
-import warnings
from subprocess import STDOUT, Popen, DEVNULL
from typing import Iterable, List
@@ -207,15 +206,9 @@ def populate_env_vars(
{
"PREFECT__BACKEND": config.backend,
"PREFECT__CLOUD__API": config.cloud.api,
- "PREFECT__CLOUD__AUTH_TOKEN": (
- # Pull an auth token if it exists but fall back to an API key so
- # flows in pre-0.15.0 containers still authenticate correctly
- self.client._api_token
- or self.flow_run_api_key
- ),
"PREFECT__CLOUD__API_KEY": self.flow_run_api_key,
"PREFECT__CLOUD__TENANT_ID": (
- # Providing a tenant id is only necessary for API keys (not tokens)
+ # Providing a tenant id is only necessary if authenticating with an API key
self.client.tenant_id
if self.flow_run_api_key
else None
@@ -248,8 +241,6 @@ def generate_supervisor_conf(
Generate and output an installable supervisorctl configuration file for the agent.
Args:
- - token (str, optional): A `RUNNER` token to give the agent. DEPRECATED. Use
- `key` instead.
- labels (List[str], optional): a list of labels, which are arbitrary string
identifiers used by Prefect Agents when polling for work
- env_vars (dict, optional): a dictionary of environment variables and values that
@@ -301,11 +292,6 @@ def generate_supervisor_conf(
if agent_config_id:
add_opts += f"--agent-config-id {agent_config_id}"
- # Tokens are deprecated
- if token:
- warnings.warn("API tokens are deprecated. Please switch to using API keys.")
- add_opts += f"-t {token} "
-
conf = conf.replace("{{OPTS}}", add_opts)
return conf
diff --git a/src/prefect/agent/vertex/__init__.py b/src/prefect/agent/vertex/__init__.py
index 38f87365c437..8063cece4203 100644
--- a/src/prefect/agent/vertex/__init__.py
+++ b/src/prefect/agent/vertex/__init__.py
@@ -1 +1,3 @@
from prefect.agent.vertex.agent import VertexAgent
+
+__all__ = ["VertexAgent"]
diff --git a/src/prefect/agent/vertex/agent.py b/src/prefect/agent/vertex/agent.py
index c58d98825872..047c506e3fe1 100644
--- a/src/prefect/agent/vertex/agent.py
+++ b/src/prefect/agent/vertex/agent.py
@@ -213,16 +213,9 @@ def populate_env_vars(self, flow_run: GraphQLResult) -> dict:
{
"PREFECT__BACKEND": config.backend,
"PREFECT__CLOUD__API": config.cloud.api,
- "PREFECT__CLOUD__AUTH_TOKEN": (
- # Pull an auth token if it exists but fall back to an API key so
- # flows in pre-0.15.0 containers still authenticate correctly
- config.cloud.agent.get("auth_token")
- or self.flow_run_api_key
- or ""
- ),
"PREFECT__CLOUD__API_KEY": self.flow_run_api_key or "",
"PREFECT__CLOUD__TENANT_ID": (
- # Providing a tenant id is only necessary for API keys (not tokens)
+ # Providing a tenant id is only necessary when authenticating with an API key
self.client.tenant_id
if self.flow_run_api_key
else ""
@@ -236,6 +229,8 @@ def populate_env_vars(self, flow_run: GraphQLResult) -> dict:
"PREFECT__ENGINE__TASK_RUNNER__DEFAULT_CLASS": "prefect.engine.cloud.CloudTaskRunner",
# Backwards compatibility variable for containers on Prefect <0.15.0
"PREFECT__LOGGING__LOG_TO_CLOUD": str(self.log_to_cloud).lower(),
+ # Backwards compatibility variable for containers on Prefect <1.0.0
+ "PREFECT__CLOUD__AUTH_TOKEN": self.flow_run_api_key or "",
}
)
return env
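The same pattern now appears across the Kubernetes, local, and Vertex agents: the API key is mirrored into the legacy token variable so older flow containers keep authenticating. A condensed sketch of the resulting mapping (the key value is a placeholder):
```
flow_run_api_key = "pcu_example"  # placeholder API key

env = {
    "PREFECT__CLOUD__API_KEY": flow_run_api_key or "",
    # Read by containers on Prefect <1.0.0 that still expect a token
    "PREFECT__CLOUD__AUTH_TOKEN": flow_run_api_key or "",
}
```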
diff --git a/src/prefect/backend/__init__.py b/src/prefect/backend/__init__.py
index 6758991ea2d8..a4fbbe6c0861 100644
--- a/src/prefect/backend/__init__.py
+++ b/src/prefect/backend/__init__.py
@@ -10,3 +10,19 @@
update_link_artifact,
update_markdown_artifact,
)
+
+__all__ = [
+ "FlowRunView",
+ "FlowView",
+ "TaskRunView",
+ "TenantView",
+ "create_link_artifact",
+ "create_markdown_artifact",
+ "delete_artifact",
+ "delete_key",
+ "get_key_value",
+ "list_keys",
+ "set_key_value",
+ "update_link_artifact",
+ "update_markdown_artifact",
+]
diff --git a/src/prefect/backend/execution.py b/src/prefect/backend/execution.py
index 556e232f7287..ba5d6052a4e5 100644
--- a/src/prefect/backend/execution.py
+++ b/src/prefect/backend/execution.py
@@ -198,14 +198,7 @@ def execute_flow_run(
flow_run_id=flow_run_id,
message="Failed to execute flow: {exc}",
):
- if flow_metadata.run_config is not None:
- runner_cls(flow=flow).run(**run_kwargs)
-
- # Support for deprecated `flow.environment` use
- else:
- environment = flow.environment
- environment.setup(flow)
- environment.execute(flow)
+ runner_cls(flow=flow).run(**run_kwargs)
# Get the final state
flow_run = flow_run.get_latest()
@@ -268,12 +261,8 @@ def generate_flow_run_environ(
# Pass authentication through
client = prefect.Client() # Instantiate a client to get the current API key
env["PREFECT__CLOUD__API_KEY"] = run_api_key or client.api_key or ""
- # Backwards compat for auth tokens
- env["PREFECT__CLOUD__AUTH_TOKEN"] = (
- run_api_key
- or prefect.config.cloud.agent.get("auth_token")
- or prefect.config.cloud.get("auth_token")
- )
+ # Backwards compat for auth tokens (only useful for containers)
+ env["PREFECT__CLOUD__AUTH_TOKEN"] = run_api_key or client.api_key or ""
# Add context information for the run
env.update(
diff --git a/src/prefect/backend/flow.py b/src/prefect/backend/flow.py
index 92d957da3b75..00bb4d9e9794 100644
--- a/src/prefect/backend/flow.py
+++ b/src/prefect/backend/flow.py
@@ -23,8 +23,6 @@ class FlowView:
This object is designed to be an immutable view of the data stored in the Prefect
backend API at the time it is created
- EXPERIMENTAL: This interface is experimental and subject to change
-
Args:
- flow_id: The uuid of the flow
- settings: A dict of flow settings
diff --git a/src/prefect/backend/flow_run.py b/src/prefect/backend/flow_run.py
index b53af64b9dbf..8a6a4e6cb708 100644
--- a/src/prefect/backend/flow_run.py
+++ b/src/prefect/backend/flow_run.py
@@ -20,8 +20,6 @@
def stream_flow_run_logs(flow_run_id: str) -> None:
"""
Basic wrapper for `watch_flow_run` to print the logs of the run
-
- EXPERIMENTAL: This interface is experimental and subject to change
"""
for log in watch_flow_run(flow_run_id):
level_name = logging.getLevelName(log.level)
@@ -44,8 +42,6 @@ def watch_flow_run(
If both stream_states and stream_logs are `False` then this will just block until
the flow run finishes.
- EXPERIMENTAL: This interface is experimental and subject to change
-
Args:
- flow_run_id: The flow run to watch
- stream_states: If set, flow run state changes will be streamed as logs
@@ -180,8 +176,6 @@ def check_for_compatible_agents(labels: Iterable[str], since_minutes: int = 1) -
- There are no healthy agents at all and no unhealthy agents with matching labels
- There are healthy agents but no healthy or unhealthy agent has matching labels
- EXPERIMENTAL: This interface is experimental and subject to change
-
Args:
- labels: A set of labels; typically associated with a flow run
- since_minutes: The amount of time in minutes to allow an agent to be idle and
@@ -285,8 +279,6 @@ def check_for_compatible_agents(labels: Iterable[str], since_minutes: int = 1) -
class FlowRunLog(NamedTuple):
"""
Small wrapper for backend log objects
-
- EXPERIMENTAL: This interface is experimental and subject to change
"""
timestamp: pendulum.DateTime
@@ -333,8 +325,6 @@ class FlowRunView:
the latest data for that task will be pulled since they are loaded lazily. Finished
task runs will be cached in this object to reduce the amount of network IO.
- EXPERIMENTAL: This interface is experimental and subject to change
-
Args:
- flow_run_id: The uuid of the flow run
- name: The name of the flow run
diff --git a/src/prefect/backend/task_run.py b/src/prefect/backend/task_run.py
index fa60770dd66a..08e3dcefeddb 100644
--- a/src/prefect/backend/task_run.py
+++ b/src/prefect/backend/task_run.py
@@ -21,8 +21,6 @@ class TaskRunView:
This object is designed to be an immutable view of the data stored in the Prefect
backend API at the time it is created.
- EXPERIMENTAL: This interface is experimental and subject to change
-
Args:
- task_run_id: The task run uuid
- task_id: The uuid of the task associated with this task run
diff --git a/src/prefect/backend/tenant.py b/src/prefect/backend/tenant.py
index 9d78d51c4010..1b3843683337 100644
--- a/src/prefect/backend/tenant.py
+++ b/src/prefect/backend/tenant.py
@@ -13,8 +13,6 @@ class TenantView:
This object is designed to be an immutable view of the data stored in the Prefect
backend API at the time it is created
- EXPERIMENTAL: This interface is experimental and subject to change
-
Args:
- tenant_id: The uuid of the tenant
- name: The name of the tenant
diff --git a/src/prefect/cli/__init__.py b/src/prefect/cli/__init__.py
index 2aa0a3e3be79..c0092930c923 100644
--- a/src/prefect/cli/__init__.py
+++ b/src/prefect/cli/__init__.py
@@ -42,7 +42,7 @@ def cli():
agent Manage agents
create Create objects
delete Delete objects
- execute Execute a flow's environment
+ execute Execute a flow run
run Run a flow
register Register flows with an API
heartbeat Send heartbeats for a run
@@ -130,3 +130,6 @@ def backend(api):
backend_util.save_backend(api)
click.secho("Backend switched to {}".format(api), fg="green")
+
+
+__all__ = ["backend_util"]
diff --git a/src/prefect/cli/agent.py b/src/prefect/cli/agent.py
index 1ab9d60b35ee..8bb2e495d799 100644
--- a/src/prefect/cli/agent.py
+++ b/src/prefect/cli/agent.py
@@ -79,12 +79,6 @@
"environment."
),
),
- click.option(
- "--token",
- "-t",
- required=False,
- help="A Prefect Cloud API token with RUNNER scope. DEPRECATED.",
- ),
]
@@ -113,11 +107,6 @@
multiple=True,
help="Environment variables to set on each submitted flow run.",
),
- click.option(
- "--token",
- "-t",
- help="A Prefect Cloud API token with RUNNER scope. DEPRECATED.",
- ),
click.option(
"--agent-config-id",
help="An agent ID to link this agent instance with",
@@ -125,11 +114,10 @@
]
-def start_agent(agent_cls, token, api, label, env, log_level, key, tenant_id, **kwargs):
+def start_agent(agent_cls, api, label, env, log_level, key, tenant_id, **kwargs):
labels = sorted(set(label))
env_vars = dict(e.split("=", 1) for e in env)
tmp_config = {
- "cloud.agent.auth_token": token or config.cloud.agent.auth_token,
"cloud.api_key": key or config.cloud.api_key,
"cloud.tenant_id": tenant_id or config.cloud.tenant_id,
"cloud.agent.level": log_level or config.cloud.agent.level,
@@ -250,33 +238,19 @@ def docker():
"(e.g. `--network network1 --network network2`)"
),
)
-@click.option(
- "--no-docker-interface",
- default=None,
- is_flag=True,
- help=(
- "Disable the check of a Docker interface on this machine. "
- "Note: This is mostly relevant for some Docker-in-Docker "
- "setups that users may be running their agent with. "
- "DEPRECATED."
- ),
-)
@click.option(
"--docker-client-timeout",
default=None,
type=int,
help="The timeout to use for docker API calls, defaults to 60 seconds.",
)
-def start(volumes, no_docker_interface, **kwargs):
+def start(volumes, **kwargs):
"""Start a docker agent"""
from prefect.agent.docker import DockerAgent
start_agent(
DockerAgent,
volumes=list(volumes),
- docker_interface=(
- not no_docker_interface if no_docker_interface is not None else None
- ),
**kwargs,
)
@@ -361,48 +335,6 @@ def install(label, env, **kwargs):
click.echo(deployment)
-#################
-# Fargate Agent #
-#################
-
-
-def warn_fargate_deprecated():
- click.secho(
- "Warning: The Fargate agent is deprecated, please transition to using the ECS agent instead",
- fg="yellow",
- err=True,
- )
-
-
-@agent.group()
-def fargate():
- """Manage Prefect Fargate agents (DEPRECATED).
-
- The Fargate agent is deprecated, please transition to using the ECS agent instead.
- """
-
-
-@fargate.command(
- context_settings=dict(ignore_unknown_options=True, allow_extra_args=True)
-)
-@add_options(COMMON_START_OPTIONS)
-@click.pass_context
-def start(ctx, **kwargs):
- """Start a Fargate agent (DEPRECATED)
-
- The Fargate agent is deprecated, please transition to using the ECS agent instead.
- """
- from prefect.agent.fargate import FargateAgent
-
- warn_fargate_deprecated()
-
- for item in ctx.args:
- k, v = item.replace("--", "").split("=", 1)
- kwargs[k] = v
-
- start_agent(FargateAgent, _called_from_cli=True, **kwargs)
-
-
#############
# ECS Agent #
#############
diff --git a/src/prefect/cli/auth.py b/src/prefect/cli/auth.py
index 4d9e1e051eeb..1935c4707f6c 100644
--- a/src/prefect/cli/auth.py
+++ b/src/prefect/cli/auth.py
@@ -1,8 +1,10 @@
import click
import os
+import shutil
import pendulum
from click.exceptions import Abort
from tabulate import tabulate
+from pathlib import Path
from prefect import Client, config
from prefect.exceptions import AuthorizationError, ClientError
@@ -10,7 +12,12 @@
from prefect.backend import TenantView
+# For deleting authentication tokens which have been replaced with API keys
+AUTH_TOKEN_SETTINGS_PATH = Path(f"{config.home_dir}/client").expanduser()
+
+
def check_override_auth_token():
+ # Exists for purging old tokens only
if config.cloud.get("auth_token"):
if os.environ.get("PREFECT__CLOUD__AUTH_TOKEN"):
click.secho(
@@ -55,9 +62,6 @@ def auth():
create-key Create an API key
list-keys List details of existing API keys
revoke-key Delete an API key from the backend
- create-token Create an API token (DEPRECATED)
- list-tokens List the names and ids of existing API tokens (DEPRECATED)
- revoke-token Delete an API token from the backend (DEPRECATED)
\bExamples:
@@ -90,13 +94,8 @@ def auth():
"-k",
help="A Prefect Cloud API key.",
)
-@click.option(
- "--token",
- "-t",
- help="A Prefect Cloud API token. DEPRECATED.",
-)
@handle_terminal_error
-def login(key, token):
+def login(key):
"""
Login to Prefect Cloud
@@ -112,39 +111,25 @@ def login(key, token):
this key for all interaction with the API but frequently overrides can be passed to
individual commands or functions. To remove your key from disk, see
`prefect auth logout`.
-
- This command has backwards compatibility support for API tokens, which are a
- deprecated form of authentication with Prefect Cloud
"""
- if not key and not token:
- raise TerminalError("You must supply an API key or token!")
-
- if key and token:
- raise TerminalError("You cannot supply both an API key and token")
+ if not key:
+ raise TerminalError("You must supply an API key!")
abort_on_config_api_key(
"To log in with the CLI, remove the config key `prefect.cloud.api_key`"
)
- # Attempt to treat the input like an API key even if it is passed as a token
# Ignore any tenant id that has been previously set via login
- client = Client(api_key=key or token)
+ client = Client(api_key=key)
client._tenant_id = None
try:
tenant_id = client._get_auth_tenant()
except AuthorizationError:
- if key: # We'll catch an error again later if using a token
- raise TerminalError("Unauthorized. Invalid Prefect Cloud API key.")
+ raise TerminalError("Unauthorized. Invalid Prefect Cloud API key.")
except ClientError:
raise TerminalError("Error attempting to communicate with Prefect Cloud.")
else:
- if token:
- click.secho(
- "WARNING: You logged in with an API key using the `--token` flag "
- "which is deprecated. Please use `--key` instead.",
- fg="yellow",
- )
client.tenant_id = tenant_id
client.save_auth_to_disk()
tenant = TenantView.from_tenant_id(tenant_id)
@@ -154,62 +139,6 @@ def login(key, token):
)
return
- # If there's not a tenant id, we've been given an actual token, fallthrough to
- # the backwards compatibility token auth
-
- # Backwards compatibility for tokens
- if token:
- check_override_auth_token()
- client = Client(api_token=token)
-
- # Verify they're not also using an API key
- if client.api_key:
- raise TerminalError(
- "You have already logged in with an API key and cannot use a token."
- )
-
- click.secho(
- "WARNING: API tokens are deprecated. Please create an API key and use "
- "`prefect auth login --key ` to login instead.",
- fg="yellow",
- )
-
- # Verify login obtained a valid api token
- try:
- output = client.graphql(
- query={"query": {"user": {"default_membership": "tenant_id"}}}
- )
-
- # Log into default membership
- success_login = client.login_to_tenant(
- tenant_id=output.data.user[0].default_membership.tenant_id
- )
-
- if not success_login:
- raise AuthorizationError
-
- except AuthorizationError:
- click.secho(
- "Error attempting to use the given API token. "
- "Please check that you are providing a USER scoped Personal Access Token "
- "and consider switching API key.",
- fg="red",
- )
- return
- except ClientError:
- click.secho(
- "Error attempting to communicate with Prefect Cloud. "
- "Please check that you are providing a USER scoped Personal Access Token "
- "and consider switching API key.",
- fg="red",
- )
- return
-
- # save token
- client.save_api_token()
-
- click.secho("Login successful!", fg="green")
-
@auth.command(hidden=True)
@click.option(
@@ -250,45 +179,34 @@ def logout(token):
click.secho("Logged out of Prefect Cloud", fg="green")
- elif client._api_token:
-
- check_override_auth_token()
- tenant_id = client.active_tenant_id
-
- if not tenant_id:
- click.confirm(
- "Are you sure you want to log out of Prefect Cloud? "
- "This will remove your API token from this machine.",
- default=False,
- abort=True,
- )
+ else:
+ raise TerminalError(
+ "You are not logged in to Prefect Cloud. "
+ "Use `prefect auth login` to log in first."
+ )
- # Remove the token from local storage by writing blank settings
- client._save_local_settings({})
- click.secho("Logged out of Prefect Cloud", fg="green")
- else:
- # Log out of the current tenant (dropping the access token) while retaining
- # the API token. This is backwards compatible behavior. Running the logout
- # command twice will remove the token from storage entirely
- click.confirm(
- "Are you sure you want to log out of your current Prefect Cloud tenant?",
- default=False,
- abort=True,
- )
+@auth.command()
+def purge_tokens():
+ check_override_auth_token()
- client.logout_from_tenant()
+ if not AUTH_TOKEN_SETTINGS_PATH.exists():
+ click.secho(
+ "The deprecated authentication tokens settings path "
+ f"'{AUTH_TOKEN_SETTINGS_PATH}' has already been removed."
+ )
- click.secho(
- f"Logged out from tenant {tenant_id}. Run `prefect auth logout` again "
- "to delete your API token.",
- fg="green",
- )
else:
- raise TerminalError(
- "You are not logged in to Prefect Cloud. "
- "Use `prefect auth login` to log in first."
+ confirm = click.confirm(
+ "Are you sure you want to delete the deprecated authentication token "
+ f"settings folder '{AUTH_TOKEN_SETTINGS_PATH}'?"
)
+ if not confirm:
+ print("Aborted!")
+ return
+
+ shutil.rmtree(AUTH_TOKEN_SETTINGS_PATH)
+ print("Removed!")
@auth.command(hidden=True)
@@ -358,146 +276,27 @@ def switch_tenants(id, slug, default):
client = Client()
- # Deprecated API token check
if not client.api_key:
- check_override_auth_token()
-
- if default:
- raise TerminalError(
- "The default tenant flag can only be used with API keys."
- )
-
- else: # Using an API key
- if default:
- # Clear the set tenant on disk
- client.tenant_id = None
- client.save_auth_to_disk()
- click.secho(
- "Tenant restored to the default tenant for your API key: "
- f"{client._get_auth_tenant()}",
- fg="green",
- )
- return
+ raise TerminalError("You are not logged in!")
- login_success = client.login_to_tenant(tenant_slug=slug, tenant_id=id)
- if not login_success:
- raise TerminalError("Unable to switch tenant!")
-
- # `login_to_tenant` will write to disk if using an API token, if using an API key
- # we will write to disk manually here
- if client.api_key:
+ if default:
+ # Clear the set tenant on disk
+ client.tenant_id = None
client.save_auth_to_disk()
-
- click.secho(f"Tenant switched to {client.tenant_id}", fg="green")
-
-
-@auth.command(hidden=True)
-@click.option("--name", "-n", required=True, help="A token name.", hidden=True)
-@click.option("--scope", "-s", required=True, help="A token scopre.", hidden=True)
-def create_token(name, scope):
- """
- DEPRECATED. Please use API keys instead.
-
- Create a Prefect Cloud API token.
-
- For more info on API tokens visit https://docs.prefect.io/orchestration/concepts/api.html
-
- \b
- Options:
- --name, -n TEXT A name to give the generated token
- --scope, -s TEXT A scope for the token
- """
- click.secho(
- "WARNING: API tokens are deprecated. Please use `prefect auth create-key` to "
- "create an API key instead.",
- fg="yellow",
- err=True, # Write to stderr in case the user is piping
- )
-
- client = Client()
-
- output = client.graphql(
- query={
- "mutation($input: create_api_token_input!)": {
- "create_api_token(input: $input)": {"token"}
- }
- },
- variables=dict(input=dict(name=name, scope=scope)),
- )
-
- if not output.get("data", None):
- click.secho("Issue creating API token", fg="red")
- return
-
- click.echo(output.data.create_api_token.token)
-
-
-@auth.command(hidden=True)
-def list_tokens():
- """
- DEPRECATED. Please use API keys instead.
-
- List your available Prefect Cloud API tokens.
- """
- click.secho(
- "WARNING: API tokens are deprecated. Please consider removing your remaining "
- "tokens and using API keys instead.",
- fg="yellow",
- err=True, # Write to stderr in case the user is piping
- )
-
- client = Client()
- output = client.graphql(query={"query": {"api_token": {"id", "name"}}})
-
- if not output.get("data", None):
- click.secho("Unable to list API tokens", fg="red")
- return
-
- tokens = []
- for item in output.data.api_token:
- tokens.append([item.name, item.id])
-
- click.echo(
- tabulate(
- tokens,
- headers=["NAME", "ID"],
- tablefmt="plain",
- numalign="left",
- stralign="left",
+ click.secho(
+ "Tenant restored to the default tenant for your API key: "
+ f"{client._get_auth_tenant()}",
+ fg="green",
)
- )
-
-
-@auth.command(hidden=True)
-@click.option("--id", "-i", required=True, help="A token ID.", hidden=True)
-def revoke_token(id):
- """
- DEPRECATED. Please use API keys instead.
-
- Revoke a Prefect Cloud API token
-
- \b
- Options:
- --id, -i TEXT The id of a token to revoke
- """
- check_override_auth_token()
-
- client = Client()
-
- output = client.graphql(
- query={
- "mutation($input: delete_api_token_input!)": {
- "delete_api_token(input: $input)": {"success"}
- }
- },
- variables=dict(input=dict(token_id=id)),
- )
-
- if not output.get("data", None) or not output.data.delete_api_token.success:
- click.secho("Unable to revoke token with ID {}".format(id), fg="red")
return
- click.secho("Token successfully revoked", fg="green")
+ try:
+ tenant_id = client.switch_tenant(tenant_slug=slug, tenant_id=id)
+ except AuthorizationError:
+ raise TerminalError("Unauthorized. Your API key is not valid for that tenant.")
+
+ client.save_auth_to_disk()
+ click.secho(f"Tenant switched to {tenant_id}", fg="green")
@auth.command(hidden=True)
@@ -672,17 +471,32 @@ def status():
except Exception as exc:
click.echo(f"Your authentication is not working: {exc}")
- if client._api_token:
+ if AUTH_TOKEN_SETTINGS_PATH.exists():
click.secho(
- "You are logged in with an API token. These have been deprecated in favor "
- "of API keys."
- + (
- " Since you have set an API key as well, this will be ignored."
- if client.api_key
- else ""
- ),
+ "The authentication tokens settings path still exists. These have been "
+ "removed in favor of API keys. We recommend purging old tokens with "
+ "`prefect auth purge-tokens`",
fg="yellow",
)
- if not client._api_token and not client.api_key:
- click.secho("You are not logged in!", fg="yellow")
+ if config.cloud.get("auth_token"):
+ if os.environ.get("PREFECT__CLOUD__AUTH_TOKEN"):
+ click.secho(
+ "An authentication token is set via environment variable. "
+ "These have been removed in favor of API keys and the variable will be "
+ "ignored. We recommend unsetting the 'PREFECT__CLOUD__AUTH_TOKEN' key",
+ fg="yellow",
+ )
+ else:
+ click.secho(
+ "An authentication token is set via the prefect config file. "
+ "These have been removed in favor of API keys and the setting will be "
+ "ignored. We recommend removing the 'prefect.cloud.auth_token' key",
+ fg="yellow",
+ )
+
+ if not client.api_key:
+ click.secho(
+ "You are not logged in! Use `prefect auth login` to login with an API key.",
+ fg="yellow",
+ )
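
Taken together, the auth changes above reduce the login story to API keys alone. A minimal sketch of the key-based flow (the key and slug values are placeholders; `switch_tenant` and `save_auth_to_disk` are the `Client` methods the CLI commands above call):

```python
# Hedged sketch of the API-key-only auth flow after this change.
from prefect.client import Client

client = Client(api_key="<your-api-key>")  # placeholder key

# Switch to a specific tenant by slug, then persist the choice so future
# clients (and CLI invocations) pick it up from disk.
tenant_id = client.switch_tenant(tenant_slug="my-team")
client.save_auth_to_disk()
print(f"Tenant switched to {tenant_id}")
```
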
diff --git a/src/prefect/cli/build_register.py b/src/prefect/cli/build_register.py
index ffdbbb16c06c..796fbda0bb3d 100644
--- a/src/prefect/cli/build_register.py
+++ b/src/prefect/cli/build_register.py
@@ -19,7 +19,6 @@
from click.exceptions import ClickException
import prefect
-from prefect.utilities.storage import extract_flow_from_file
from prefect.utilities.filesystems import read_bytes_from_path, parse_path
from prefect.utilities.graphql import with_args, EnumValue, compress
from prefect.utilities.importtools import import_object
@@ -296,25 +295,20 @@ def prepare_flows(flows: "List[FlowLike]", labels: List[str] = None) -> None:
for flow in flows:
if isinstance(flow, dict):
# Add any extra labels to the flow
- if flow.get("environment"):
- new_labels = set(flow["environment"].get("labels") or []).union(labels)
- flow["environment"]["labels"] = sorted(new_labels)
- else:
- new_labels = set(flow["run_config"].get("labels") or []).union(labels)
- flow["run_config"]["labels"] = sorted(new_labels)
+ new_labels = set(flow["run_config"].get("labels") or []).union(labels)
+ flow["run_config"]["labels"] = sorted(new_labels)
else:
# Set the default flow result if not specified
if not flow.result:
flow.result = flow.storage.result
# Add a `run_config` if not configured explicitly
- if flow.run_config is None and flow.environment is None:
+ if flow.run_config is None:
flow.run_config = UniversalRun()
# Add any extra labels to the flow (either specified via the CLI,
# or from the storage object).
- obj = flow.run_config or flow.environment
- obj.labels.update(labels)
- obj.labels.update(flow.storage.labels)
+ flow.run_config.labels.update(labels)
+ flow.run_config.labels.update(flow.storage.labels)
# Add the flow to storage
flow.storage.add_flow(flow)
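
The simplified merge above no longer branches on `environment`; for dict-based flows it reduces to a set union on `run_config` labels. A standalone sketch with assumed inputs:

```python
# Standalone sketch of the label merge in `prepare_flows` above, using an
# assumed dict-based flow payload.
flow = {"run_config": {"labels": ["docker"]}}
labels = ["prod", "docker"]

new_labels = set(flow["run_config"].get("labels") or []).union(labels)
flow["run_config"]["labels"] = sorted(new_labels)

assert flow["run_config"]["labels"] == ["docker", "prod"]
```
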
@@ -762,26 +756,15 @@ def watch_for_changes(
),
default=True,
)
-@click.pass_context
@handle_terminal_error
def register(
- ctx, project, paths, modules, json_paths, names, labels, force, watch, schedule
+ project, paths, modules, json_paths, names, labels, force, watch, schedule
):
"""Register one or more flows into a project.
Flows with unchanged metadata will be skipped as registering again will only
change the version number.
"""
- # Since the old command was a subcommand of this, we have to do some
- # mucking to smoothly deprecate it. Can be removed with `prefect register
- # flow` is removed.
- if ctx.invoked_subcommand is not None:
- if any([project, paths, modules, names, labels, force]):
- raise ClickException(
- "Got unexpected extra argument (%s)" % ctx.invoked_subcommand
- )
- return
-
if project is None:
raise ClickException("Missing required option '--project'")
@@ -987,65 +970,3 @@ def build(paths, modules, names, labels, output, update):
# Exit with appropriate status code
if errored:
raise TerminalError
-
-
-@register.command(hidden=True)
-@click.option(
- "--file",
- "-f",
- required=True,
- help="A file that contains a flow",
- hidden=True,
- default=None,
- type=click.Path(exists=True),
-)
-@click.option(
- "--name",
- "-n",
- required=False,
- help="The `flow.name` to pull out of the file provided",
- hidden=True,
- default=None,
-)
-@click.option(
- "--project",
- "-p",
- required=False,
- help="The name of a Prefect project to register this flow",
- hidden=True,
- default=None,
-)
-@click.option(
- "--label",
- "-l",
- required=False,
- help="A label to set on the flow, extending any existing labels.",
- hidden=True,
- multiple=True,
-)
-@click.option(
- "--skip-if-flow-metadata-unchanged",
- is_flag=True,
- help="Skips registration if flow metadata is unchanged",
- hidden=True,
-)
-def flow(file, name, project, label, skip_if_flow_metadata_unchanged):
- """Register a flow (DEPRECATED)"""
- # Deprecated in 0.14.13
- click.secho(
- (
- "Warning: `prefect register flow` is deprecated, please transition to "
- "using `prefect register` instead."
- ),
- fg="yellow",
- )
- # Don't run extra `run` and `register` functions inside file
- file_path = os.path.abspath(file)
- with prefect.context({"loading_flow": True, "local_script_path": file_path}):
- flow = extract_flow_from_file(file_path=file_path, flow_name=name)
-
- idempotency_key = (
- flow.serialized_hash() if skip_if_flow_metadata_unchanged else None
- )
-
- flow.register(project_name=project, labels=label, idempotency_key=idempotency_key)
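
For anyone still on the removed `prefect register flow` command, its core behavior boils down to a few lines of Python that remain valid after this diff; a hedged migration sketch (the flow and project names are placeholders):

```python
# Migration sketch for the removed `prefect register flow` command.
# `serialized_hash()` provides the same skip-if-unchanged behavior the old
# --skip-if-flow-metadata-unchanged flag did.
from prefect import Flow, task

@task
def say_hello():
    print("hello")

with Flow("Test-Flow") as flow:
    say_hello()

flow.register(
    project_name="My Project",  # placeholder project
    idempotency_key=flow.serialized_hash(),
)
```
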
diff --git a/src/prefect/cli/execute.py b/src/prefect/cli/execute.py
index 62decbd1bbc6..ebfb0318b3b9 100644
--- a/src/prefect/cli/execute.py
+++ b/src/prefect/cli/execute.py
@@ -73,15 +73,11 @@ def flow_run():
flow = storage.get_flow(flow_data.name)
with prefect.context(secrets=secrets):
- if flow_data.run_config is not None:
- runner_cls = get_default_flow_runner_class()
- runner_cls(flow=flow).run()
- else:
- environment = flow.environment
- environment.setup(flow)
- environment.execute(flow)
+ runner_cls = get_default_flow_runner_class()
+ runner_cls(flow=flow).run()
+
except Exception as exc:
- msg = "Failed to load and execute Flow's environment: {}".format(repr(exc))
+ msg = "Failed to load and execute flow run: {}".format(repr(exc))
state = prefect.engine.state.Failed(message=msg)
client.set_flow_run_state(flow_run_id=flow_run_id, state=state)
client.write_run_logs(
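
With the environment branch gone, every flow run takes the runner path. A minimal sketch of that path in isolation (assuming a flow already loaded from storage):

```python
# Minimal sketch of the single execution path above; `flow` stands in for
# the result of storage.get_flow(...).
from prefect import Flow
from prefect.engine import get_default_flow_runner_class

flow = Flow("example")
runner_cls = get_default_flow_runner_class()
state = runner_cls(flow=flow).run()
print(state)
```
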
diff --git a/src/prefect/cli/run.py b/src/prefect/cli/run.py
index 9ceb1b7ef873..6b726e853aac 100644
--- a/src/prefect/cli/run.py
+++ b/src/prefect/cli/run.py
@@ -12,7 +12,6 @@
import click
from click import ClickException
-from tabulate import tabulate
import prefect
from prefect.backend.flow import FlowView
@@ -24,7 +23,6 @@
log_exception,
)
from prefect.client import Client
-from prefect.utilities.graphql import EnumValue, with_args
from prefect.utilities.importtools import import_object
from prefect.utilities.logging import temporary_logger_config
@@ -346,7 +344,6 @@ def cast_value(value: str) -> Any:
@click.group(invoke_without_command=True, epilog=RUN_EPILOG)
-@click.pass_context
# Flow lookup settings -----------------------------------------------------------------
@click.option(
"--id",
@@ -484,7 +481,6 @@ def cast_value(value: str) -> Any:
)
@handle_terminal_error
def run(
- ctx,
flow_or_group_id,
project,
path,
@@ -504,17 +500,6 @@ def run(
watch,
):
"""Run a flow"""
- # Since the old command was a subcommand of this, we have to do some
- # mucking to smoothly deprecate it. Can be removed with `prefect run flow`
- # is removed.
- if ctx.invoked_subcommand is not None:
- if any([params, no_logs, quiet, flow_or_group_id]):
- # These options are not supported by `prefect run flow`
- raise ClickException(
- "Got unexpected extra argument (%s)" % ctx.invoked_subcommand
- )
- return
-
# Define a simple function so we don't have to have a lot of `if not quiet` logic
quiet_echo = (
(lambda *_, **__: None)
@@ -788,308 +773,3 @@ def run(
else:
quiet_echo(f"Flow run is in unexpected state: {flow_run.state}", fg="yellow")
sys.exit(1)
-
-
-# DEPRECATED: prefect run flow ---------------------------------------------------------
-
-
-@run.command("flow", hidden=True)
-@click.option("--id", help="The UUID of a flow to run.", default=None)
-@click.option(
- "--version-group-id",
- required=False,
- help="The id of a flow version group to run.",
- hidden=True,
-)
-@click.option(
- "--name", "-n", required=False, help="The name of a flow to run.", hidden=True
-)
-@click.option(
- "--project",
- "-p",
- required=False,
- help="The project that contains the flow.",
- hidden=True,
-)
-@click.option("--version", "-v", type=int, help="A flow version to run.", hidden=True)
-@click.option(
- "--parameters-file",
- "-pf",
- help="A parameters JSON file.",
- hidden=True,
- type=click.Path(exists=True),
-)
-@click.option(
- "--parameters-string", "-ps", help="A parameters JSON string.", hidden=True
-)
-@click.option("--run-name", "-rn", help="A name to assign for this run.", hidden=True)
-@click.option("--context", "-c", help="A context JSON string.", hidden=True)
-@click.option(
- "--watch",
- "-w",
- is_flag=True,
- help="Watch current state of the flow run.",
- hidden=True,
-)
-@click.option(
- "--label",
- "labels",
- help="A list of labels to apply to the flow run",
- hidden=True,
- multiple=True,
-)
-@click.option(
- "--logs", "-l", is_flag=True, help="Live logs of the flow run.", hidden=True
-)
-@click.option(
- "--no-url",
- is_flag=True,
- help="Only output flow run id instead of link.",
- hidden=True,
-)
-def run_flow(
- id,
- version_group_id,
- name,
- project,
- version,
- parameters_file,
- parameters_string,
- run_name,
- context,
- watch,
- labels,
- logs,
- no_url,
-):
- """
- Run a flow that is registered to the Prefect API
-
- DEPRECATED: Use `prefect run` instead of `prefect run flow`
-
- \b
- Options:
- --id, -i TEXT The ID of a flow to run
- --version-group-id TEXT The ID of a flow version group to run
- --name, -n TEXT The name of a flow to run
- --project, -p TEXT The name of a project that contains the flow
- --version, -v INTEGER A flow version to run
- --parameters-file, -pf FILE PATH A filepath of a JSON file containing
- parameters
- --parameters-string, -ps TEXT A string of JSON parameters (note: to ensure these are
- parsed correctly, it is best to include the full payload
- within single quotes)
- --run-name, -rn TEXT A name to assign for this run
- --context, -c TEXT A string of JSON key / value pairs to include in context
- (note: to ensure these are parsed correctly, it is best
- to include the full payload within single quotes)
- --watch, -w Watch current state of the flow run, stream
- output to stdout
- --label TEXT Set labels on the flow run; use multiple times to set
- multiple labels.
- --logs, -l Get logs of the flow run, stream output to
- stdout
- --no-url Only output the flow run id instead of a
- link
-
- \b
- Either `id`, `version-group-id`, or both `name` and `project` must be provided to run a flow.
-
- \b
- If both `--parameters-file` and `--parameters-string` are provided then the values
- passed in through the string will override the values provided from the file.
-
- \b
- e.g.
- File contains: {"a": 1, "b": 2}
- String: '{"a": 3}'
- Parameters passed to the flow run: {"a": 3, "b": 2}
-
- \b
- Example:
- $ prefect run flow -n "Test-Flow" -p "My Project" -ps '{"my_param": 42}'
- Flow Run: https://cloud.prefect.io/myslug/flow-run/2ba3rrfd-411c-4d99-bb2a-f64a6dea78f9
- """
- if not id and not (name and project) and not version_group_id:
- click.secho(
- "A flow ID, version group ID, or a combination of flow name and project must be provided.",
- fg="red",
- )
- return
-
- if sum(map(bool, (id, version_group_id, name))) != 1:
- click.secho(
- "Only one of flow ID, version group ID, or a name/project combination can be provided.",
- fg="red",
- )
- return
-
- if watch and logs:
- click.secho(
- "Streaming state and logs not currently supported together.", fg="red"
- )
- return
-
- if labels == ():
- labels = None
-
- client = Client()
- flow_id = id
- if not flow_id and not version_group_id:
- where_clause = {
- "_and": {
- "name": {"_eq": name},
- "version": {"_eq": version},
- "project": {"name": {"_eq": project}},
- }
- }
-
- query = {
- "query": {
- with_args(
- "flow",
- {
- "where": where_clause,
- "order_by": {
- "name": EnumValue("asc"),
- "version": EnumValue("desc"),
- },
- "distinct_on": EnumValue("name"),
- },
- ): {"id": True}
- }
- }
-
- result = client.graphql(query)
-
- flow_data = result.data.flow
-
- if flow_data:
- flow_id = flow_data[0].id
- else:
- click.secho("{} not found".format(name), fg="red")
- return
-
- # Load parameters from file if provided
- file_params = {}
- if parameters_file:
- with open(parameters_file) as params_file:
- file_params = json.load(params_file)
-
- # Load parameters from string if provided
- string_params = {}
- if parameters_string:
- string_params = json.loads(parameters_string)
-
- if context:
- context = json.loads(context)
- flow_run_id = client.create_flow_run(
- flow_id=flow_id,
- version_group_id=version_group_id,
- context=context,
- labels=labels,
- parameters={**file_params, **string_params},
- run_name=run_name,
- )
-
- if no_url:
- click.echo("Flow Run ID: {}".format(flow_run_id))
- else:
- flow_run_url = client.get_cloud_url("flow-run", flow_run_id)
- click.echo("Flow Run: {}".format(flow_run_url))
-
- if watch:
- current_states = []
- while True:
- query = {
- "query": {
- with_args("flow_run_by_pk", {"id": flow_run_id}): {
- with_args(
- "states",
- {"order_by": {EnumValue("timestamp"): EnumValue("asc")}},
- ): {"state": True, "timestamp": True}
- }
- }
- }
-
- result = client.graphql(query)
-
- # Filter through retrieved states and output in order
- for state_index in result.data.flow_run_by_pk.states:
- state = state_index.state
- if state not in current_states:
- if state != "Success" and state != "Failed":
- click.echo("{} -> ".format(state), nl=False)
- else:
- click.echo(state)
- return flow_run_id
-
- current_states.append(state)
-
- time.sleep(3)
-
- if logs:
- all_logs = []
-
- log_query = {
- with_args(
- "logs", {"order_by": {EnumValue("timestamp"): EnumValue("asc")}}
- ): {"timestamp": True, "message": True, "level": True},
- "start_time": True,
- "state": True,
- }
-
- query = {
- "query": {
- with_args(
- "flow_run",
- {
- "where": {"id": {"_eq": flow_run_id}},
- "order_by": {EnumValue("start_time"): EnumValue("desc")},
- },
- ): log_query
- }
- }
-
- while True:
- result = client.graphql(query)
-
- flow_run = result.data.flow_run
- if not flow_run:
- click.secho("{} not found".format(flow_run_id), fg="red")
- return
-
- new_run = flow_run[0]
- logs = new_run.logs
- output = []
-
- for i in logs:
- if [i.timestamp, i.level, i.message] not in all_logs:
-
- if not len(all_logs):
- click.echo(
- tabulate(
- [[i.timestamp, i.level, i.message]],
- headers=["TIMESTAMP", "LEVEL", "MESSAGE"],
- tablefmt="plain",
- numalign="left",
- stralign="left",
- )
- )
- all_logs.append([i.timestamp, i.level, i.message])
- continue
-
- output.append([i.timestamp, i.level, i.message])
- all_logs.append([i.timestamp, i.level, i.message])
-
- if output:
- click.echo(
- tabulate(output, tablefmt="plain", numalign="left", stralign="left")
- )
-
- if new_run.state == "Success" or new_run.state == "Failed":
- return flow_run_id
-
- time.sleep(3)
-
- return flow_run_id
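
The removed command was ultimately a thin wrapper around `Client.create_flow_run`; a hedged sketch of calling it directly (the flow ID and parameters are placeholders):

```python
# Hedged sketch of what the removed `prefect run flow` command did at its core.
from prefect.client import Client

client = Client()
flow_run_id = client.create_flow_run(
    flow_id="<flow-id>",  # placeholder
    parameters={"my_param": 42},
    run_name="example-run",
)
# The same URL the old command printed when --no-url was not set
print("Flow Run: {}".format(client.get_cloud_url("flow-run", flow_run_id)))
```
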
diff --git a/src/prefect/client/__init__.py b/src/prefect/client/__init__.py
index a3f9778f13c9..cfc3887dd935 100644
--- a/src/prefect/client/__init__.py
+++ b/src/prefect/client/__init__.py
@@ -1,2 +1,4 @@
from prefect.client.client import Client
from prefect.client.secrets import Secret
+
+__all__ = ["Client", "Secret"]
diff --git a/src/prefect/client/client.py b/src/prefect/client/client.py
index f968237744f1..22907b580243 100644
--- a/src/prefect/client/client.py
+++ b/src/prefect/client/client.py
@@ -1,6 +1,5 @@
import datetime
import json
-import os
import random
import re
import time
@@ -12,11 +11,10 @@
Any,
Dict,
List,
+ Mapping,
NamedTuple,
Optional,
Union,
- cast,
- Mapping,
)
from urllib.parse import urljoin, urlparse
@@ -32,26 +30,28 @@
from slugify import slugify
import prefect
-from prefect.run_configs import RunConfig
from prefect.exceptions import (
AuthorizationError,
ClientError,
VersionLockMismatchSignal,
ObjectNotFoundError,
)
+from prefect.run_configs import RunConfig
from prefect.utilities.graphql import (
EnumValue,
GraphQLResult,
compress,
+ format_graphql_request_error,
parse_graphql,
with_args,
- format_graphql_request_error,
)
from prefect.utilities.logging import create_diagnostic_logger
if TYPE_CHECKING:
- from prefect.core import Flow
import requests
+
+ from prefect.core import Flow
+
JSONLike = Union[bool, dict, list, str, int, float, None]
# type definitions for GraphQL results
@@ -90,8 +90,7 @@ class Client:
Client for communication with Prefect Cloud
If the arguments aren't specified the client initialization first checks the prefect
- configuration and if the server is not set there it checks the current context. The
- token will only be present in the current context.
+ configuration and if the server is not set there it checks the current context.
Args:
- api_server (str, optional): the URL to send all GraphQL requests to; if not
@@ -103,11 +102,6 @@ class Client:
- tenant_id (str, optional): the Prefect tenant to use. If not provided, loaded
from `config.cloud.tenant_id` or the on disk cache from the
`prefect auth` CLI
- - api_token (str, optional): a Prefect Cloud API token, taken from
- `config.cloud.auth_token` if not provided. If this token is USER-scoped, it may
- be used to log in to any tenant that the user is a member of. In that case,
- ephemeral JWTs will be loaded as necessary. Otherwise, the API token itself
- will be used as authorization. DEPRECATED; use `api_key` instead.
"""
def __init__(
@@ -115,11 +109,7 @@ def __init__(
api_server: str = None,
api_key: str = None,
tenant_id: str = None,
- api_token: str = None,
):
- self._access_token = None
- self._refresh_token = None
- self._access_token_expires_at = pendulum.now()
self._attached_headers = {} # type: Dict[str, str]
self.logger = create_diagnostic_logger("Diagnostics")
@@ -128,8 +118,7 @@ def __init__(
# Note the default is `cloud.api` which is `cloud.endpoint` or `server.endpoint`
# depending on the value of the `backend` key
- # This must be set before `load_auth_from_disk()` can be called but if no API
- # key is found this will default to a different value for backwards compat
+ # This must be set before `load_auth_from_disk()` can be called
self.api_server = api_server or prefect.context.config.cloud.api
# Load the API key
@@ -140,9 +129,13 @@ def __init__(
or cached_auth.get("api_key")
)
+ # mypy struggles with this attribute type if not created here
+ self._tenant_id: Optional[str] = None
+
# Load the tenant id
- self._tenant_id: Optional[str] = (
- tenant_id
+ # This assignment is validated by the `Client.tenant_id` setter
+ self.tenant_id: Optional[str] = (
+ tenant_id # type: ignore
or prefect.context.config.cloud.get("tenant_id")
or cached_auth.get("tenant_id")
)
@@ -150,50 +143,6 @@ def __init__(
# If not set at this point, when `Client.tenant_id` is accessed the default
# tenant will be loaded and used for future requests.
- # Backwards compatibility for API tokens ---------------------------------------
-
- self._api_token = api_token or prefect.context.config.cloud.get("auth_token")
-
- if (
- not self.api_key
- and not api_server
- and prefect.context.config.backend == "cloud"
- ):
- # The default value for the `api_server` changed for API keys but we want
- # to load API tokens from the correct backwards-compatible location on disk
- self.api_server = prefect.config.cloud.graphql
-
- if (
- not self.api_key
- and not self._api_token
- and prefect.config.backend == "cloud"
- ):
- # If not using an API key and a token has not been passed or set in the
- # config, attempt to load an API token from disk
- self._init_tenant()
-
- if self._api_token and not self.api_key:
- warnings.warn(
- "Client was created with an API token configured for authentication. "
- "API tokens are deprecated, please use API keys instead."
- )
-
- # Warn if using both a token and API key, but only if they have different values
- # because we pass the api key as an api token in some places for backwards
- # compatibility
- if self._api_token and self.api_key and self._api_token != self.api_key:
- warnings.warn(
- "Found both an API token and an API key. API tokens have been "
- "deprecated and it will be ignored in favor of the API key."
- + (
- # If they did not pass one explicitly, we can tell them how to fix
- # this in the config
- " Remove the token from the config at `prefect.config.auth_token`"
- if not api_token
- else ""
- )
- )
-
# API key authentication -----------------------------------------------------------
def _get_auth_tenant(self) -> str:
@@ -317,21 +266,13 @@ def tenant_id(self) -> str:
If it is has not been explicitly set, the default tenant id will be retrieved
"""
- if prefect.config.backend == "cloud":
- if self._api_token and not self.api_key:
- # Backwards compatibility for API tokens
- if not self._tenant_id:
- self._init_tenant()
-
- # Should be set by `_init_tenant()` but we will not guarantee it
- return self._tenant_id # type: ignore
-
- if not self._tenant_id:
+ if not self._tenant_id:
+ if prefect.config.backend == "cloud":
self._tenant_id = self._get_auth_tenant()
-
- elif prefect.config.backend == "server":
- if not self._tenant_id:
+ elif prefect.config.backend == "server":
self._tenant_id = self._get_default_server_tenant()
+ else:
+ raise ValueError(f"Unknown backend setting {prefect.config.backend!r}")
if not self._tenant_id:
raise ClientError(
@@ -342,8 +283,20 @@ def tenant_id(self) -> str:
return self._tenant_id
@tenant_id.setter
- def tenant_id(self, tenant_id: str) -> None:
- self._tenant_id = tenant_id
+ def tenant_id(self, tenant_id: Union[str, uuid.UUID, None]) -> None:
+ if tenant_id is None:
+ self._tenant_id = None
+ return
+
+ if not isinstance(tenant_id, uuid.UUID):
+ try:
+ tenant_id = uuid.UUID(tenant_id)
+ except ValueError as exc:
+ raise ValueError(
+ f"The `tenant_id` must be a valid UUID. Got {tenant_id!r}."
+ ) from exc
+
+ self._tenant_id = str(tenant_id)
# ----------------------------------------------------------------------------------
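
The stricter setter above normalizes ids to canonical UUID strings and rejects anything else. A standalone sketch of the same validation:

```python
# Standalone sketch of the validation performed by the new `tenant_id` setter.
import uuid
from typing import Optional, Union

def normalize_tenant_id(tenant_id: Union[str, uuid.UUID, None]) -> Optional[str]:
    if tenant_id is None:
        return None
    if not isinstance(tenant_id, uuid.UUID):
        # Raises ValueError for anything that is not a valid UUID string
        tenant_id = uuid.UUID(tenant_id)
    return str(tenant_id)

assert normalize_tenant_id(None) is None
assert normalize_tenant_id("12345678-1234-5678-1234-567812345678") == (
    "12345678-1234-5678-1234-567812345678"
)
```
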
@@ -364,8 +317,10 @@ def create_tenant(self, name: str, slug: str = None) -> str:
- ValueError: if run against Prefect Cloud
"""
if prefect.config.backend != "server":
- msg = "To create a tenant with Prefect Cloud, please signup at https://cloud.prefect.io/"
- raise ValueError(msg)
+ raise ValueError(
+ "To create a tenant with Prefect Cloud, please signup at "
+ "https://cloud.prefect.io/"
+ )
if slug is None:
slug = slugify(name)
@@ -389,11 +344,11 @@ def get(
server: str = None,
headers: dict = None,
params: Dict[str, JSONLike] = None,
- token: str = None,
+ api_key: str = None,
retry_on_api_error: bool = True,
) -> dict:
"""
- Convenience function for calling the Prefect API with token auth and GET request
+ Convenience function for calling the Prefect API with auth and GET request
Args:
- path (str): the path of the API url. For example, to GET
@@ -402,7 +357,7 @@ def get(
defaults to `self.api_server`
- headers (dict, optional): Headers to pass with the request
- params (dict): GET parameters
- - token (str): an auth token. If not supplied, the `client.access_token` is used.
+ - api_key (str): An api key for auth. Defaults to `client.api_key`.
- retry_on_api_error (bool): whether the operation should be retried if the API returns
an API_ERROR code
@@ -415,7 +370,7 @@ def get(
params=params,
server=server,
headers=headers,
- token=token,
+ api_key=api_key,
retry_on_api_error=retry_on_api_error,
)
if response.text:
@@ -429,11 +384,11 @@ def post(
server: str = None,
headers: dict = None,
params: Dict[str, JSONLike] = None,
- token: str = None,
+ api_key: str = None,
retry_on_api_error: bool = True,
) -> dict:
"""
- Convenience function for calling the Prefect API with token auth and POST request
+ Convenience function for calling the Prefect API with auth and POST request
Args:
- path (str): the path of the API url. For example, to POST
@@ -442,7 +397,7 @@ def post(
defaults to `self.api_server`
- headers(dict): headers to pass with the request
- params (dict): POST parameters
- - token (str): an auth token. If not supplied, the `client.access_token` is used.
+ - api_key (str): An api key for auth. Defaults to `client.api_key`.
- retry_on_api_error (bool): whether the operation should be retried if the API returns
an API_ERROR code
@@ -455,7 +410,7 @@ def post(
params=params,
server=server,
headers=headers,
- token=token,
+ api_key=api_key,
retry_on_api_error=retry_on_api_error,
)
if response.text:
@@ -463,65 +418,13 @@ def post(
else:
return {}
- def _init_tenant(self) -> None:
- """
- Init the tenant to contact the server.
-
- If your backend is set to cloud the tenant will be read from: $HOME/.prefect/settings.toml.
-
- For the server backend it will try to retrieve the default tenant. If the server is
- protected with auth like BasicAuth do not forget to `attach_headers` before any call.
-
- DEPRECATED.
- - API keys no longer need to log in and out of a tenant
- - The tenant is now set at __init__ or in the `tenant_id` property
- """
- if prefect.config.backend == "cloud":
- # if no api token was passed, attempt to load state from local storage
- settings = self._load_local_settings()
-
- if not self._api_token:
- self._api_token = settings.get("api_token")
- if self._api_token:
- self._tenant_id = settings.get("active_tenant_id")
-
- # Must refer to private variable since the property calls this function
- if self._tenant_id:
- try:
- self.login_to_tenant(tenant_id=self._tenant_id)
- except AuthorizationError:
- # Either the token is invalid _or_ it is not USER scoped. Try
- # pulling the correct tenant id from the API
- try:
- result = self.graphql({"query": {"tenant": {"id"}}})
- tenants = result["data"]["tenant"]
- # TENANT or RUNNER scoped tokens should have a single tenant
- if len(tenants) != 1:
- raise ValueError(
- "Failed to authorize with Prefect Cloud. "
- f"Could not log in to tenant {self._tenant_id!r}. "
- f"Found available tenants: {tenants}"
- )
- self._tenant_id = tenants[0].id
- except AuthorizationError:
- # On failure, we've just been given an invalid token and should
- # delete the auth information from disk
- self.logout_from_tenant()
-
- # This code should now be superseded by the `tenant_id` property but will remain
- # here for backwards compat until API tokens are removed entirely
- else:
- tenant_info = self.graphql({"query": {"tenant": {"id"}}})
- if tenant_info.data.tenant:
- self._tenant_id = tenant_info.data.tenant[0].id
-
def graphql(
self,
query: Any,
raise_on_error: bool = True,
headers: Dict[str, str] = None,
variables: Mapping[str, JSONLike] = None,
- token: str = None,
+ api_key: str = None,
retry_on_api_error: bool = True,
) -> GraphQLResult:
"""
@@ -536,7 +439,7 @@ def graphql(
request
- variables (dict): Variables to be filled into a query with the key being
equivalent to the variables that are accepted by the query
- - token (str): an auth token. If not supplied, the `client.access_token` is used.
+ - api_key (str): An api key for auth. Defaults to `client.api_key`.
- retry_on_api_error (bool): whether the operation should be retried if the API returns
an API_ERROR code
@@ -551,7 +454,7 @@ def graphql(
server=self.api_server,
headers=headers,
params=dict(query=parse_graphql(query), variables=json.dumps(variables)),
- token=token,
+ api_key=api_key,
retry_on_api_error=retry_on_api_error,
)
@@ -672,7 +575,7 @@ def _request(
params: Dict[str, JSONLike] = None,
server: str = None,
headers: dict = None,
- token: str = None,
+ api_key: str = None,
retry_on_api_error: bool = True,
) -> "requests.models.Response":
"""
@@ -685,7 +588,7 @@ def _request(
- server (str, optional): The server to make requests against, base API
server is used if not specified
- headers (dict, optional): Headers to pass with the request
- - token (str): an auth token. If not supplied, the `client.access_token` is used.
+ - api_key (str): An api key for auth. Defaults to `client.api_key`.
- retry_on_api_error (bool): whether the operation should be retried if the API returns
an API_ERROR code
@@ -693,7 +596,7 @@ def _request(
- requests.models.Response: The response returned from the request
Raises:
- - ClientError: if the client token is not in the context (due to not being logged in)
+ - ClientError: on bad responses from the API
- ValueError: if a method is specified outside of the accepted GET, POST, DELETE
- requests.HTTPError: if a status code is returned that is not `200` or `401`
"""
@@ -701,8 +604,7 @@ def _request(
server = self.api_server
assert isinstance(server, str) # mypy assert
- if token is None:
- token = self.get_auth_token()
+ api_key = api_key or self.api_key
# 'import requests' is expensive time-wise, we should do this just-in-time to keep
# the 'import prefect' time low
@@ -713,8 +615,8 @@ def _request(
params = params or {}
headers = headers or {}
- if token:
- headers["Authorization"] = "Bearer {}".format(token)
+ if api_key:
+ headers["Authorization"] = "Bearer {}".format(api_key)
if self.api_key and self._tenant_id:
# Attach a tenant id to the headers if using an API key since it can be
@@ -781,256 +683,69 @@ def attach_headers(self, headers: dict) -> None:
"""
self._attached_headers.update(headers)
- # API Token Authentication ---------------------------------------------------------
- # This is all deprecated and slated for removal in 0.16.0 when API token support is
- # dropped
-
- @property
- def _api_token_settings_path(self) -> Path:
- """
- Returns the local settings directory corresponding to the current API servers
- when using an API token
-
- DEPRECATED: API keys have replaced API tokens. API keys are stored in a new
- location. See `_auth_file`.
- """
- path = "{home}/client/{server}".format(
- home=prefect.context.config.home_dir,
- server=slugify(self.api_server, regex_pattern=r"[^-\.a-z0-9]+"),
- )
- return Path(os.path.expanduser(path)) / "settings.toml"
-
- @property
- def active_tenant_id(self) -> Optional[str]:
- """
- DEPRECATED: This retains an old property used by API tokens. `tenant_id` is the
- new implementation.
- """
- return self.tenant_id
-
- def _save_local_settings(self, settings: dict) -> None:
- """
- Writes settings to local storage
-
- DEPRECATED: API keys have replaced API tokens. API keys are stored in a new
- location. See `save_auth_to_disk`
- """
- self._api_token_settings_path.parent.mkdir(exist_ok=True, parents=True)
- with self._api_token_settings_path.open("w+") as f:
- toml.dump(settings, f)
-
- def _load_local_settings(self) -> dict:
- """
- Loads settings from local storage
-
- DEPRECATED: API keys have replaced API tokens. API keys are stored in a new
- location. See `load_auth_from_disk`
- """
- if self._api_token_settings_path.exists():
- with self._api_token_settings_path.open("r") as f:
- return toml.load(f) # type: ignore
- return {}
-
- def save_api_token(self) -> None:
- """
- Saves the API token in local storage.
-
- DEPRECATED: API keys have replaced API tokens. API keys are stored in a new
- location. See `save_auth_to_disk`
+ def switch_tenant(self, tenant_slug: str = None, tenant_id: str = None) -> str:
"""
- settings = self._load_local_settings()
- settings["api_token"] = self._api_token
- self._save_local_settings(settings)
+ Switch this client to the given tenant by slug or tenant id.
- def get_auth_token(self) -> str:
- """
- Returns an auth token:
- - if there's an API key, return that immediately
- - if no explicit access token is stored, returns the api token
- - if there is an access token:
- - if there's a refresh token and the access token expires in the next 30 seconds,
- then we refresh the access token and store the result
- - return the access token
-
-
- DEPRECATED: API keys have replaced API tokens. We no longer need this refresh
- logic for API keys.
-
- Returns:
- - str: the access token
- """
- if self.api_key:
- return self.api_key
-
- if not self._access_token:
- return self._api_token # type: ignore
-
- expiration = self._access_token_expires_at or pendulum.now()
- if self._refresh_token and pendulum.now().add(seconds=30) > expiration:
- self._refresh_access_token()
-
- return self._access_token
-
- def get_available_tenants(self) -> List[Dict]:
- """
- Returns a list of available tenants.
-
- NOTE: this should only be called by users who have provided a USER-scoped API token.
-
- Returns:
- - List[Dict]: a list of dictionaries containing the id, slug, and name of
- available tenants
- """
- result = self.graphql(
- {"query": {"tenant(order_by: {slug: asc})": {"id", "slug", "name"}}},
- # API keys can see all available tenants. If not using an API key, we can't
- # use the access token which is scoped to a single tenant
- token=self.api_key or self._api_token,
- )
- return result.data.tenant # type: ignore
-
- def login_to_tenant(self, tenant_slug: str = None, tenant_id: str = None) -> bool:
- """
- Log in to a specific tenant
-
- If using an API key, the client tenant will be updated but will not be saved to
- disk without an explicit call.
-
- If using an API token, it must be USER-scoped API token. The client tenant will
- be updated and the new tenant will be saved to disk for future clients.
+ The client tenant will be updated but will not be saved to disk without an
+ explicit call to `save_auth_to_disk`.
Args:
- tenant_slug (str): the tenant's slug
- tenant_id (str): the tenant's id
Returns:
- - bool: True if the login was successful
+ - str: The id of the tenant
Raises:
- - ValueError: if at least one of `tenant_slug` or `tenant_id` isn't provided
- - ValueError: if the `tenant_id` is not a valid UUID
- ValueError: if no matching tenants are found
+ - ValueError: if both `tenant_slug` and `tenant_id` are provided
+ - AuthorizationError: if the API key is not valid for the given tenant
"""
# Validate the given tenant id -------------------------------------------------
- if tenant_slug is None and tenant_id is None:
+ if tenant_slug and tenant_id:
raise ValueError(
- "At least one of `tenant_slug` or `tenant_id` must be provided."
+ "Received both `tenant_slug` and `tenant_id`; only one is allowed."
)
- elif tenant_id:
- # TODO: Consider removing this check. This would be caught by GraphQL
- try:
- uuid.UUID(tenant_id)
- except ValueError as exc:
- raise ValueError("The `tenant_id` must be a valid UUID.") from exc
-
- tenant = self.graphql(
- {
- "query($slug: String, $id: uuid)": {
- "tenant(where: {slug: { _eq: $slug }, id: { _eq: $id } })": {"id"}
- }
- },
- variables=dict(slug=tenant_slug, id=tenant_id),
- # API keys can see all available tenants. If not using an API key, we can't
- # use the access token which is scoped to a single tenant
- token=self.api_key or self._api_token,
- )
- if not tenant.data.tenant:
- raise ValueError("No matching tenant found.")
- # We may have been given just the slug so set the id
- tenant_id = tenant.data.tenant[0].id
-
- # Update the tenant the client is using ----------------------------------------
- self._tenant_id = tenant_id
-
- # Backwards compatibility for API tokens ---------------------------------------
- # - Get a new access token for the tenant
- # - Save it to disk
-
- if not self.api_key and prefect.config.backend == "cloud":
- payload = self.graphql(
+ if tenant_slug:
+ tenant = self.graphql(
{
- "mutation($input: switch_tenant_input!)": {
- "switch_tenant(input: $input)": {
- "access_token",
- "expires_at",
- "refresh_token",
- }
+ "query($slug: String)": {
+ "tenant(where: {slug: { _eq: $slug } })": {"id"}
}
},
- variables=dict(input=dict(tenant_id=tenant_id)),
- # Use the API token to switch tenants
- token=self._api_token,
- )
- self._access_token = payload.data.switch_tenant.access_token
- self._access_token_expires_at = cast(
- pendulum.DateTime, pendulum.parse(payload.data.switch_tenant.expires_at)
+ variables=dict(slug=tenant_slug),
)
- self._refresh_token = payload.data.switch_tenant.refresh_token
+ if not tenant.data.tenant:
+ raise ValueError(f"No matching tenant found for slug {tenant_slug!r}.")
- # Save the tenant setting to disk
- settings = self._load_local_settings()
- settings["active_tenant_id"] = self.tenant_id
- self._save_local_settings(settings)
+ tenant_id = tenant.data.tenant[0].id
- return True
+ if not tenant_id:
+ raise ValueError("A `tenant_id` or `tenant_slug` must be provided.")
- def logout_from_tenant(self) -> None:
- """
- DEPRECATED: API keys have replaced API tokens.
-
- Logout can be accomplished for API keys with:
- ```
- client = Client()
- client.api_key = ""
- client._tenant_id = ""
- client.save_auth_to_disk()
- ```
- """
- self._access_token = None
- self._refresh_token = None
- self._tenant_id = None
+ self.tenant_id = tenant_id
+ self._get_auth_tenant()
- # remove the tenant setting
- settings = self._load_local_settings()
- settings["active_tenant_id"] = None
- self._save_local_settings(settings)
+ return self.tenant_id
- def _refresh_access_token(self) -> bool:
+ def get_available_tenants(self) -> List[Dict]:
"""
- Refresh the client's JWT access token.
-
- NOTE: this should only be called by users who have provided a USER-scoped API token.
-
- DEPRECATED: API keys have replaced API tokens
+ Returns a list of available tenants.
Returns:
- - bool: True if the refresh succeeds
+ - List[Dict]: a list of dictionaries containing the id, slug, and name of
+ available tenants
"""
- payload = self.graphql(
- {
- "mutation($input: refresh_token_input!)": {
- "refresh_token(input: $input)": {
- "access_token",
- "expires_at",
- "refresh_token",
- }
- }
- },
- variables=dict(input=dict(access_token=self._access_token)),
- # pass the refresh token as the auth header
- token=self._refresh_token,
- ) # type: ignore
- self._access_token = payload.data.refresh_token.access_token # type: ignore
- self._access_token_expires_at = pendulum.parse( # type: ignore
- payload.data.refresh_token.expires_at # type: ignore
- ) # type: ignore
- self._refresh_token = payload.data.refresh_token.refresh_token # type: ignore
-
- return True
+ result = self.graphql(
+ {"query": {"tenant(order_by: {slug: asc})": {"id", "slug", "name"}}},
+ api_key=self.api_key,
+ )
+ return result.data.tenant # type: ignore
# -------------------------------------------------------------------------
# Actions
@@ -1053,7 +768,7 @@ def register(
Args:
- flow (Flow): a flow to register
- project_name (str, optional): the project that should contain this flow.
- - build (bool, optional): if `True`, the flow's environment is built
+ - build (bool, optional): if `True`, the flow's storage is built
prior to serialization; defaults to `True`
- set_schedule_active (bool, optional): if `False`, will set the schedule to
inactive in the database to prevent auto-scheduling runs (if the Flow has a
@@ -1132,19 +847,6 @@ def register(
serialized_flow = flow.serialize(build=build) # type: Any
- # Configure environment.metadata (if using environment-based flows)
- if flow.environment is not None:
- # Set Docker storage image in environment metadata if provided
- if isinstance(flow.storage, prefect.storage.Docker):
- flow.environment.metadata["image"] = flow.storage.name
- serialized_flow = flow.serialize(build=False)
-
- # If no image ever set, default metadata to image on current version
- if not flow.environment.metadata.get("image"):
- version = prefect.__version__.split("+")[0]
- flow.environment.metadata["image"] = f"prefecthq/prefect:{version}"
- serialized_flow = flow.serialize(build=False)
-
# verify that the serialized flow can be deserialized
try:
prefect.serialization.flow.FlowSchema().load(serialized_flow)
@@ -1267,8 +969,6 @@ def register(
# Extra information to improve visibility
if flow.run_config is not None:
labels = sorted(flow.run_config.labels)
- elif flow.environment is not None:
- labels = sorted(flow.environment.labels)
else:
labels = []
msg = (
@@ -1280,15 +980,13 @@ def register(
return flow_id
- def get_cloud_url(self, subdirectory: str, id: str, as_user: bool = True) -> str:
+ def get_cloud_url(self, subdirectory: str, id: str) -> str:
"""
Convenience method for creating Prefect Cloud URLs for a given subdirectory.
Args:
- subdirectory (str): the subdirectory to use (e.g., `"flow-run"`)
- id (str): the ID of the page
- - as_user (bool, optional): whether this query is being made from a USER scoped token;
- defaults to `True`. Only used internally for queries made from RUNNERs
Returns:
- str: the URL corresponding to the appropriate base URL, tenant slug, subdirectory
@@ -1307,10 +1005,7 @@ def get_cloud_url(self, subdirectory: str, id: str, as_user: bool = True) -> str
# Search for matching cloud API because we can't guarantee that the backend config is set
using_cloud_api = ".prefect.io" in prefect.config.cloud.api
- # Only use the "old" `as_user` logic if using an api token
- tenant_slug = self.get_default_tenant_slug(
- as_user=(as_user and using_cloud_api and self._api_token is not None)
- )
+ tenant_slug = self.get_default_tenant_slug()
# For various API versions parse out `api-` for direct UI link
base_url = (
@@ -1325,43 +1020,24 @@ def get_cloud_url(self, subdirectory: str, id: str, as_user: bool = True) -> str
return "/".join([base_url.rstrip("/"), tenant_slug, subdirectory, id])
- def get_default_tenant_slug(self, as_user: bool = False) -> str:
+ def get_default_tenant_slug(self) -> str:
"""
Get the default tenant slug for the currently authenticated user
- Args:
- - as_user (bool, optional):
- whether this query is being made from a USER scoped token;
- defaults to `False`. Only relevant when using an API token.
-
Returns:
- str: the slug of the current default tenant for this user
"""
- if as_user:
- query = {
- "query": {"user": {"default_membership": {"tenant": "slug"}}}
- } # type: dict
- else:
- query = {"query": {"tenant": {"id", "slug"}}}
-
- res = self.graphql(query)
+ res = self.graphql({"query": {"tenant": {"id", "slug"}}})
- if as_user:
- user = res.get("data").user[0]
- slug = user.default_membership.tenant.slug
- else:
- tenants = res["data"]["tenant"]
- for tenant in tenants:
- # Return the slug if it matches the current tenant id OR if there is no
- # current tenant id we are using a RUNNER API token so we'll return
- # the first (and only) tenant
- if tenant.id == self.tenant_id or self.tenant_id is None:
- return tenant.slug
- raise ValueError(
- f"Failed to find current tenant {self.tenant_id!r} in result {res}"
- )
+ tenants = res["data"]["tenant"]
+ for tenant in tenants:
+ # Return the slug if it matches the current tenant id
+ if tenant.id == self.tenant_id:
+ return tenant.slug
- return slug
+ raise ValueError(
+ f"Failed to find current tenant {self.tenant_id!r} in result {res}"
+ )
def create_project(self, project_name: str, project_description: str = None) -> str:
"""
diff --git a/src/prefect/configuration.py b/src/prefect/configuration.py
index d2e9aa7b7226..a55b6cf8fbe3 100644
--- a/src/prefect/configuration.py
+++ b/src/prefect/configuration.py
@@ -1,7 +1,6 @@
import datetime
import os
import re
-import warnings
from ast import literal_eval
from typing import Optional, Union, cast, Iterable
@@ -351,16 +350,6 @@ def load_configuration(
return config
-def warn_on_deprecated_config_keys(config: Config) -> None:
- # logging.log_to_cloud: Deprecated in 0.14.20
- # Only warn if they've tried to disable logging using this key
- if "log_to_cloud" in config.logging and config.logging.log_to_cloud is False:
- warnings.warn(
- "`prefect.logging.log_to_cloud` is deprecated. "
- "Please use `prefect.cloud.send_flow_run_logs` instead."
- )
-
-
def load_default_config() -> "Config":
# load prefect configuration
config = load_configuration(
@@ -373,9 +362,6 @@ def load_default_config() -> "Config":
# add task defaults
config = process_task_defaults(config)
- # handle deprecations
- warn_on_deprecated_config_keys(config)
-
return config
diff --git a/src/prefect/core/__init__.py b/src/prefect/core/__init__.py
index ef60e55c7af4..3e597435fcf4 100644
--- a/src/prefect/core/__init__.py
+++ b/src/prefect/core/__init__.py
@@ -2,3 +2,5 @@
from prefect.core.parameter import Parameter
from prefect.core.edge import Edge
from prefect.core.flow import Flow
+
+__all__ = ["Edge", "Flow", "Parameter", "Task"]
diff --git a/src/prefect/core/flow.py b/src/prefect/core/flow.py
index 0db8bb9fbb1a..f28a44bf2c57 100644
--- a/src/prefect/core/flow.py
+++ b/src/prefect/core/flow.py
@@ -40,7 +40,6 @@
from prefect.executors import Executor
from prefect.engine.result import Result
from prefect.engine.state import State
-from prefect.environments import Environment
from prefect.storage import Storage, get_default_storage_class
from prefect.run_configs import RunConfig, UniversalRun
from prefect.utilities import diagnostics, logging
@@ -118,8 +117,6 @@ def my_task():
- executor (prefect.executors.Executor, optional): The executor that the flow
should use. If `None`, the default executor configured in the runtime environment
will be used.
- - environment (prefect.environments.Environment, optional, DEPRECATED): The environment
- that the flow should be run in.
- run_config (prefect.run_configs.RunConfig, optional): The runtime
configuration to use when deploying this flow.
- storage (prefect.storage.Storage, optional): The unit of storage
@@ -158,7 +155,6 @@ def __init__(
name: str,
schedule: prefect.schedules.Schedule = None,
executor: Executor = None,
- environment: Environment = None,
run_config: RunConfig = None,
storage: Storage = None,
tasks: Iterable[Task] = None,
@@ -181,7 +177,6 @@ def __init__(
self.logger = logging.get_logger(self.name)
self.schedule = schedule
self.executor = executor
- self.environment = environment
self.run_config = run_config
self.storage = storage
self.result = result
@@ -1459,7 +1454,7 @@ def serialize(self, build: bool = False) -> dict:
Creates a serialized representation of the flow.
Args:
- - build (bool, optional): if `True`, the flow's environment is built
+ - build (bool, optional): if `True`, the flow's storage is built
prior to serialization
Returns:
@@ -1514,7 +1509,7 @@ def serialized_hash(self, build: bool = False) -> str:
if a new version is not registered with the server.
Args:
- - build (bool, optional): if `True`, the flow's environment is built
+ - build (bool, optional): if `True`, the flow's storage is built
prior to serialization. Passed through to `Flow.serialize()`.
Returns:
@@ -1620,8 +1615,6 @@ def run_agent(
with set_temporary_config(temp_config):
if self.run_config is not None:
labels = list(self.run_config.labels or ())
- elif self.environment is not None:
- labels = list(self.environment.labels or ())
else:
labels = []
agent = prefect.agent.local.LocalAgent(
@@ -1646,9 +1639,9 @@ def register(
Args:
- project_name (str, optional): the project that should contain this flow.
- - build (bool, optional): if `True`, the flow's environment is built
+ - build (bool, optional): if `True`, the flow's storage is built
prior to serialization; defaults to `True`
- - labels (List[str], optional): a list of labels to add to this Flow's environment;
+ - labels (List[str], optional): a list of labels to add to this Flow;
useful for associating Flows with individual Agents; see
http://docs.prefect.io/orchestration/agents/overview.html#labels
- set_schedule_active (bool, optional): if `False`, will set the schedule to
@@ -1690,33 +1683,16 @@ def register(
)
return None
- if (
- self.environment is not None
- and self.run_config is None
- and self.executor is not None
- ):
- warnings.warn(
- "This flow is using the deprecated `flow.environment` based configuration, "
- "but has `flow.executor` set.\n\n"
- "This executor will be *not* be used at runtime.\n\n"
- "Please transition to the `flow.run_config` based system instead to "
- "make use of setting `flow.executor`. "
- "See https://docs.prefect.io/orchestration/flow_config/overview.html "
- "for more information.",
- stacklevel=2,
- )
-
if self.storage is None:
self.storage = get_default_storage_class()(**kwargs)
- if self.environment is None and self.run_config is None:
+ if self.run_config is None:
self.run_config = UniversalRun()
- # add auto-labels for various types of storage
- for obj in [self.environment, self.run_config]:
- if obj is not None:
- obj.labels.update(self.storage.labels)
- obj.labels.update(labels or ())
+ # Add run config labels from storage
+ if self.run_config is not None:
+ self.run_config.labels.update(self.storage.labels)
+ self.run_config.labels.update(labels or ())
# register the flow with a default result if one not provided
if not self.result:
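
On the flow side, the practical migration is from `environment=` to `run_config=`; a hedged sketch (the label value is a placeholder):

```python
# Hedged migration sketch: configure flows with `run_config` now that the
# `environment` keyword is gone.
from prefect import Flow
from prefect.run_configs import UniversalRun

flow = Flow("example", run_config=UniversalRun(labels=["prod"]))

# If no run_config is given, register() now falls back to UniversalRun()
# and merges in storage labels plus any labels passed to register().
```
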
diff --git a/src/prefect/core/task.py b/src/prefect/core/task.py
index a96496be7f45..3e82c8bcb8db 100644
--- a/src/prefect/core/task.py
+++ b/src/prefect/core/task.py
@@ -1310,16 +1310,3 @@ def __le__(self, other: object) -> "Task":
for p in inspect.Signature.from_callable(Task.__call__).parameters.values()
if p.kind == inspect.Parameter.KEYWORD_ONLY
]
-
-
-# DEPRECATED - this is to allow backwards-compatible access to Parameters
-# https://github.com/PrefectHQ/prefect/pull/2758
-from .parameter import Parameter as _Parameter
-
-
-class Parameter(_Parameter):
- def __new__(cls, *args, **kwargs): # type: ignore
- warnings.warn(
- "`Parameter` has moved, please import as `prefect.Parameter`", stacklevel=2
- )
- return super().__new__(cls)
diff --git a/src/prefect/engine/__init__.py b/src/prefect/engine/__init__.py
index 5d406f72c655..b0dc7ca40941 100644
--- a/src/prefect/engine/__init__.py
+++ b/src/prefect/engine/__init__.py
@@ -76,3 +76,6 @@ def get_default_task_runner_class() -> type:
return prefect.engine.task_runner.TaskRunner
else:
return config_value
+
+
+__all__ = ["FlowRunner", "TaskRunner"]
diff --git a/src/prefect/engine/cloud/__init__.py b/src/prefect/engine/cloud/__init__.py
index 33d633727ef1..5152d131afe4 100644
--- a/src/prefect/engine/cloud/__init__.py
+++ b/src/prefect/engine/cloud/__init__.py
@@ -1,2 +1,4 @@
from prefect.engine.cloud.task_runner import CloudTaskRunner
from prefect.engine.cloud.flow_runner import CloudFlowRunner
+
+__all__ = ["CloudFlowRunner", "CloudTaskRunner"]
diff --git a/src/prefect/engine/executors/__init__.py b/src/prefect/engine/executors/__init__.py
index 2b13f715a5fd..829f01e325c5 100644
--- a/src/prefect/engine/executors/__init__.py
+++ b/src/prefect/engine/executors/__init__.py
@@ -1,3 +1,5 @@
from prefect.executors.base import Executor
from prefect.engine.executors.dask import DaskExecutor, LocalDaskExecutor
from prefect.engine.executors.local import LocalExecutor
+
+__all__ = ["DaskExecutor", "Executor", "LocalDaskExecutor", "LocalExecutor"]
diff --git a/src/prefect/engine/result/__init__.py b/src/prefect/engine/result/__init__.py
index b04207509e37..e2c379581619 100644
--- a/src/prefect/engine/result/__init__.py
+++ b/src/prefect/engine/result/__init__.py
@@ -5,3 +5,5 @@
"""
import prefect
from prefect.engine.result.base import Result, NoResult, NoResultType
+
+__all__ = ["NoResult", "NoResultType", "Result"]
diff --git a/src/prefect/engine/results/__init__.py b/src/prefect/engine/results/__init__.py
index 1ef6a404f35b..7e58e22fcfbc 100644
--- a/src/prefect/engine/results/__init__.py
+++ b/src/prefect/engine/results/__init__.py
@@ -39,3 +39,13 @@ def my_example_task():
from prefect.engine.results.azure_result import AzureResult
from prefect.engine.results.s3_result import S3Result
from prefect.engine.results.secret_result import SecretResult
+
+__all__ = [
+ "AzureResult",
+ "ConstantResult",
+ "GCSResult",
+ "LocalResult",
+ "PrefectResult",
+ "S3Result",
+ "SecretResult",
+]
diff --git a/src/prefect/environments/__init__.py b/src/prefect/environments/__init__.py
deleted file mode 100644
index eebb1f96f64d..000000000000
--- a/src/prefect/environments/__init__.py
+++ /dev/null
@@ -1,8 +0,0 @@
-from prefect.environments.execution import (
- Environment,
- DaskKubernetesEnvironment,
- FargateTaskEnvironment,
- KubernetesJobEnvironment,
- LocalEnvironment,
- DaskCloudProviderEnvironment,
-)
diff --git a/src/prefect/environments/execution/__init__.py b/src/prefect/environments/execution/__init__.py
deleted file mode 100644
index cf99ad9c7199..000000000000
--- a/src/prefect/environments/execution/__init__.py
+++ /dev/null
@@ -1,13 +0,0 @@
-"""
-Execution environments encapsulate the logic for where your Flow should execute in Prefect Cloud.
-
-DEPRECATED: Environment based configuration is deprecated, please transition to
-configuring `flow.run_config` instead of `flow.environment`. See
-https://docs.prefect.io/orchestration/flow_config/overview.html for more info.
-"""
-from prefect.environments.execution.base import Environment, load_and_run_flow
-from prefect.environments.execution.dask import DaskKubernetesEnvironment
-from prefect.environments.execution.dask import DaskCloudProviderEnvironment
-from prefect.environments.execution.fargate import FargateTaskEnvironment
-from prefect.environments.execution.k8s import KubernetesJobEnvironment
-from prefect.environments.execution.local import LocalEnvironment
diff --git a/src/prefect/environments/execution/base.py b/src/prefect/environments/execution/base.py
deleted file mode 100644
index 432e49b321d5..000000000000
--- a/src/prefect/environments/execution/base.py
+++ /dev/null
@@ -1,176 +0,0 @@
-"""
-Environments are JSON-serializable objects that fully describe how to run a flow. Serialization
-schemas are contained in `prefect.serialization.environment.py`.
-
-Different Environment objects correspond to different computation environments. Environments
-that are written on top of a type of infrastructure also define how to set up and execute
-that environment. e.g. the `DaskKubernetesEnvironment` is an environment which
-runs a flow on Kubernetes using the `dask-kubernetes` library.
-"""
-
-import warnings
-from typing import Callable, Iterable, TYPE_CHECKING
-
-import prefect
-from prefect.client import Client
-from prefect.utilities import logging
-from prefect.utilities.graphql import with_args
-
-if TYPE_CHECKING:
- from prefect.core.flow import Flow # pylint: disable=W0611
-
-
-class Environment:
- """
- Base class for Environments.
-
- DEPRECATED: Environment based configuration is deprecated, please transition to
- configuring `flow.run_config` instead of `flow.environment`. See
- https://docs.prefect.io/orchestration/flow_config/overview.html for more info.
-
- An environment is an object that can be instantiated in a way that makes it possible to
- call `environment.setup()` to stand up any required static infrastructure and
- `environment.execute()` to execute the flow inside this environment.
-
- The `setup` and `execute` functions of an environment require a Prefect Flow object.
-
- Args:
- - labels (List[str], optional): a list of labels, which are arbitrary string
- identifiers used by Prefect Agents when polling for work
- - on_start (Callable, optional): a function callback which will be called before the
- flow begins to run
- - on_exit (Callable, optional): a function callback which will be called after the flow
- finishes its run
- - metadata (dict, optional): extra metadata to be set and serialized on this environment
- """
-
- def __init__(
- self,
- labels: Iterable[str] = None,
- on_start: Callable = None,
- on_exit: Callable = None,
- metadata: dict = None,
- ) -> None:
- self.labels = set(labels) if labels else set()
- self.on_start = on_start
- self.on_exit = on_exit
- self.metadata = metadata or {}
- self.logger = logging.get_logger(type(self).__name__)
- warnings.warn(
- "`Environment` based flow configuration is deprecated, please transition to configuring "
- "`flow.run_config` instead of `flow.environment`. "
- "See https://docs.prefect.io/orchestration/flow_config/overview.html for more info.",
- stacklevel=2 if type(self) is Environment else 3,
- )
-
- def __repr__(self) -> str:
- return "".format(type(self).__name__)
-
- @property
- def dependencies(self) -> list:
- return []
-
- def setup(self, flow: "Flow") -> None:
- """
- Sets up any infrastructure needed for this environment
-
- Args:
- - flow (Flow): the Flow object
- """
-
- def execute(self, flow: "Flow") -> None:
- """
- Executes the flow for this environment.
-
- Args:
- - flow (Flow): the Flow object
- """
-
- def serialize(self) -> dict:
- """
- Returns a serialized version of the Environment
-
- Returns:
- - dict: the serialized Environment
- """
- schema = prefect.serialization.environment.EnvironmentSchema()
- return schema.dump(self)
-
-
-class _RunMixin:
- """This mixin will go away when all environments share the same run
- implementation.
-
- For now this is just to share code between a few of the environments"""
-
- def run(self, flow: "Flow") -> None:
- """
- Run the flow using this environment.
-
- Args:
- - flow (Flow): the flow object
- """
- assert isinstance(self, Environment) # mypy
- if self.on_start:
- self.on_start()
-
- try:
- from prefect.engine import get_default_flow_runner_class
-
- runner_cls = get_default_flow_runner_class()
- runner_cls(flow=flow).run(executor=self.executor) # type: ignore
- except Exception as exc:
- self.logger.exception(
- "Unexpected error raised during flow run: {}".format(exc)
- )
- raise exc
- finally:
- if self.on_exit:
- self.on_exit()
-
-
-def load_and_run_flow() -> None:
- """
- Loads a flow (and the corresponding environment), then runs the flow with
- the environment.
-
- This is useful for environments whose `execute` method schedules a job that
- later needs to run the flow.
-
- Raises:
- - ValueError: if no `flow_run_id` is found in context
- """
- logger = logging.get_logger("Environment")
- try:
- flow_run_id = prefect.context.get("flow_run_id")
-
- if not flow_run_id:
- raise ValueError("No flow run ID found in context.")
-
- query = {
- "query": {
- with_args("flow_run", {"where": {"id": {"_eq": flow_run_id}}}): {
- "flow": {"name": True, "storage": True},
- }
- }
- }
-
- client = Client()
- result = client.graphql(query)
- flow_run = result.data.flow_run[0]
-
- flow_data = flow_run.flow
- storage_schema = prefect.serialization.storage.StorageSchema()
- storage = storage_schema.load(flow_data.storage)
-
- # populate global secrets
- secrets = prefect.context.get("secrets", {})
- for secret in storage.secrets:
- secrets[secret] = prefect.tasks.secrets.PrefectSecret(name=secret).run()
-
- with prefect.context(secrets=secrets):
- flow = storage.get_flow(flow_data.name)
- flow.environment.run(flow)
- except Exception as exc:
- logger.exception("Unexpected error raised during flow run: {}".format(exc))
- raise exc
diff --git a/src/prefect/environments/execution/dask/__init__.py b/src/prefect/environments/execution/dask/__init__.py
deleted file mode 100644
index 0180ac36c5e4..000000000000
--- a/src/prefect/environments/execution/dask/__init__.py
+++ /dev/null
@@ -1,4 +0,0 @@
-from prefect.environments.execution.dask.k8s import DaskKubernetesEnvironment
-from prefect.environments.execution.dask.cloud_provider import (
- DaskCloudProviderEnvironment,
-)
diff --git a/src/prefect/environments/execution/dask/cloud_provider.py b/src/prefect/environments/execution/dask/cloud_provider.py
deleted file mode 100644
index b829e7230390..000000000000
--- a/src/prefect/environments/execution/dask/cloud_provider.py
+++ /dev/null
@@ -1,252 +0,0 @@
-from typing import Any, Callable, Dict, List, Type, TYPE_CHECKING
-from urllib.parse import urlparse
-
-import prefect
-from prefect import Client
-from prefect.environments.execution import Environment
-
-if TYPE_CHECKING:
- from prefect.core.flow import Flow
- from distributed.deploy.cluster import Cluster
- from distributed.security import Security
-
-
-class DaskCloudProviderEnvironment(Environment):
- """
- DaskCloudProviderEnvironment creates Dask clusters using the Dask Cloud Provider
- project.
-
- DEPRECATED: Environment based configuration is deprecated, please transition to
- configuring `flow.run_config` instead of `flow.environment`. See
- https://docs.prefect.io/orchestration/flow_config/overview.html for more info.
-
- For each flow run, a new Dask cluster will be dynamically created and the
- flow will run using a `DaskExecutor` with the Dask scheduler address from the newly
- created Dask cluster. You can specify the number of Dask workers manually
- (for example, passing the kwarg `n_workers`) or enable adaptive mode by
- passing `adaptive_min_workers` and, optionally, `adaptive_max_workers`. This
- environment aims to provide a very easy path to Dask scalability for users of
- cloud platforms, like AWS.
-
- **NOTE:** AWS Fargate Task (not Prefect Task) startup time can be slow, depending
- on docker image size. Total startup time for a Dask scheduler and workers can
- be several minutes. This environment is a much better fit for production
- deployments of scheduled Flows where there's little sensitivity to startup
- time. `DaskCloudProviderEnvironment` is a particularly good fit for automated
- deployment of Flows in a CI/CD pipeline where the infrastructure for each Flow
- should be as independent as possible, e.g. each Flow could have its own docker
- image, dynamically create the Dask cluster to run on, etc. However, for
- development and interactive testing, creating a Dask cluster manually with Dask
- Cloud Provider and then using `LocalEnvironment` with a `DaskExecutor`
- will result in a much better development experience.
-
- (Dask Cloud Provider currently only supports AWS using either Fargate or ECS.
- Support for AzureML is coming soon.)
-
- *IMPORTANT* By default, Dask Cloud Provider may create a Dask cluster in some
- environments (e.g. Fargate) that is accessible via a public IP, without any
- authentication, and configured to NOT encrypt network traffic. Please be
- conscious of security issues if you test this environment. (Also see pull
- requests [85](https://github.com/dask/dask-cloudprovider/pull/85) and
- [91](https://github.com/dask/dask-cloudprovider/pull/91) in the Dask Cloud
- Provider project.)
-
- Args:
- - provider_class (class): Class of a provider from the Dask Cloud Provider
- projects. Current supported options are `ECSCluster` and `FargateCluster`.
- - adaptive_min_workers (int, optional): Minimum number of workers for adaptive
- mode. If this value is None, then adaptive mode will not be used and you
- should pass `n_workers` or the appropriate kwarg for the provider class you
- are using.
- - adaptive_max_workers (int, optional): Maximum number of workers for adaptive
- mode.
- - security (Type[Security], optional): a Dask Security object from
- `distributed.security.Security`. Use this to connect to a Dask cluster that is
- enabled with TLS encryption. For more on using TLS with Dask see
- https://distributed.dask.org/en/latest/tls.html
- - executor_kwargs (dict, optional): a dictionary of kwargs to be passed to
- the executor; defaults to an empty dictionary
- - labels (List[str], optional): a list of labels, which are arbitrary string
- identifiers used by Prefect Agents when polling for work
- - on_execute (Callable[[Dict[str, Any], Dict[str, Any]], None], optional): a function
- callback which will be called before the flow begins to run. The callback function
- can examine the Flow run parameters and modify kwargs to be passed to the Dask
- Cloud Provider class's constructor prior to launching the Dask cluster for the Flow
- run. This allows for dynamically sizing the cluster based on the Flow run
parameters, e.g. setting `n_workers` (see the example below). The callback function's signature should be:
- `on_execute(parameters: Dict[str, Any], provider_kwargs: Dict[str, Any]) -> None`
- The callback function may modify provider_kwargs
- (e.g. `provider_kwargs["n_workers"] = 3`) and any relevant changes will be used when
- creating the Dask cluster via a Dask Cloud Provider class.
- - on_start (Callable, optional): a function callback which will be called before the
- flow begins to run
- - on_exit (Callable, optional): a function callback which will be called after the flow
- finishes its run
- - metadata (dict, optional): extra metadata to be set and serialized on this environment
- - **kwargs (dict, optional): additional keyword arguments to pass to boto3 for
- `register_task_definition` and `run_task`
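-
- Example (an illustrative sketch; `resize_cluster` and the `n_workers` flow-run
- parameter are hypothetical, and on newer dask-cloudprovider releases the import
- is `from dask_cloudprovider.aws import FargateCluster`):
-
- ```python
- from dask_cloudprovider import FargateCluster
-
- def resize_cluster(parameters: dict, provider_kwargs: dict) -> None:
-     # size the Dask cluster from a flow-run parameter, defaulting to 2 workers
-     provider_kwargs["n_workers"] = int(parameters.get("n_workers", 2))
-
- env = DaskCloudProviderEnvironment(
-     provider_class=FargateCluster,
-     on_execute=resize_cluster,
- )
- ```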
- """
-
- def __init__( # type: ignore
- self,
- provider_class: "Type[Cluster]",
- adaptive_min_workers: int = None,
- adaptive_max_workers: int = None,
- security: "Security" = None,
- executor_kwargs: Dict[str, Any] = None,
- labels: List[str] = None,
- on_execute: Callable[[Dict[str, Any], Dict[str, Any]], None] = None,
- on_start: Callable = None,
- on_exit: Callable = None,
- metadata: dict = None,
- **kwargs
- ) -> None:
- self._provider_class = provider_class
- self._adaptive_min_workers = adaptive_min_workers
- self._adaptive_max_workers = adaptive_max_workers
- self._on_execute = on_execute
- self._provider_kwargs = kwargs
- self.executor_kwargs = (executor_kwargs or {}).copy()
- if "skip_cleanup" not in self._provider_kwargs:
- # Prefer this default (if not provided) to avoid deregistering task definitions. See
- # this issue in Dask Cloud Provider:
- # https://github.com/dask/dask-cloudprovider/issues/94
- self._provider_kwargs["skip_cleanup"] = True
- self._security = security
- if self._security:
- # We'll use the security config object both for our Dask Client connection *and*
- # for the particular Dask Cloud Provider (e.g. Fargate) to use with *its* Dask
- # Client when it connects to the scheduler after cluster creation. So we
- # put it in _provider_kwargs so it gets passed to the Dask Cloud Provider's constructor
- self._provider_kwargs["security"] = self._security
- self.executor_kwargs["client_kwargs"] = {"security": self._security}
-
- self.cluster = None
-
- super().__init__(
- labels=labels,
- on_start=on_start,
- on_exit=on_exit,
- metadata=metadata,
- )
-
- @property
- def dependencies(self) -> list:
- return ["dask_cloudprovider"]
-
- def _create_dask_cluster(self) -> None:
- self.logger.info("Creating Dask cluster using {}".format(self._provider_class))
- self.cluster = self._provider_class(**self._provider_kwargs)
- if self.cluster and self.cluster.scheduler and self.cluster.scheduler.address:
- self.logger.info(
- "Dask cluster created. Scheduler address: {} Dashboard: http://{}:8787 "
- "(unless port was changed from default of 8787)".format(
- self.cluster.scheduler.address,
- urlparse(self.cluster.scheduler.address).hostname,
- ) # TODO submit PR to Dask Cloud Provider allowing discovery of dashboard port
- )
-
- self.executor_kwargs["address"] = self.cluster.scheduler.address # type: ignore
- else:
- if self.cluster:
- self.cluster.close()
- raise Exception(
- "Unable to determine the Dask scheduler address after cluster creation. "
- "Tearting down cluster and terminating setup."
- )
- if self._adaptive_min_workers:
- self.logger.info(
- "Enabling adaptive mode with min_workers={} max_workers={}".format(
- self._adaptive_min_workers, self._adaptive_max_workers
- )
- )
- self.cluster.adapt( # type: ignore
- minimum=self._adaptive_min_workers, maximum=self._adaptive_max_workers
- )
-
- def execute( # type: ignore
- self, flow: "Flow", **kwargs: Any # type: ignore
- ) -> None:
- """
- Execute a flow run on a dask-cloudprovider cluster.
-
- Args:
- - flow (Flow): the Flow object
- - **kwargs (Any): Unused
- """
- flow_run_info = None
- flow_run_id = prefect.context.get("flow_run_id")
- if self._on_execute:
- # If an on_execute Callable has been provided, retrieve the flow run parameters
- # and then allow the Callable a chance to update _provider_kwargs. This allows
- # better sizing of the cluster resources based on parameters for this Flow run.
- try:
- client = Client()
- flow_run_info = client.get_flow_run_info(flow_run_id)
- parameters = flow_run_info.parameters or {} # type: ignore
- self._on_execute(parameters, self._provider_kwargs)
- except Exception as exc:
- self.logger.info(
- "Failed to retrieve flow run info with error: {}".format(repr(exc))
- )
- if "image" not in self._provider_kwargs or not self._provider_kwargs.get(
- "image"
- ):
- # If image is not specified, use the Flow's image so that dependencies are
- # identical on all containers: Flow runner, Dask scheduler, and Dask workers
- flow_id = prefect.context.get("flow_id")
- try:
- client = Client()
- if not flow_id: # We've observed cases where flow_id is None
- if not flow_run_info:
- flow_run_info = client.get_flow_run_info(flow_run_id)
- flow_id = flow_run_info.flow_id
- flow_info = client.graphql(
- """query {
- flow(where: {id: {_eq: "%s"}}) {
- storage
- }
- }"""
- % flow_id
- )
- storage_info = flow_info["data"]["flow"][0]["storage"]
- image = "{}/{}:{}".format(
- storage_info["registry_url"],
- storage_info["image_name"],
- storage_info["image_tag"],
- )
- self.logger.info(
- "Using Flow's Docker image for Dask scheduler & workers: {}".format(
- image
- )
- )
- self._provider_kwargs["image"] = image
- except Exception as exc:
- self.logger.info(
- "Failed to retrieve flow info with error: {}".format(repr(exc))
- )
-
- self._create_dask_cluster()
-
- self.logger.info(
- "Executing on dynamically created Dask Cluster with scheduler address: {}".format(
- self.executor_kwargs["address"]
- )
- )
- if self.on_start:
- self.on_start()
-
- try:
- from prefect.engine import get_default_flow_runner_class
- from prefect.executors import DaskExecutor
-
- runner_cls = get_default_flow_runner_class()
- runner_cls(flow=flow).run(executor=DaskExecutor(**self.executor_kwargs))
- except Exception as exc:
- self.logger.exception(
- "Unexpected error raised during flow run: {}".format(exc)
- )
- raise
- finally:
- if self.on_exit:
- self.on_exit()
diff --git a/src/prefect/environments/execution/dask/job.yaml b/src/prefect/environments/execution/dask/job.yaml
deleted file mode 100644
index 401d4ddae3cf..000000000000
--- a/src/prefect/environments/execution/dask/job.yaml
+++ /dev/null
@@ -1,59 +0,0 @@
-apiVersion: batch/v1
-kind: Job
-metadata:
- name: prefect-dask-job
- labels:
- app: prefect-dask-job
-spec:
- template:
- metadata:
- labels:
- app: prefect-dask-job
- spec:
- containers:
- - name: flow
- image: prefecthq/prefect:latest
- imagePullPolicy: IfNotPresent
- command: ["/bin/sh", "-c"]
- args:
- [
- 'python -c "import prefect; prefect.environments.execution.load_and_run_flow()"',
- ]
- env:
- - name: PREFECT__CLOUD__GRAPHQL
- value: PREFECT__CLOUD__GRAPHQL
- - name: PREFECT__CLOUD__AUTH_TOKEN
- value: PREFECT__CLOUD__AUTH_TOKEN
- - name: PREFECT__CONTEXT__FLOW_RUN_ID
- value: PREFECT__CONTEXT__FLOW_RUN_ID
- - name: PREFECT__CONTEXT__NAMESPACE
- value: PREFECT__CONTEXT__NAMESPACE
- - name: PREFECT__CONTEXT__IMAGE
- value: PREFECT__CONTEXT__IMAGE
- - name: PREFECT__CLOUD__USE_LOCAL_SECRETS
- value: "false"
- - name: PREFECT__ENGINE__FLOW_RUNNER__DEFAULT_CLASS
- value: "prefect.engine.cloud.CloudFlowRunner"
- - name: PREFECT__ENGINE__TASK_RUNNER__DEFAULT_CLASS
- value: "prefect.engine.cloud.CloudTaskRunner"
- - name: PREFECT__ENGINE__EXECUTOR__DEFAULT_CLASS
- value: "prefect.executors.DaskExecutor"
- - name: PREFECT__CLOUD__SEND_FLOW_RUN_LOGS
- value: "true"
- # Backwards compatibility variable for `SEND_FLOW_RUN_LOGS`
- - name: PREFECT__LOGGING__LOG_TO_CLOUD
- value: "true"
- - name: PREFECT__LOGGING__LEVEL
- value: "INFO"
- - name: PREFECT__DEBUG
- value: "true"
- - name: DASK_DISTRIBUTED__SCHEDULER__WORK_STEALING
- value: "True"
- - name: PREFECT__LOGGING__EXTRA_LOGGERS
- value: PREFECT__LOGGING__EXTRA_LOGGERS
- resources:
- requests:
- cpu: "100m"
- limits:
- cpu: "100m"
- restartPolicy: Never
diff --git a/src/prefect/environments/execution/dask/k8s.py b/src/prefect/environments/execution/dask/k8s.py
deleted file mode 100644
index ad5c18cf94ed..000000000000
--- a/src/prefect/environments/execution/dask/k8s.py
+++ /dev/null
@@ -1,637 +0,0 @@
-import base64
-import json
-import uuid
-from os import path
-from typing import Callable, List, TYPE_CHECKING
-import warnings
-
-import yaml
-
-import prefect
-from prefect.client import Secret
-from prefect.environments.execution import Environment
-from prefect.utilities.storage import get_flow_image
-
-if TYPE_CHECKING:
- from prefect.core.flow import Flow # pylint: disable=W0611
-
-
-class DaskKubernetesEnvironment(Environment):
- """
- DaskKubernetesEnvironment is an environment which deploys your flow on Kubernetes by
- spinning up a temporary Dask Cluster (using
- [dask-kubernetes](https://kubernetes.dask.org/en/latest/)) and running the Prefect
- `DaskExecutor` on this cluster.
-
- DEPRECATED: Environment based configuration is deprecated, please transition to
- configuring `flow.run_config` instead of `flow.environment`. See
- https://docs.prefect.io/orchestration/flow_config/overview.html for more info.
-
- When running your flows that are registered with a private container registry, you should
- either specify the name of an `image_pull_secret` on the flow's `DaskKubernetesEnvironment`
- or directly set the `imagePullSecrets` on your custom worker/scheduler specs.
-
- It is possible to provide a custom scheduler and worker spec YAML files through the
- `scheduler_spec_file` and `worker_spec_file` arguments. These specs (if provided) will be
- used in place of the defaults. Your spec files should be modeled after the job.yaml and
- worker_pod.yaml found
- [here](https://github.com/PrefectHQ/prefect/tree/master/src/prefect/environments/execution/dask).
- The main aspects to be aware of are the `command` and `args` on the container. The
- following environment variables, required for cloud, do not need to be included––they are
- automatically added and populated during execution:
-
- - `PREFECT__CLOUD__GRAPHQL`
- - `PREFECT__CLOUD__AUTH_TOKEN`
- - `PREFECT__CONTEXT__FLOW_RUN_ID`
- - `PREFECT__CONTEXT__NAMESPACE`
- - `PREFECT__CONTEXT__IMAGE`
- - `PREFECT__CLOUD__USE_LOCAL_SECRETS`
- - `PREFECT__ENGINE__FLOW_RUNNER__DEFAULT_CLASS`
- - `PREFECT__ENGINE__TASK_RUNNER__DEFAULT_CLASS`
- - `PREFECT__ENGINE__EXECUTOR__DEFAULT_CLASS`
- - `PREFECT__LOGGING__LEVEL`
- - `PREFECT__CLOUD__SEND_FLOW_RUN_LOGS`
- - `PREFECT__LOGGING__EXTRA_LOGGERS`
-
- Note: the logging attributes are only populated if they are not already provided.
-
- Args:
- - min_workers (int, optional): the minimum allowed number of Dask worker pods; defaults to 1
- max_workers (int, optional): the maximum allowed number of Dask worker pods; defaults to 2
- work_stealing (bool, optional): toggle Dask Distributed scheduler work stealing;
defaults to True. Only used when a custom scheduler spec is not provided. Enabling
this may cause ClientErrors to appear when multiple Dask workers try to run the
same Prefect Task.
`Warning`: if provided, `work_stealing` won't be appended to your custom
`scheduler_spec_file`; if you want it, add it to your container env yourself
(`DASK_DISTRIBUTED__SCHEDULER__WORK_STEALING`).
- - scheduler_logs (bool, optional): log all Dask scheduler logs, defaults to False
- - private_registry (bool, optional, DEPRECATED): a boolean specifying whether your
- Flow's Docker container will be in a private Docker registry; if so, requires a
- Prefect Secret containing your docker credentials to be set. Defaults to `False`.
- - docker_secret (str, optional, DEPRECATED): the name of the Prefect Secret containing
- your Docker credentials; defaults to `"DOCKER_REGISTRY_CREDENTIALS"`. This Secret
- should be a dictionary containing the following keys: `"docker-server"`,
- `"docker-username"`, `"docker-password"`, and `"docker-email"`.
- - labels (List[str], optional): a list of labels, which are arbitrary string
- identifiers used by Prefect Agents when polling for work
- - on_start (Callable, optional): a function callback which will be called before the
- flow begins to run
- - on_exit (Callable, optional): a function callback which will be called after the flow
- finishes its run
- - metadata (dict, optional): extra metadata to be set and serialized on this environment
- - scheduler_spec_file (str, optional): Path to a scheduler spec YAML file
- - worker_spec_file (str, optional): Path to a worker spec YAML file
- - image_pull_secret (str, optional): optional name of an `imagePullSecret` to use for
- the scheduler and worker pods. To specify multiple image pull secrets, provide a comma
- delimited string with no spaces, like `"some-secret,other-secret"`.
- For more information go
- [here](https://kubernetes.io/docs/tasks/configure-pod-container/pull-image-private-registry/).
`Warning`: if provided, `image_pull_secret` won't be appended to your custom
`worker_spec_file` or `scheduler_spec_file`; if you want it, add it to your
spec files yourself.
- log_k8s_errors (bool, optional): toggle to also log Kubernetes errors that may occur,
using the Prefect logger. Defaults to `False`.
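-
- Example (an illustrative sketch; the spec file paths and the image pull secret
- name are placeholders):
-
- ```python
- env = DaskKubernetesEnvironment(
-     min_workers=1,
-     max_workers=5,
-     scheduler_spec_file="scheduler.yaml",
-     worker_spec_file="worker_pod.yaml",
-     image_pull_secret="regcred",
- )
- flow.environment = env
- ```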
- """
-
- def __init__(
- self,
- min_workers: int = 1,
- max_workers: int = 2,
- work_stealing: bool = True,
- scheduler_logs: bool = False,
- private_registry: bool = False,
- docker_secret: str = None,
- labels: List[str] = None,
- on_start: Callable = None,
- on_exit: Callable = None,
- metadata: dict = None,
- scheduler_spec_file: str = None,
- worker_spec_file: str = None,
- image_pull_secret: str = None,
- log_k8s_errors: bool = False,
- ) -> None:
- self.min_workers = min_workers
- self.max_workers = max_workers
- self.work_stealing = work_stealing
- self.scheduler_logs = scheduler_logs
- self.private_registry = private_registry
- if self.private_registry:
- self.docker_secret = docker_secret or "DOCKER_REGISTRY_CREDENTIALS"
-
- warnings.warn(
- "The `private_registry` and `docker_secret` options are deprecated. "
- "Please set `imagePullSecrets` on custom work and scheduler YAML manifests.",
- stacklevel=2,
- )
- else:
- self.docker_secret = None # type: ignore
- self.scheduler_spec_file = scheduler_spec_file
- self.worker_spec_file = worker_spec_file
- self.image_pull_secret = image_pull_secret
-
- # Load specs from file if path given, store on object
- self._scheduler_spec, self._worker_spec = self._load_specs_from_file()
-
- self._identifier_label = ""
- self.log_k8s_errors = log_k8s_errors
-
- super().__init__(
- labels=labels, on_start=on_start, on_exit=on_exit, metadata=metadata
- )
-
- @property
- def dependencies(self) -> list:
- return ["kubernetes"]
-
- @property
- def identifier_label(self) -> str:
- if not hasattr(self, "_identifier_label") or not self._identifier_label:
- self._identifier_label = str(uuid.uuid4())
- return self._identifier_label
-
- def __getstate__(self) -> dict:
- state = self.__dict__.copy()
- # Ensure _identifier_label is not persisted
- if "_identifier_label" in state:
- del state["_identifier_label"]
- return state
-
- def __setstate__(self, state: dict) -> None:
- self.__dict__.update(state)
-
- def setup(self, flow: "Flow") -> None: # type: ignore
- if self.private_registry:
- from kubernetes import client, config
-
- # Verify environment is running in cluster
- try:
- config.load_incluster_config()
- except config.config_exception.ConfigException as config_exception:
- self.logger.error("Environment not currently running inside a cluster")
- raise EnvironmentError(
- "Environment not currently inside a cluster"
- ) from config_exception
-
- v1 = client.CoreV1Api()
- namespace = prefect.context.get("namespace", "default")
- secret_name = namespace + "-docker"
- secrets = v1.list_namespaced_secret(namespace=namespace, watch=False)
- if not [
- secret
- for secret in secrets.items
- if secret.metadata.name == secret_name
- ]:
- self.logger.debug(
- "Docker registry secret {} does not exist for this tenant.".format(
- secret_name
- )
- )
- self._create_namespaced_secret()
- else:
- self.logger.debug(
- "Docker registry secret {} found.".format(secret_name)
- )
-
- def execute(self, flow: "Flow") -> None: # type: ignore
- """
- Create a single Kubernetes job that spins up a dask scheduler, dynamically
- creates worker pods, and runs the flow.
-
- Args:
- - flow (Flow): the Flow object
-
- Raises:
- - Exception: if the environment is unable to create the Kubernetes job
- """
- docker_name = get_flow_image(flow)
-
- from kubernetes import client, config
-
- # Verify environment is running in cluster
- try:
- config.load_incluster_config()
- except config.config_exception.ConfigException as config_exception:
- self.logger.error("Environment not currently running inside a cluster")
- raise EnvironmentError(
- "Environment not currently inside a cluster"
- ) from config_exception
-
- batch_client = client.BatchV1Api()
-
- if self._scheduler_spec:
- job = self._scheduler_spec
- job = self._populate_scheduler_spec_yaml(
- yaml_obj=job, docker_name=docker_name
- )
- else:
- with open(path.join(path.dirname(__file__), "job.yaml")) as job_file:
- job = yaml.safe_load(job_file)
- job = self._populate_job_yaml(yaml_obj=job, docker_name=docker_name)
-
- # Create Job
- try:
- batch_client.create_namespaced_job(
- namespace=prefect.context.get("namespace"), body=job
- )
- except Exception as exc:
- self.logger.critical("Failed to create Kubernetes job: {}".format(exc))
- raise exc
-
- def _create_namespaced_secret(self) -> None:
- self.logger.debug(
- 'Creating Docker registry kubernetes secret from "{}" Prefect Secret.'.format(
- self.docker_secret
- )
- )
- try:
- from kubernetes import client
-
- docker_creds = Secret(self.docker_secret).get()
- assert isinstance(docker_creds, dict)
-
- v1 = client.CoreV1Api()
- cred_payload = {
- "auths": {
- docker_creds["docker-server"]: {
- "Username": docker_creds["docker-username"],
- "Password": docker_creds["docker-password"],
- "Email": docker_creds["docker-email"],
- }
- }
- }
- data = {
- ".dockerconfigjson": base64.b64encode(
- json.dumps(cred_payload).encode()
- ).decode()
- }
- namespace = prefect.context.get("namespace", "unknown")
- name = namespace + "-docker"
- secret = client.V1Secret(
- api_version="v1",
- data=data,
- kind="Secret",
- metadata=dict(name=name, namespace=namespace),
- type="kubernetes.io/dockerconfigjson",
- )
- v1.create_namespaced_secret(namespace, body=secret)
- self.logger.debug("Created Docker registry secret {}.".format(name))
- except Exception as exc:
- self.logger.error(
- "Failed to create Kubernetes secret for private Docker registry: {}".format(
- exc
- )
- )
- raise exc
-
- def run(self, flow: "Flow") -> None:
- """
- Run the flow using a temporary dask-kubernetes cluster.
-
- Args:
- - flow (Flow): the flow to run.
- """
- # Call on_start callback if specified
- if self.on_start:
- self.on_start()
-
- try:
- from prefect.engine import get_default_flow_runner_class
- from prefect.executors import DaskExecutor
- from dask_kubernetes import KubeCluster
-
- if self._worker_spec:
- worker_pod = self._worker_spec
- worker_pod = self._populate_worker_spec_yaml(yaml_obj=worker_pod)
- else:
- with open(
- path.join(path.dirname(__file__), "worker_pod.yaml")
- ) as pod_file:
- worker_pod = yaml.safe_load(pod_file)
- worker_pod = self._populate_worker_pod_yaml(yaml_obj=worker_pod)
-
- cluster = KubeCluster.from_dict(
- worker_pod, namespace=prefect.context.get("namespace")
- )
- cluster.adapt(minimum=self.min_workers, maximum=self.max_workers)
-
- executor = DaskExecutor(address=cluster.scheduler_address)
- runner_cls = get_default_flow_runner_class()
- runner_cls(flow=flow).run(executor=executor)
- except Exception as exc:
- self.logger.exception(
- "Unexpected error raised during flow run: {}".format(exc)
- )
- raise exc
- finally:
- # Call on_exit callback if specified
- if self.on_exit:
- self.on_exit()
-
- def _extra_loggers(self) -> str:
- """
- Set dask-kubernetes related loggers for debugging and providing more
- visibility into the workings of the Dask cluster. These loggers are useful
- for information about cluster autoscaling and possible kubernetes issues
- that may otherwise be hidden.
-
- Specifying `scheduler_logs=True` on this environment will also elevate the Dask
- scheduler logs. This will lead to a large increase in the amount of logs created
- and should only be used for debugging purposes.
-
- Returns:
- - str: a string representation of a list of extra loggers to use
- """
- cluster_loggers = [
- "dask_kubernetes.core",
- "distributed.deploy.adaptive",
- ]
-
- if self.log_k8s_errors:
- cluster_loggers.append("kubernetes")
-
- config_extra_loggers = prefect.config.logging.extra_loggers
-
- extra_loggers = [*config_extra_loggers, *cluster_loggers]
-
- if self.scheduler_logs:
- extra_loggers.append("distributed.scheduler")
- return str(extra_loggers)
-
- ################################
- # Default YAML Spec Manipulation
- ################################
-
- def _set_prefect_labels(self, obj: dict) -> None:
- flow_run_id = prefect.context.get("flow_run_id", "unknown")
- labels = {
- "prefect.io/identifier": self.identifier_label,
- "prefect.io/flow_run_id": flow_run_id,
- }
- obj.setdefault("metadata", {}).setdefault("labels", {}).update(labels)
-
- def _populate_job_yaml(self, yaml_obj: dict, docker_name: str) -> dict:
- """
- Populate the execution job yaml object used in this environment with the proper values
-
- Args:
- - yaml_obj (dict): A dictionary representing the parsed yaml
- - docker_name (str): the full path to the docker image
-
- Returns:
- - dict: a dictionary with the yaml values replaced
- """
- flow_run_id = prefect.context.get("flow_run_id", "unknown")
- namespace = prefect.context.get("namespace", "unknown")
-
- # set identifier labels
- yaml_obj["metadata"]["name"] = "prefect-dask-job-{}".format(
- self.identifier_label
- )
- self._set_prefect_labels(yaml_obj)
- self._set_prefect_labels(yaml_obj["spec"]["template"])
-
- # set environment variables
- env = yaml_obj["spec"]["template"]["spec"]["containers"][0]["env"]
- pod_spec = yaml_obj["spec"]["template"]["spec"]
- if self.private_registry:
- pod_spec["imagePullSecrets"] = []
- pod_spec["imagePullSecrets"].append({"name": namespace + "-docker"})
- elif self.image_pull_secret:
- pod_spec["imagePullSecrets"] = []
- secrets = self.image_pull_secret.split(",")
- for secret_name in secrets:
- pod_spec["imagePullSecrets"].append({"name": secret_name})
-
- env[0]["value"] = prefect.config.cloud.graphql
- env[1]["value"] = prefect.config.cloud.auth_token
- env[2]["value"] = flow_run_id
- env[3]["value"] = prefect.context.get("namespace", "default")
- env[4]["value"] = docker_name
- env[12]["value"] = str(self.work_stealing)
- env[13]["value"] = self._extra_loggers()
-
- # set image
- yaml_obj["spec"]["template"]["spec"]["containers"][0]["image"] = docker_name
-
- return yaml_obj
-
- def _populate_worker_pod_yaml(self, yaml_obj: dict) -> dict:
- """
- Populate the worker pod yaml object used in this environment with the proper values.
-
- Args:
- - yaml_obj (dict): A dictionary representing the parsed yaml
-
- Returns:
- - dict: a dictionary with the yaml values replaced
- """
- # set identifier labels
- self._set_prefect_labels(yaml_obj)
-
- # set environment variables
- env = yaml_obj["spec"]["containers"][0]["env"]
-
- env[0]["value"] = prefect.config.cloud.graphql
- env[1]["value"] = prefect.config.cloud.auth_token
- env[2]["value"] = prefect.context.get("flow_run_id", "")
- env[10]["value"] = self._extra_loggers()
-
- pod_spec = yaml_obj["spec"]
- if self.private_registry:
- namespace = prefect.context.get("namespace", "default")
- pod_spec["imagePullSecrets"] = []
- pod_spec["imagePullSecrets"].append({"name": namespace + "-docker"})
- elif self.image_pull_secret:
- pod_spec["imagePullSecrets"] = []
- secrets = self.image_pull_secret.split(",")
- for secret_name in secrets:
- pod_spec["imagePullSecrets"].append({"name": secret_name})
-
- # set image
- yaml_obj["spec"]["containers"][0]["image"] = prefect.context.get(
- "image", "daskdev/dask:latest"
- )
-
- return yaml_obj
-
- ###############################
- # Custom YAML Spec Manipulation
- ###############################
-
- def _populate_scheduler_spec_yaml(self, yaml_obj: dict, docker_name: str) -> dict:
- """
- Populate the custom execution job yaml object used in this environment with the proper
- values.
-
- Args:
- - yaml_obj (dict): A dictionary representing the parsed yaml
- - docker_name (str): the full path to the docker image
-
- Returns:
- - dict: a dictionary with the yaml values replaced
- """
- flow_run_id = prefect.context.get("flow_run_id", "unknown")
-
- yaml_obj["metadata"]["name"] = "prefect-dask-job-{}".format(
- self.identifier_label
- )
- self._set_prefect_labels(yaml_obj)
- self._set_prefect_labels(yaml_obj["spec"]["template"])
-
- # Required Cloud environment variables
- env_values = [
- {"name": "PREFECT__CLOUD__GRAPHQL", "value": prefect.config.cloud.graphql},
- {
- "name": "PREFECT__CLOUD__AUTH_TOKEN",
- "value": prefect.config.cloud.auth_token,
- },
- {"name": "PREFECT__CONTEXT__FLOW_RUN_ID", "value": flow_run_id},
- {
- "name": "PREFECT__CONTEXT__NAMESPACE",
- "value": prefect.context.get("namespace", "default"),
- },
- {"name": "PREFECT__CONTEXT__IMAGE", "value": docker_name},
- {"name": "PREFECT__CLOUD__USE_LOCAL_SECRETS", "value": "false"},
- {
- "name": "PREFECT__ENGINE__FLOW_RUNNER__DEFAULT_CLASS",
- "value": "prefect.engine.cloud.CloudFlowRunner",
- },
- {
- "name": "PREFECT__ENGINE__TASK_RUNNER__DEFAULT_CLASS",
- "value": "prefect.engine.cloud.CloudTaskRunner",
- },
- {
- "name": "PREFECT__ENGINE__EXECUTOR__DEFAULT_CLASS",
- "value": "prefect.executors.DaskExecutor",
- },
- ]
-
- # Logging env vars
- log_vars = [
- {
- "name": "PREFECT__CLOUD__SEND_FLOW_RUN_LOGS",
- "value": str(prefect.config.cloud.send_flow_run_logs).lower(),
- },
- {
- "name": "PREFECT__LOGGING__LEVEL",
- "value": str(prefect.config.logging.level),
- },
- {
- "name": "PREFECT__LOGGING__EXTRA_LOGGERS",
- "value": self._extra_loggers(),
- },
- ]
-
- # set environment variables
- env = yaml_obj["spec"]["template"]["spec"]["containers"][0].get("env")
- if not env:
- yaml_obj["spec"]["template"]["spec"]["containers"][0]["env"] = []
- env = yaml_obj["spec"]["template"]["spec"]["containers"][0]["env"]
-
- env.extend(env_values)
-
- # Append logging env vars if not already present
- for var in log_vars:
- if not any(d.get("name") == var.get("name") for d in env):
- env.append(var)
-
- # set image
- yaml_obj["spec"]["template"]["spec"]["containers"][0]["image"] = docker_name
-
- return yaml_obj
-
- def _populate_worker_spec_yaml(self, yaml_obj: dict) -> dict:
- """
- Populate the custom worker pod yaml object used in this environment with the proper values.
-
- Args:
- - yaml_obj (dict): A dictionary representing the parsed yaml
-
- Returns:
- - dict: a dictionary with the yaml values replaced
- """
- # set identifier labels
- self._set_prefect_labels(yaml_obj)
-
- # Required Cloud environment variables
- env_values = [
- {"name": "PREFECT__CLOUD__GRAPHQL", "value": prefect.config.cloud.graphql},
- {
- "name": "PREFECT__CLOUD__AUTH_TOKEN",
- "value": prefect.config.cloud.auth_token,
- },
- {
- "name": "PREFECT__CONTEXT__FLOW_RUN_ID",
- "value": prefect.context.get("flow_run_id", ""),
- },
- {"name": "PREFECT__CLOUD__USE_LOCAL_SECRETS", "value": "false"},
- {
- "name": "PREFECT__ENGINE__FLOW_RUNNER__DEFAULT_CLASS",
- "value": "prefect.engine.cloud.CloudFlowRunner",
- },
- {
- "name": "PREFECT__ENGINE__TASK_RUNNER__DEFAULT_CLASS",
- "value": "prefect.engine.cloud.CloudTaskRunner",
- },
- {
- "name": "PREFECT__ENGINE__EXECUTOR__DEFAULT_CLASS",
- "value": "prefect.executors.DaskExecutor",
- },
- ]
-
- # Logging env vars
- log_vars = [
- {
- "name": "PREFECT__CLOUD__SEND_FLOW_RUN_LOGS",
- "value": str(prefect.config.cloud.send_flow_run_logs).lower(),
- },
- {
- "name": "PREFECT__LOGGING__LEVEL",
- "value": str(prefect.config.logging.level),
- },
- {
- "name": "PREFECT__LOGGING__EXTRA_LOGGERS",
- "value": self._extra_loggers(),
- },
- ]
-
- # set environment variables
- env = yaml_obj["spec"]["containers"][0].get("env")
- if not env:
- yaml_obj["spec"]["containers"][0]["env"] = []
- env = yaml_obj["spec"]["containers"][0]["env"]
-
- env.extend(env_values)
-
- # Append logging env vars if not already present
- for var in log_vars:
- if not any(d.get("name") == var.get("name") for d in env):
- env.append(var)
-
- # set image
- yaml_obj["spec"]["containers"][0]["image"] = prefect.context.get(
- "image", "daskdev/dask:latest"
- )
-
- return yaml_obj
-
- def _load_specs_from_file(self) -> tuple:
- """
- Load scheduler and worker spec from provided file paths
-
- Returns:
- - tuple: scheduler spec dictionary, worker spec dictionary
- """
- scheduler = None
- worker = None
-
- if self.scheduler_spec_file:
- with open(self.scheduler_spec_file) as scheduler_spec_file:
- scheduler = yaml.safe_load(scheduler_spec_file)
-
- if self.worker_spec_file:
- with open(self.worker_spec_file) as worker_spec_file:
- worker = yaml.safe_load(worker_spec_file)
-
- return scheduler, worker
diff --git a/src/prefect/environments/execution/dask/worker_pod.yaml b/src/prefect/environments/execution/dask/worker_pod.yaml
deleted file mode 100644
index 405cc21d2cc2..000000000000
--- a/src/prefect/environments/execution/dask/worker_pod.yaml
+++ /dev/null
@@ -1,42 +0,0 @@
-kind: Pod
-metadata:
- labels:
- app: prefect-dask-worker
-spec:
- restartPolicy: Never
- containers:
- - image: prefecthq/prefect:latest
- imagePullPolicy: IfNotPresent
- args: [dask-worker, --no-bokeh, --death-timeout, '60']
- name: dask-worker
- env:
- - name: PREFECT__CLOUD__GRAPHQL
- value: PREFECT__CLOUD__GRAPHQL
- - name: PREFECT__CLOUD__AUTH_TOKEN
- value: PREFECT__CLOUD__AUTH_TOKEN
- - name: PREFECT__CONTEXT__FLOW_RUN_ID
- value: PREFECT__CONTEXT__FLOW_RUN_ID
- - name: PREFECT__CLOUD__USE_LOCAL_SECRETS
- value: "false"
- - name: PREFECT__ENGINE__FLOW_RUNNER__DEFAULT_CLASS
- value: "prefect.engine.cloud.CloudFlowRunner"
- - name: PREFECT__ENGINE__TASK_RUNNER__DEFAULT_CLASS
- value: "prefect.engine.cloud.CloudTaskRunner"
- - name: PREFECT__ENGINE__EXECUTOR__DEFAULT_CLASS
- value: "prefect.executors.DaskExecutor"
- - name: PREFECT__CLOUD__SEND_FLOW_RUN_LOGS
- value: "true"
- # Backwards compatibility variable for `SEND_FLOW_RUN_LOGS`
- - name: PREFECT__LOGGING__LOG_TO_CLOUD
- value: "true"
- - name: PREFECT__LOGGING__LEVEL
- value: "DEBUG"
- - name: PREFECT__DEBUG
- value: "true"
- - name: PREFECT__LOGGING__EXTRA_LOGGERS
- value: PREFECT__LOGGING__EXTRA_LOGGERS
- resources:
- requests:
- cpu: "500m"
- limits:
- cpu: "500m"
diff --git a/src/prefect/environments/execution/fargate/__init__.py b/src/prefect/environments/execution/fargate/__init__.py
deleted file mode 100644
index 86d600b6b602..000000000000
--- a/src/prefect/environments/execution/fargate/__init__.py
+++ /dev/null
@@ -1 +0,0 @@
-from prefect.environments.execution.fargate.fargate_task import FargateTaskEnvironment
diff --git a/src/prefect/environments/execution/fargate/fargate_task.py b/src/prefect/environments/execution/fargate/fargate_task.py
deleted file mode 100644
index ee4e07406a2c..000000000000
--- a/src/prefect/environments/execution/fargate/fargate_task.py
+++ /dev/null
@@ -1,446 +0,0 @@
-import operator
-import os
-from typing import TYPE_CHECKING, Callable, List
-
-import prefect
-from prefect import config
-from prefect.environments.execution.base import Environment, _RunMixin
-from prefect.utilities.storage import get_flow_image
-
-if TYPE_CHECKING:
- from prefect.core.flow import Flow # pylint: disable=W0611
-
-_DEFINITION_KWARG_LIST = [
- "family",
- "taskRoleArn",
- "executionRoleArn",
- "networkMode",
- "containerDefinitions",
- "volumes",
- "placementConstraints",
- "requiresCompatibilities",
- "cpu",
- "memory",
- "tags",
- "pidMode",
- "ipcMode",
- "proxyConfiguration",
- "inferenceAccelerators",
-]
-
-
-class FargateTaskEnvironment(Environment, _RunMixin):
- """
- FargateTaskEnvironment is an environment which deploys your flow as a Fargate task.
-
- DEPRECATED: Environment based configuration is deprecated, please transition to
- configuring `flow.run_config` instead of `flow.environment`. See
- https://docs.prefect.io/orchestration/flow_config/overview.html for more info.
-
- This environment requires AWS credentials and extra boto3 kwargs which are
- used in the creation and running of the Fargate task. When providing a
- custom container definition spec the first container in the spec must be
- the container that the flow runner will be executed on.
-
- The following environment variables, required for cloud, do not need to be
- included––they are automatically added and populated during execution:
-
- - `PREFECT__CLOUD__GRAPHQL`
- - `PREFECT__CLOUD__AUTH_TOKEN`
- - `PREFECT__CONTEXT__FLOW_RUN_ID`
- - `PREFECT__CONTEXT__IMAGE`
- - `PREFECT__CLOUD__USE_LOCAL_SECRETS`
- - `PREFECT__ENGINE__FLOW_RUNNER__DEFAULT_CLASS`
- - `PREFECT__ENGINE__TASK_RUNNER__DEFAULT_CLASS`
- - `PREFECT__CLOUD__SEND_FLOW_RUN_LOGS`
- - `PREFECT__LOGGING__EXTRA_LOGGERS`
-
- Additionally, the following command will be applied to the first container:
-
- `$ /bin/sh -c "python -c 'import prefect; prefect.environments.execution.load_and_run_flow()'"`
-
- All `kwargs` are accepted that one would normally pass to boto3 for `register_task_definition`
- and `run_task`. For information on the kwargs supported visit the following links:
-
- https://boto3.amazonaws.com/v1/documentation/api/latest/reference/services/ecs.html#ECS.Client.register_task_definition
-
- https://boto3.amazonaws.com/v1/documentation/api/latest/reference/services/ecs.html#ECS.Client.run_task
-
- Note: You must provide `family` and `taskDefinition` with the same string so they
- match when the task is run.
-
- The secrets and kwargs that are provided at initialization time of this environment
- are not serialized and will only ever exist on this object.
-
- Args:
- - launch_type (str, optional): either FARGATE or EC2, defaults to FARGATE
- - aws_access_key_id (str, optional): AWS access key id for connecting the boto3
- client. Defaults to the value set in the environment variable
- `AWS_ACCESS_KEY_ID` or `None`
- - aws_secret_access_key (str, optional): AWS secret access key for connecting
- the boto3 client. Defaults to the value set in the environment variable
- `AWS_SECRET_ACCESS_KEY` or `None`
- - aws_session_token (str, optional): AWS session key for connecting the boto3
- client. Defaults to the value set in the environment variable
- `AWS_SESSION_TOKEN` or `None`
- - region_name (str, optional): AWS region name for connecting the boto3 client.
- Defaults to the value set in the environment variable `REGION_NAME` or `None`
- - executor (Executor, optional): the executor to run the flow with. If not provided, the
- default executor will be used.
- - labels (List[str], optional): a list of labels, which are arbitrary string
- identifiers used by Prefect Agents when polling for work
- - on_start (Callable, optional): a function callback which will be called before the
- flow begins to run
- - on_exit (Callable, optional): a function callback which will be called after the flow
- finishes its run
- - metadata (dict, optional): extra metadata to be set and serialized on this environment
- - **kwargs (dict, optional): additional keyword arguments to pass to boto3 for
- `register_task_definition` and `run_task`
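-
- Example (an illustrative sketch; the family, cluster, and resource values are
- placeholders, and `_parse_kwargs` routes each kwarg to `register_task_definition`
- or `run_task` as noted):
-
- ```python
- env = FargateTaskEnvironment(
-     launch_type="FARGATE",
-     region_name="us-east-1",
-     # register_task_definition kwargs
-     family="my-flow",  # must match `taskDefinition` below
-     cpu="256",
-     memory="512",
-     networkMode="awsvpc",
-     # run_task kwargs
-     cluster="my-ecs-cluster",
-     taskDefinition="my-flow",
- )
- ```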
- """
-
- def __init__( # type: ignore
- self,
- launch_type: str = "FARGATE",
- aws_access_key_id: str = None,
- aws_secret_access_key: str = None,
- aws_session_token: str = None,
- region_name: str = None,
- executor: "prefect.executors.Executor" = None,
- labels: List[str] = None,
- on_start: Callable = None,
- on_exit: Callable = None,
- metadata: dict = None,
- **kwargs,
- ) -> None:
- self.launch_type = launch_type
- # Not serialized, only stored on the object
- self.aws_access_key_id = aws_access_key_id or os.getenv("AWS_ACCESS_KEY_ID")
- self.aws_secret_access_key = aws_secret_access_key or os.getenv(
- "AWS_SECRET_ACCESS_KEY"
- )
- self.aws_session_token = aws_session_token or os.getenv("AWS_SESSION_TOKEN")
- self.region_name = region_name or os.getenv("REGION_NAME")
-
- # Parse accepted kwargs for definition and run
- self.task_definition_kwargs, self.task_run_kwargs = self._parse_kwargs(kwargs)
-
- if executor is None:
- executor = prefect.engine.get_default_executor_class()()
- elif not isinstance(executor, prefect.executors.Executor):
- raise TypeError(
- f"`executor` must be an `Executor` or `None`, got `{executor}`"
- )
- self.executor = executor
-
- super().__init__(
- labels=labels, on_start=on_start, on_exit=on_exit, metadata=metadata
- )
-
- def _parse_kwargs(self, user_kwargs: dict) -> tuple:
- """
- Parse the kwargs passed in and separate them out for `register_task_definition`
and `run_task`. This is required because boto3 does not allow extra kwargs;
if any are provided it raises `botocore.exceptions.ParamValidationError`.
-
- Args:
- - user_kwargs (dict): The kwargs passed to the initialization of the environment
-
- Returns:
- tuple: a tuple of two dictionaries (task_definition_kwargs, task_run_kwargs)
- """
-
- run_kwarg_list = [
- "cluster",
- "taskDefinition",
- "count",
- "startedBy",
- "group",
- "placementConstraints",
- "placementStrategy",
- "platformVersion",
- "networkConfiguration",
- "tags",
- "enableECSManagedTags",
- "propagateTags",
- ]
-
- task_definition_kwargs = {}
- for key, item in user_kwargs.items():
- if key in _DEFINITION_KWARG_LIST:
- task_definition_kwargs.update({key: item})
-
- task_run_kwargs = {}
- for key, item in user_kwargs.items():
- if key in run_kwarg_list:
- task_run_kwargs.update({key: item})
-
- return task_definition_kwargs, task_run_kwargs
-
- @property
- def dependencies(self) -> list:
- return ["boto3", "botocore"]
-
- def _render_task_definition_kwargs(self, flow: "Flow") -> dict:
- task_definition_kwargs = self.task_definition_kwargs.copy()
-
- env_values = [
- {"name": "PREFECT__CLOUD__GRAPHQL", "value": config.cloud.graphql},
- {"name": "PREFECT__CLOUD__USE_LOCAL_SECRETS", "value": "false"},
- {
- "name": "PREFECT__ENGINE__FLOW_RUNNER__DEFAULT_CLASS",
- "value": "prefect.engine.cloud.CloudFlowRunner",
- },
- {
- "name": "PREFECT__ENGINE__TASK_RUNNER__DEFAULT_CLASS",
- "value": "prefect.engine.cloud.CloudTaskRunner",
- },
- {"name": "PREFECT__CLOUD__SEND_FLOW_RUN_LOGS", "value": "true"},
- {
- "name": "PREFECT__LOGGING__EXTRA_LOGGERS",
- "value": str(config.logging.extra_loggers),
- },
- ]
-
- # create containerDefinitions if they do not exist
- if not task_definition_kwargs.get("containerDefinitions"):
- task_definition_kwargs["containerDefinitions"] = []
- task_definition_kwargs["containerDefinitions"].append({})
-
- # set environment variables for all containers
- for definition in task_definition_kwargs["containerDefinitions"]:
- if not definition.get("environment"):
- definition["environment"] = []
- definition["environment"].extend(env_values)
-
- # the first container always runs the flow, so unconditionally set its
- # name, image, and command
- task_definition_kwargs["containerDefinitions"][0]["name"] = "flow-container"
- task_definition_kwargs["containerDefinitions"][0]["image"] = get_flow_image(flow)
- task_definition_kwargs["containerDefinitions"][0]["command"] = [
- "/bin/sh",
- "-c",
- "python -c 'import prefect; prefect.environments.execution.load_and_run_flow()'",
- ]
-
- return task_definition_kwargs
-
- def _validate_task_definition(
- self, existing_task_definition: dict, task_definition_kwargs: dict
- ) -> None:
- def format_container_definition(definition: dict) -> dict:
- """
- Reformat all object arrays in the containerDefinitions so
- the keys are comparable for validation. Most of these won't apply
to the first container (overridden by Prefect) but it could apply to
other containers in the definition, so they are included here.
-
The keys that are overridden here are listed in:
- https://docs.aws.amazon.com/AmazonECS/latest/developerguide/task_definition_parameters.html#container_definitions
-
- Essentially only the `object array` types need to be overridden since
- they may be returned from AWS's API out of order.
- """
- return {
- **definition,
- "environment": {
- item["name"]: item["value"]
- for item in definition.get("environment", [])
- },
- "secrets": {
- item["name"]: item["valueFrom"]
- for item in definition.get("secrets", [])
- },
- "mountPoints": {
- item["sourceVolume"]: item
- for item in definition.get("mountPoints", [])
- },
- "extraHosts": {
- item["hostname"]: item["ipAddress"]
- for item in definition.get("extraHosts", [])
- },
- "volumesFrom": {
- item["sourceContainer"]: item
- for item in definition.get("volumesFrom", [])
- },
- "ulimits": {
- item["name"]: item for item in definition.get("ulimits", [])
- },
- "portMappings": {
- item["containerPort"]: item
- for item in definition.get("portMappings", [])
- },
- "logConfiguration": {
- **definition.get("logConfiguration", {}),
- "secretOptions": {
- item["name"]: item["valueFrom"]
- for item in definition.get("logConfiguration", {}).get(
- "secretOptions", []
- )
- },
- },
- }
-
- givenContainerDefinitions = sorted(
- [
- format_container_definition(container_definition)
- for container_definition in task_definition_kwargs[
- "containerDefinitions"
- ]
- ],
- key=operator.itemgetter("name"),
- )
- expectedContainerDefinitions = sorted(
- [
- format_container_definition(container_definition)
- for container_definition in existing_task_definition[
- "containerDefinitions"
- ]
- ],
- key=operator.itemgetter("name"),
- )
-
- containerDifferences = [
- "containerDefinition.{idx}.{key} -> Given: {given}, Expected: {expected}".format(
- idx=container_definition.get("name", idx),
- key=key,
- given=value,
- expected=existing_container_definition.get(key),
- )
- for idx, (
- container_definition,
- existing_container_definition,
- ) in enumerate(zip(givenContainerDefinitions, expectedContainerDefinitions))
- for key, value in container_definition.items()
- if value != existing_container_definition.get(key)
- ]
-
- arnDifferences = [
- "{key} -> Given: {given}, Expected: {expected}".format(
- key=key,
- given=task_definition_kwargs[key],
- expected=existing_task_definition.get(key),
- )
- for key in _DEFINITION_KWARG_LIST
- if key.endswith("Arn")
- and key in task_definition_kwargs
- and (
- existing_task_definition.get(key) != task_definition_kwargs[key]
- and existing_task_definition.get(key, "").split("/")[-1]
- != task_definition_kwargs[key]
- )
- ]
-
- otherDifferences = [
- "{key} -> Given: {given}, Expected: {expected}".format(
- key=key,
- given=task_definition_kwargs[key],
- expected=existing_task_definition.get(key),
- )
- for key in _DEFINITION_KWARG_LIST
- if key != "containerDefinitions"
- and not key.endswith("Arn")
- and key in task_definition_kwargs
- and existing_task_definition.get(key) != task_definition_kwargs[key]
- ]
-
- differences = containerDifferences + arnDifferences + otherDifferences
-
- if differences:
- raise ValueError(
- (
- "The given taskDefinition does not match the existing taskDefinition {}.\n"
- "Detail: \n\t{}\n\n"
- "If the given configuration is desired, deregister the existing\n"
- "taskDefinition and re-run the flow. Alternatively, you can\n"
- "change the family/taskDefinition name in the FargateTaskEnvironment\n"
- "for this flow."
- ).format(
- self.task_definition_kwargs.get("family"), "\n\t".join(differences)
- )
- )
-
- def setup(self, flow: "Flow") -> None: # type: ignore
- """
- Register the task definition if it does not already exist.
-
- Args:
- - flow (Flow): the Flow object
- """
- from boto3 import client as boto3_client
- from botocore.exceptions import ClientError
-
- boto3_c = boto3_client(
- "ecs",
- aws_access_key_id=self.aws_access_key_id,
- aws_secret_access_key=self.aws_secret_access_key,
- aws_session_token=self.aws_session_token,
- region_name=self.region_name,
- )
-
- task_definition_kwargs = self._render_task_definition_kwargs(flow)
- try:
- existing_task_definition = boto3_c.describe_task_definition(
- taskDefinition=self.task_definition_kwargs.get("family")
- )["taskDefinition"]
-
- self._validate_task_definition(
- existing_task_definition, task_definition_kwargs
- )
- except ClientError:
- boto3_c.register_task_definition(**task_definition_kwargs)
-
- def execute(self, flow: "Flow") -> None: # type: ignore
- """
- Run the Fargate task that was defined for this flow.
-
- Args:
- - flow (Flow): the Flow object
- """
- from boto3 import client as boto3_client
-
- flow_run_id = prefect.context.get("flow_run_id", "unknown")
- container_overrides = [
- {
- "name": "flow-container",
- "environment": [
- {
- "name": "PREFECT__CLOUD__AUTH_TOKEN",
- "value": config.cloud.agent.get("auth_token", "")
- or config.cloud.get("auth_token", ""),
- },
- {"name": "PREFECT__CONTEXT__FLOW_RUN_ID", "value": flow_run_id},
- {"name": "PREFECT__CONTEXT__IMAGE", "value": get_flow_image(flow)},
- ],
- }
- ]
-
- boto3_c = boto3_client(
- "ecs",
- aws_access_key_id=self.aws_access_key_id,
- aws_secret_access_key=self.aws_secret_access_key,
- aws_session_token=self.aws_session_token,
- region_name=self.region_name,
- )
-
- boto3_c.run_task(
- overrides={"containerOverrides": container_overrides},
- launchType=self.launch_type,
- **self.task_run_kwargs,
- )
diff --git a/src/prefect/environments/execution/k8s/__init__.py b/src/prefect/environments/execution/k8s/__init__.py
deleted file mode 100644
index a3d31a78f71b..000000000000
--- a/src/prefect/environments/execution/k8s/__init__.py
+++ /dev/null
@@ -1 +0,0 @@
-from prefect.environments.execution.k8s.job import KubernetesJobEnvironment
diff --git a/src/prefect/environments/execution/k8s/job.py b/src/prefect/environments/execution/k8s/job.py
deleted file mode 100644
index 8a3545ac0640..000000000000
--- a/src/prefect/environments/execution/k8s/job.py
+++ /dev/null
@@ -1,337 +0,0 @@
-import copy
-import os
-import uuid
-from typing import Any, Callable, List, TYPE_CHECKING
-
-import yaml
-
-import prefect
-from prefect.environments.execution.base import Environment, _RunMixin
-from prefect.utilities.storage import get_flow_image
-
-if TYPE_CHECKING:
- from prefect.core.flow import Flow # pylint: disable=W0611
-
-
-class KubernetesJobEnvironment(Environment, _RunMixin):
- """
- KubernetesJobEnvironment is an environment which deploys your flow as a Kubernetes
- job. This environment allows (and requires) a custom job YAML spec to be provided.
-
- DEPRECATED: Environment based configuration is deprecated, please transition to
- configuring `flow.run_config` instead of `flow.environment`. See
- https://docs.prefect.io/orchestration/flow_config/overview.html for more info.
-
- When providing a custom YAML job spec the first container in the spec must be the
- container that the flow runner will be executed on.
-
- The following environment variables, required for cloud, do not need to be
- included––they are automatically added and populated during execution:
-
- - `PREFECT__CLOUD__GRAPHQL`
- - `PREFECT__CLOUD__AUTH_TOKEN`
- - `PREFECT__CONTEXT__FLOW_RUN_ID`
- - `PREFECT__CONTEXT__NAMESPACE`
- - `PREFECT__CONTEXT__IMAGE`
- - `PREFECT__CLOUD__USE_LOCAL_SECRETS`
- - `PREFECT__ENGINE__FLOW_RUNNER__DEFAULT_CLASS`
- - `PREFECT__ENGINE__TASK_RUNNER__DEFAULT_CLASS`
- - `PREFECT__CLOUD__SEND_FLOW_RUN_LOGS`
- - `PREFECT__LOGGING__EXTRA_LOGGERS`
-
- Additionally, the following command will be applied to the first container:
- `$ /bin/sh -c "python -c 'import prefect; prefect.environments.execution.load_and_run_flow()'"`
-
- Args:
- - job_spec_file (str, optional): Path to a job spec YAML file. This path is only
- used when the environment is built, so should refer to a file on the machine
- used to build the flow.
- - unique_job_name (bool, optional): whether to use a unique name for each job created
- with this environment. Defaults to `False`
- - executor (Executor, optional): the executor to run the flow with. If not provided, the
- default executor will be used.
- - labels (List[str], optional): a list of labels, which are arbitrary string
- identifiers used by Prefect Agents when polling for work
- - on_start (Callable, optional): a function callback which will be called before the
- flow begins to run
- - on_exit (Callable, optional): a function callback which will be called after the flow
- finishes its run
- - metadata (dict, optional): extra metadata to be set and serialized on this environment
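-
- Example (an illustrative sketch; the job spec path is a placeholder referring
- to a file on the machine building the flow):
-
- ```python
- env = KubernetesJobEnvironment(
-     job_spec_file="job_spec.yaml",
-     unique_job_name=True,
- )
- flow.environment = env
- ```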
- """
-
- def __init__(
- self,
- job_spec_file: str = None,
- unique_job_name: bool = False,
- executor: "prefect.executors.Executor" = None,
- labels: List[str] = None,
- on_start: Callable = None,
- on_exit: Callable = None,
- metadata: dict = None,
- ) -> None:
- self.job_spec_file = os.path.abspath(job_spec_file) if job_spec_file else None
- self.unique_job_name = unique_job_name
-
- if executor is None:
- executor = prefect.engine.get_default_executor_class()()
- elif not isinstance(executor, prefect.executors.Executor):
- raise TypeError(
- f"`executor` must be an `Executor` or `None`, got `{executor}`"
- )
- self.executor = executor
-
- # Load specs from file if path given, store on object
- self._job_spec = self._load_spec_from_file()
- self._job_spec = self._populate_build_time_job_spec_details(self._job_spec)
-
- self._identifier_label = ""
-
- super().__init__(
- labels=labels, on_start=on_start, on_exit=on_exit, metadata=metadata
- )
-
- @property
- def dependencies(self) -> list:
- return ["kubernetes"]
-
- @property
- def identifier_label(self) -> str:
- if not hasattr(self, "_identifier_label") or not self._identifier_label:
- self._identifier_label = str(uuid.uuid4())
- return self._identifier_label
-
- def __getstate__(self) -> dict:
- state = self.__dict__.copy()
- # Ensure _identifier_label is not persisted
- if "_identifier_label" in state:
- del state["_identifier_label"]
- return state
-
- def __setstate__(self, state: dict) -> None:
- self.__dict__.update(state)
-
- def execute(self, flow: "Flow", **kwargs: Any) -> None: # type: ignore
- """
- Create a single Kubernetes job that runs the flow.
-
- Args:
- - flow (Flow): the Flow object
- - **kwargs (Any): additional keyword arguments to pass to the runner
-
- Raises:
- - Exception: if the environment is unable to create the Kubernetes job
- """
- docker_name = get_flow_image(flow)
-
- from kubernetes import client, config
-
- # Verify environment is running in cluster
- try:
- config.load_incluster_config()
- except config.config_exception.ConfigException as err:
- self.logger.error("Environment not currently running inside a cluster")
- raise EnvironmentError(
- "Environment not currently inside a cluster"
- ) from err
-
- batch_client = client.BatchV1Api()
-
- job = self._populate_run_time_job_spec_details(docker_name=docker_name)
-
- # Create Job
- try:
- batch_client.create_namespaced_job(
- namespace=prefect.context.get("namespace"), body=job
- )
- except Exception as exc:
- self.logger.critical("Failed to create Kubernetes job: {}".format(exc))
- raise exc
-
- ###############################
- # Custom YAML Spec Manipulation
- ###############################
-
- @staticmethod
- def _ensure_required_job_spec_sections(yaml_obj: dict) -> dict:
- """
- Ensure that the required sections exist in the given job YAML.
-
- Makes sure the following sections exist:
-
- * `metadata`
- * `metadata.labels`
- * `spec`
- * `spec.template`
- * `spec.template.metadata`
- * `spec.template.metadata.labels`
- * `spec.template.spec`
- * `spec.template.spec.containers`
- * and on the first container in `spec.template.spec.containers`:
- - `command`
- - `args`
-
- Args:
- - yaml_obj (dict): A dictionary representing the parsed yaml
-
- Returns:
- - dict: a dictionary with the yaml values replaced
- """
- if not yaml_obj.get("metadata"):
- yaml_obj["metadata"] = {}
-
- if not yaml_obj["metadata"].get("labels"):
- yaml_obj["metadata"]["labels"] = {}
-
- if not yaml_obj.get("spec"):
- yaml_obj["spec"] = {}
-
- if not yaml_obj["spec"].get("template"):
- yaml_obj["spec"]["template"] = {}
-
-        if not yaml_obj["spec"]["template"].get("metadata"):
-            yaml_obj["spec"]["template"]["metadata"] = {}
-
-        if not yaml_obj["spec"]["template"]["metadata"].get("labels"):
-            yaml_obj["spec"]["template"]["metadata"]["labels"] = {}
-
-        if not yaml_obj["spec"]["template"].get("spec"):
-            yaml_obj["spec"]["template"]["spec"] = {}
-
-        if not yaml_obj["spec"]["template"]["spec"].get("containers"):
-            yaml_obj["spec"]["template"]["spec"]["containers"] = [{}]
-
- if not yaml_obj["spec"]["template"]["spec"]["containers"][0].get("command"):
- yaml_obj["spec"]["template"]["spec"]["containers"][0]["command"] = []
-
- if not yaml_obj["spec"]["template"]["spec"]["containers"][0].get("args"):
- yaml_obj["spec"]["template"]["spec"]["containers"][0]["args"] = []
-
- return yaml_obj
-
- def _populate_build_time_job_spec_details(self, yaml_obj: dict) -> dict:
- """
- Populate some details of the custom execution job YAML used in this environment.
-
-        This method fills in details that are known at build time (when assigning
-        `flow.environment`). Other details which can only be filled in at runtime are
-        handled by `_populate_run_time_job_spec_details()`.
-
- Changes the first container in `spec.template.spec.containers`.
-
- * `/bin/sh -c` as the `command`
-        * prefect-specific `args` that run the flow
-
- Args:
- - yaml_obj (dict): A dictionary representing the parsed yaml
-
- Returns:
- - dict: a dictionary with the yaml values replaced
- """
- yaml_obj = self._ensure_required_job_spec_sections(yaml_obj)
-
- # set command on first container
- yaml_obj["spec"]["template"]["spec"]["containers"][0]["command"] = [
- "/bin/sh",
- "-c",
- ]
-
- # set args on first container
- yaml_obj["spec"]["template"]["spec"]["containers"][0]["args"] = [
- 'python -c "import prefect; prefect.environments.execution.load_and_run_flow()"'
- ]
-
- return yaml_obj
-
- def _populate_run_time_job_spec_details(self, docker_name: str) -> dict:
- """
- Fill in the custom execution job yaml object stored in `self._job_spec`
- with relevant details.
-
- * `metadata.name`: adds a random name if `self.unique_job_name` is True
- * `metadata.labels`: appends prefect-specific labels
- * `spec.template.metadata.labels`: appends prefect-specific labels
- * `spec.template.spec.containers` (first container):
- - `env`: appends prefect-specific environment variables
-            - `image`: writes in the image from the flow's storage or environment metadata
-
- Args:
- - docker_name (str): the full path to the docker image
-
- Returns:
- - dict: a dictionary with the yaml values replaced
- """
- flow_run_id = prefect.context.get("flow_run_id", "unknown")
-
- yaml_obj = copy.deepcopy(self._job_spec)
- yaml_obj = self._ensure_required_job_spec_sections(yaml_obj)
-
- if self.unique_job_name:
- yaml_obj["metadata"][
- "name"
- ] = f"{yaml_obj['metadata']['name']}-{str(uuid.uuid4())[:8]}"
-
- # Populate metadata label fields
- k8s_labels = {
- "prefect.io/identifier": self.identifier_label,
- "prefect.io/flow_run_id": flow_run_id,
- }
- yaml_obj["metadata"]["labels"].update(k8s_labels)
- yaml_obj["spec"]["template"]["metadata"]["labels"].update(k8s_labels)
-
- # Required Cloud environment variables
- env_values = [
- {"name": "PREFECT__CLOUD__GRAPHQL", "value": prefect.config.cloud.graphql},
- {
- "name": "PREFECT__CLOUD__AUTH_TOKEN",
- "value": prefect.config.cloud.auth_token,
- },
- {"name": "PREFECT__CONTEXT__FLOW_RUN_ID", "value": flow_run_id},
- {
- "name": "PREFECT__CONTEXT__NAMESPACE",
- "value": prefect.context.get("namespace", ""),
- },
- {"name": "PREFECT__CONTEXT__IMAGE", "value": docker_name},
- {"name": "PREFECT__CLOUD__USE_LOCAL_SECRETS", "value": "false"},
- {
- "name": "PREFECT__ENGINE__FLOW_RUNNER__DEFAULT_CLASS",
- "value": "prefect.engine.cloud.CloudFlowRunner",
- },
- {
- "name": "PREFECT__ENGINE__TASK_RUNNER__DEFAULT_CLASS",
- "value": "prefect.engine.cloud.CloudTaskRunner",
- },
- {"name": "PREFECT__CLOUD__SEND_FLOW_RUN_LOGS", "value": "true"},
- {
- "name": "PREFECT__LOGGING__EXTRA_LOGGERS",
- "value": str(prefect.config.logging.extra_loggers),
- },
- ]
-
- # set environment variables on all containers
- for container in yaml_obj["spec"]["template"]["spec"]["containers"]:
- if not container.get("env"):
- container["env"] = []
- container["env"].extend(env_values)
-
- # set image on first container
- yaml_obj["spec"]["template"]["spec"]["containers"][0]["image"] = docker_name
-
- return yaml_obj
-
- def _load_spec_from_file(self) -> dict:
- """
- Load job spec from provided file path
-
- Returns:
- - dict: job spec dictionary
- """
- job = dict() # type: ignore
-
- if self.job_spec_file:
- with open(self.job_spec_file) as job_spec_file:
- job = yaml.safe_load(job_spec_file)
-
- return job
diff --git a/src/prefect/environments/execution/local.py b/src/prefect/environments/execution/local.py
deleted file mode 100644
index 01cb1f2a3335..000000000000
--- a/src/prefect/environments/execution/local.py
+++ /dev/null
@@ -1,61 +0,0 @@
-from typing import Iterable, Callable, TYPE_CHECKING
-
-import prefect
-from prefect.environments.execution.base import Environment, _RunMixin
-
-if TYPE_CHECKING:
- from prefect.core.flow import Flow # pylint: disable=W0611
-
-
-class LocalEnvironment(Environment, _RunMixin):
- """
- A LocalEnvironment class for executing a flow in the local process.
-
- DEPRECATED: Environment based configuration is deprecated, please transition to
- configuring `flow.run_config` instead of `flow.environment`. See
- https://docs.prefect.io/orchestration/flow_config/overview.html for more info.
-
- Args:
- - executor (Executor, optional): the executor to run the flow with. If not provided, the
- default executor will be used.
- - labels (List[str], optional): a list of labels, which are arbitrary string
- identifiers used by Prefect Agents when polling for work
- - on_start (Callable, optional): a function callback which will be called before the
- flow begins to run
- - on_exit (Callable, optional): a function callback which will be called after the flow
- finishes its run
- - metadata (dict, optional): extra metadata to be set and serialized on this
- environment
- """
-
- def __init__(
- self,
- executor: "prefect.executors.Executor" = None,
- labels: Iterable[str] = None,
- on_start: Callable = None,
- on_exit: Callable = None,
- metadata: dict = None,
- ) -> None:
- if executor is None:
- executor = prefect.engine.get_default_executor_class()()
- elif not isinstance(executor, prefect.executors.Executor):
- raise TypeError(
- f"`executor` must be an `Executor` or `None`, got `{executor}`"
- )
- self.executor = executor
- super().__init__(
- labels=labels, on_start=on_start, on_exit=on_exit, metadata=metadata
- )
-
- @property
- def dependencies(self) -> list:
- return []
-
- def execute(self, flow: "Flow") -> None:
- """
- Executes the flow in the local process.
-
- Args:
- - flow (Flow): the Flow object
- """
- self.run(flow)
diff --git a/src/prefect/exceptions.py b/src/prefect/exceptions.py
index 7b199b0ddcf0..1d667cf4cc0d 100644
--- a/src/prefect/exceptions.py
+++ b/src/prefect/exceptions.py
@@ -1,12 +1,3 @@
-# Import old exceptions for compatibility
-from prefect.utilities.exceptions import (
- PrefectError,
- ClientError as ClientError_,
- AuthorizationError as AuthorizationError_,
- StorageError,
-)
-
-
class PrefectSignal(BaseException):
"""
Signals inherit from `BaseException` and will not be caught by normal error
@@ -52,7 +43,7 @@ def __init__(self, message: str = "") -> None:
super().__init__(message)
-class PrefectException(PrefectError):
+class PrefectException(Exception):
"""
The base exception type for all Prefect related exceptions
@@ -60,13 +51,11 @@ class PrefectException(PrefectError):
- message: A message with additional information about the error
"""
- # NOTE: Should be updated to inherit from `Exception` when `PrefectError` is removed
-
def __init__(self, message: str = "") -> None:
super().__init__(message)
-class ClientError(PrefectException, ClientError_):
+class ClientError(PrefectException):
"""
Raised when there is error in Prefect Client <-> Server communication
@@ -90,7 +79,7 @@ def __init__(self, message: str = "") -> None:
super().__init__(message)
-class AuthorizationError(ClientError, AuthorizationError_):
+class AuthorizationError(ClientError):
"""
Raised when there is an issue authorizing with Prefect Cloud
@@ -102,7 +91,7 @@ def __init__(self, message: str = "") -> None:
super().__init__(message)
-class FlowStorageError(PrefectException, StorageError):
+class FlowStorageError(PrefectException):
"""
Raised when there is an error loading a flow from storage
diff --git a/src/prefect/executors/__init__.py b/src/prefect/executors/__init__.py
index 4598eb274523..982628ade247 100644
--- a/src/prefect/executors/__init__.py
+++ b/src/prefect/executors/__init__.py
@@ -19,3 +19,5 @@
from .base import Executor
from .dask import DaskExecutor, LocalDaskExecutor
from .local import LocalExecutor
+
+__all__ = ["DaskExecutor", "Executor", "LocalDaskExecutor", "LocalExecutor"]
diff --git a/src/prefect/executors/dask.py b/src/prefect/executors/dask.py
index da361d35fa89..dc7695c156de 100644
--- a/src/prefect/executors/dask.py
+++ b/src/prefect/executors/dask.py
@@ -15,7 +15,6 @@
if TYPE_CHECKING:
import dask
from distributed import Future, Event
- import multiprocessing.pool
import concurrent.futures
@@ -462,7 +461,7 @@ def performance_report(self) -> str:
def _multiprocessing_pool_initializer() -> None:
- """Initialize a process used in a `multiprocssing.Pool`.
+ """Initialize a process used in a `concurrent.futures.ProcessPoolExecutor`.
Ensures the standard atexit handlers are run."""
import signal
@@ -484,7 +483,7 @@ class LocalDaskExecutor(Executor):
def __init__(self, scheduler: str = "threads", **kwargs: Any):
self.scheduler = self._normalize_scheduler(scheduler)
self.dask_config = kwargs
- self._pool = None # type: Optional[multiprocessing.pool.Pool]
+ self._pool = None # type: Optional[concurrent.futures.Executor]
super().__init__()
@staticmethod
@@ -512,11 +511,11 @@ def _interrupt_pool(self) -> None:
if self._pool is None:
return
- # Terminate the pool
- self._pool.terminate()
+ # Shutdown the pool
+ self._pool.shutdown(wait=False)
if self.scheduler == "threads":
- # `ThreadPool.terminate()` doesn't stop running tasks, only
+ # `ThreadPoolExecutor.shutdown()` doesn't stop running tasks, only
# prevents new tasks from running. In CPython we can attempt to
# raise an exception in all threads. This exception will be raised
# the next time the task does something with the Python api.
@@ -543,7 +542,7 @@ def _interrupt_pool(self) -> None:
else:
id_type = ctypes.c_long
- for t in self._pool._pool: # type: ignore
+ for t in self._pool._threads: # type: ignore
ctypes.pythonapi.PyThreadState_SetAsyncExc(
id_type(t.ident), ctypes.py_object(KeyboardInterrupt)
)
@@ -574,14 +573,13 @@ def _posttask(self, key, value, dsk, state, id): # type: ignore
else:
num_workers = dask.config.get("num_workers", None) or CPU_COUNT
if self.scheduler == "threads":
- from multiprocessing.pool import ThreadPool
+ from concurrent.futures import ThreadPoolExecutor
- self._pool = ThreadPool(num_workers)
+ self._pool = ThreadPoolExecutor(num_workers)
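+                # (The positional argument above maps to `max_workers`;
+                # `ThreadPoolExecutor` starts worker threads lazily as tasks
+                # are submitted.)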
else:
- from dask.multiprocessing import get_context
+ from concurrent.futures import ProcessPoolExecutor
- context = get_context()
- self._pool = context.Pool(
+ self._pool = ProcessPoolExecutor(
num_workers, initializer=_multiprocessing_pool_initializer
)
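+            # (The `initializer` above runs once in each worker process as it
+            # starts, before the process executes any tasks.)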
try:
@@ -595,8 +593,7 @@ def _posttask(self, key, value, dsk, state, id): # type: ignore
if exiting_early:
self._interrupt_pool()
else:
- self._pool.close()
- self._pool.join()
+ self._pool.shutdown(wait=True)
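+                # (`shutdown(wait=True)` above is the `concurrent.futures`
+                # analogue of the old `close()`/`join()` pair: it blocks until
+                # all queued work has completed.)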
self._pool = None
def submit(
diff --git a/src/prefect/run_configs/__init__.py b/src/prefect/run_configs/__init__.py
index e7cf5e62ef29..9f9bddcf2099 100644
--- a/src/prefect/run_configs/__init__.py
+++ b/src/prefect/run_configs/__init__.py
@@ -4,3 +4,13 @@
from .docker import DockerRun
from .ecs import ECSRun
from .vertex import VertexRun
+
+__all__ = [
+ "DockerRun",
+ "ECSRun",
+ "KubernetesRun",
+ "LocalRun",
+ "RunConfig",
+ "UniversalRun",
+ "VertexRun",
+]
diff --git a/src/prefect/schedules/__init__.py b/src/prefect/schedules/__init__.py
index ecaf4e6f72c3..c4892a3fa4f4 100644
--- a/src/prefect/schedules/__init__.py
+++ b/src/prefect/schedules/__init__.py
@@ -8,3 +8,5 @@
CronSchedule,
RRuleSchedule,
)
+
+__all__ = ["CronSchedule", "IntervalSchedule", "RRuleSchedule", "Schedule"]
diff --git a/src/prefect/serialization/__init__.py b/src/prefect/serialization/__init__.py
index 148ae24791ca..1d5eca278a17 100644
--- a/src/prefect/serialization/__init__.py
+++ b/src/prefect/serialization/__init__.py
@@ -1,7 +1,6 @@
import prefect.serialization.schedule
import prefect.serialization.task
import prefect.serialization.edge
-import prefect.serialization.environment
import prefect.serialization.flow
import prefect.serialization.state
import prefect.serialization.storage
diff --git a/src/prefect/serialization/environment.py b/src/prefect/serialization/environment.py
deleted file mode 100644
index 3a6a5467e6c8..000000000000
--- a/src/prefect/serialization/environment.py
+++ /dev/null
@@ -1,108 +0,0 @@
-from typing import Any
-
-from marshmallow import fields, post_load
-
-from prefect.environments import (
- DaskKubernetesEnvironment,
- Environment,
- FargateTaskEnvironment,
- KubernetesJobEnvironment,
- LocalEnvironment,
-)
-from prefect.utilities.serialization import (
- ObjectSchema,
- OneOfSchema,
- to_qualified_name,
- JSONCompatible,
- SortedList,
-)
-
-
-class BaseEnvironmentSchema(ObjectSchema):
- class Meta:
- object_class = Environment
-
- labels = SortedList(fields.String())
- metadata = JSONCompatible(allow_none=True)
-
-
-class LocalEnvironmentSchema(ObjectSchema):
- class Meta:
- object_class = LocalEnvironment
-
- labels = SortedList(fields.String())
- metadata = JSONCompatible(allow_none=True)
-
-
-class DaskKubernetesEnvironmentSchema(ObjectSchema):
- class Meta:
- object_class = DaskKubernetesEnvironment
-
- docker_secret = fields.String(allow_none=True)
- labels = SortedList(fields.String())
- metadata = JSONCompatible(allow_none=True)
- private_registry = fields.Boolean(allow_none=False)
- min_workers = fields.Int()
- max_workers = fields.Int()
-
-
-class FargateTaskEnvironmentSchema(ObjectSchema):
- class Meta:
- object_class = FargateTaskEnvironment
-
- labels = SortedList(fields.String())
- metadata = JSONCompatible(allow_none=True)
-
-
-class KubernetesJobEnvironmentSchema(ObjectSchema):
- class Meta:
- object_class = KubernetesJobEnvironment
-
- labels = SortedList(fields.String())
- metadata = JSONCompatible(allow_none=True)
-
-
-class CustomEnvironmentSchema(ObjectSchema):
- class Meta:
- object_class = lambda: Environment
- exclude_fields = ["type"]
-
- labels = SortedList(fields.String())
- metadata = JSONCompatible(allow_none=True)
-
- type = fields.Function(
- lambda environment: to_qualified_name(type(environment)), lambda x: x
- )
-
- @post_load
- def create_object(self, data: dict, **kwargs: Any) -> Environment:
- """
- Because we cannot deserialize a custom class, we return an empty
- Base Environment with the appropriate labels.
- """
- return Environment(labels=data.get("labels"), metadata=data.get("metadata"))
-
-
-class EnvironmentSchema(OneOfSchema):
- """
- Field that chooses between several nested schemas
- """
-
- # map class name to schema
- type_schemas = {
- "DaskKubernetesEnvironment": DaskKubernetesEnvironmentSchema,
- "Environment": BaseEnvironmentSchema,
- "FargateTaskEnvironment": FargateTaskEnvironmentSchema,
- "LocalEnvironment": LocalEnvironmentSchema,
- "KubernetesJobEnvironment": KubernetesJobEnvironmentSchema,
- "CustomEnvironment": CustomEnvironmentSchema,
- "RemoteEnvironment": CustomEnvironmentSchema,
- "RemoteDaskEnvironment": CustomEnvironmentSchema,
- }
-
- def get_obj_type(self, obj: Any) -> str:
- name = obj.__class__.__name__
- if name in self.type_schemas:
- return name
- else:
- return "CustomEnvironment"
diff --git a/src/prefect/serialization/flow.py b/src/prefect/serialization/flow.py
index 6de1771ccc46..9ead2660cf77 100644
--- a/src/prefect/serialization/flow.py
+++ b/src/prefect/serialization/flow.py
@@ -4,7 +4,6 @@
import prefect
from prefect.serialization.edge import EdgeSchema
-from prefect.serialization.environment import EnvironmentSchema
from prefect.serialization.run_config import RunConfigSchema
from prefect.serialization.schedule import ScheduleSchema
from prefect.serialization.storage import StorageSchema
@@ -91,7 +90,6 @@ class Meta:
reference_tasks = Nested(
TaskSchema, value_selection_fn=get_reference_tasks, many=True, only=["slug"]
)
- environment = fields.Nested(EnvironmentSchema, allow_none=True)
run_config = fields.Nested(RunConfigSchema, allow_none=True)
storage = fields.Nested(StorageSchema, allow_none=True)
diff --git a/src/prefect/storage/__init__.py b/src/prefect/storage/__init__.py
index f75406325983..340f9b096f36 100644
--- a/src/prefect/storage/__init__.py
+++ b/src/prefect/storage/__init__.py
@@ -42,3 +42,20 @@ def get_default_storage_class() -> type:
return Local
else:
return config_value
+
+
+__all__ = [
+ "Azure",
+ "Bitbucket",
+ "CodeCommit",
+ "Docker",
+ "GCS",
+ "Git",
+ "GitHub",
+ "GitLab",
+ "Local",
+ "Module",
+ "S3",
+ "Storage",
+ "Webhook",
+]
diff --git a/src/prefect/storage/_healthcheck.py b/src/prefect/storage/_healthcheck.py
index df09638a2e8c..ac20c20c2117 100644
--- a/src/prefect/storage/_healthcheck.py
+++ b/src/prefect/storage/_healthcheck.py
@@ -6,7 +6,6 @@
import ast
import binascii
-import importlib
import json
import sys
import warnings
@@ -116,27 +115,6 @@ def result_check(flows: list, quiet=False):
print("Result check: OK")
-def environment_dependency_check(flows: list):
- # Test for imports that are required by certain environments
- for flow in flows:
- # Load all required dependencies for an environment
- if not hasattr(flow.environment, "dependencies"):
- continue
-
- required_imports = flow.environment.dependencies
- for dependency in required_imports:
- try:
- importlib.import_module(dependency)
- except ModuleNotFoundError as exc:
- raise ModuleNotFoundError(
- "Using {} requires the `{}` dependency".format(
- flow.environment.__class__.__name__, dependency
- )
- ) from exc
-
- print("Environment dependency check: OK")
-
-
if __name__ == "__main__":
flow_file_paths, python_version = sys.argv[1:3]
@@ -152,5 +130,4 @@ def environment_dependency_check(flows: list):
flows = cloudpickle_deserialization_check(flow_file_paths)
result_check(flows)
- environment_dependency_check(flows)
print("All health checks passed.")
diff --git a/src/prefect/storage/docker.py b/src/prefect/storage/docker.py
index 074772b43e08..515fa04ee997 100644
--- a/src/prefect/storage/docker.py
+++ b/src/prefect/storage/docker.py
@@ -72,7 +72,7 @@ class Docker(Storage):
- registry_url (str, optional): URL of a registry to push the image to;
image will not be pushed if not provided
- base_image (str, optional): the base image for this when building this
- image (e.g. `python:3.6`), defaults to the `prefecthq/prefect` image
+ image (e.g. `python:3.7`), defaults to the `prefecthq/prefect` image
matching your python version and prefect core library version used
at runtime.
- dockerfile (str, optional): a path to a Dockerfile to use in building
@@ -177,7 +177,12 @@ def __init__(
version = prefect.__version__.split("+")
if prefect_version is None:
- self.prefect_version = "master" if len(version) > 1 else version[0]
+ self.prefect_version = (
+ "master"
+ # The release candidate is a special development version
+ if len(version) > 1 and not version[0].endswith("rc0")
+ else version[0]
+ )
else:
self.prefect_version = prefect_version
@@ -189,6 +194,16 @@ def __init__(
self.base_image = "prefecthq/prefect:{}-python{}".format(
self.prefect_version, python_version
)
+ elif self.prefect_version.endswith("rc0"):
+ # Development release candidate
+ self.base_image = f"prefecthq/prefect:{self.prefect_version}"
+ elif (
+ re.match(r"^[0-9]+\.[0-9]+rc[0-9]+$", self.prefect_version) is not None
+ ):
+ # Actual release candidate
+ self.base_image = "prefecthq/prefect:{}-python{}".format(
+ self.prefect_version, python_version
+ )
else:
# create an image from python:*-slim directly
self.base_image = "python:{}-slim".format(python_version)
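+        # For illustration (assumed tags), with python_version == "3.7":
+        #   "1.0rc0" (development RC) -> "prefecthq/prefect:1.0rc0"
+        #   "1.0rc1" (published RC)   -> "prefecthq/prefect:1.0rc1-python3.7"
+        #   anything unmatched        -> "python:3.7-slim"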
diff --git a/src/prefect/storage/gitlab.py b/src/prefect/storage/gitlab.py
index 6c7907ad3a17..56b49a110c4d 100644
--- a/src/prefect/storage/gitlab.py
+++ b/src/prefect/storage/gitlab.py
@@ -31,7 +31,7 @@ class GitLab(Storage):
- Push this `flow.py` file to the `my/repo` repository under `/flows/flow.py`.
- - Call `prefect register flow -f flow.py` to register this flow with GitLab storage.
+ - Call `prefect register -f flow.py` to register this flow with GitLab storage.
Args:
- repo (str): the project path (i.e., 'namespace/project') or ID
diff --git a/src/prefect/tasks/__init__.py b/src/prefect/tasks/__init__.py
index d74cec21e08f..7a9eeb65aa0d 100644
--- a/src/prefect/tasks/__init__.py
+++ b/src/prefect/tasks/__init__.py
@@ -11,3 +11,5 @@
import prefect.tasks.notifications
import prefect.tasks.secrets
import prefect.tasks.shell
+
+__all__ = ["Task"]
diff --git a/src/prefect/tasks/airbyte/__init__.py b/src/prefect/tasks/airbyte/__init__.py
index 90df61834acd..16edae924207 100644
--- a/src/prefect/tasks/airbyte/__init__.py
+++ b/src/prefect/tasks/airbyte/__init__.py
@@ -2,3 +2,5 @@
This module contains a task for triggering [Airbyte](https://airbyte.io/) connection sync jobs
"""
from .airbyte import AirbyteConnectionTask
+
+__all__ = ["AirbyteConnectionTask"]
diff --git a/src/prefect/tasks/airtable/__init__.py b/src/prefect/tasks/airtable/__init__.py
index 7539b4ada80d..8e31cc175664 100644
--- a/src/prefect/tasks/airtable/__init__.py
+++ b/src/prefect/tasks/airtable/__init__.py
@@ -7,3 +7,5 @@
raise ImportError(
'Using `prefect.tasks.airtable` requires Prefect to be installed with the "airtable" extra.'
) from err
+
+__all__ = ["ReadAirtableRow", "WriteAirtableRow"]
diff --git a/src/prefect/tasks/asana/__init__.py b/src/prefect/tasks/asana/__init__.py
index d3a8a451f495..025ddd3ce9c1 100644
--- a/src/prefect/tasks/asana/__init__.py
+++ b/src/prefect/tasks/asana/__init__.py
@@ -1 +1,3 @@
from prefect.tasks.asana.asana_task import OpenAsanaToDo
+
+__all__ = ["OpenAsanaToDo"]
diff --git a/src/prefect/tasks/aws/__init__.py b/src/prefect/tasks/aws/__init__.py
index b64159fa10d5..5228ca937172 100644
--- a/src/prefect/tasks/aws/__init__.py
+++ b/src/prefect/tasks/aws/__init__.py
@@ -19,3 +19,17 @@
raise ImportError(
'Using `prefect.tasks.aws` requires Prefect to be installed with the "aws" extra.'
) from err
+
+__all__ = [
+ "AWSClientWait",
+ "AWSSecretsManager",
+ "BatchSubmit",
+ "LambdaCreate",
+ "LambdaDelete",
+ "LambdaInvoke",
+ "LambdaList",
+ "S3Download",
+ "S3List",
+ "S3Upload",
+ "StepActivate",
+]
diff --git a/src/prefect/tasks/azure/__init__.py b/src/prefect/tasks/azure/__init__.py
index 962fd6896f34..d195ec061005 100644
--- a/src/prefect/tasks/azure/__init__.py
+++ b/src/prefect/tasks/azure/__init__.py
@@ -13,3 +13,11 @@
raise ImportError(
'Using `prefect.tasks.azure` requires Prefect to be installed with the "azure" extra.'
) from err
+
+__all__ = [
+ "BlobStorageDownload",
+ "BlobStorageUpload",
+ "CosmosDBCreateItem",
+ "CosmosDBQueryItems",
+ "CosmosDBReadItems",
+]
diff --git a/src/prefect/tasks/azureml/__init__.py b/src/prefect/tasks/azureml/__init__.py
index f972777dcd69..0f42ef1466e9 100644
--- a/src/prefect/tasks/azureml/__init__.py
+++ b/src/prefect/tasks/azureml/__init__.py
@@ -20,3 +20,13 @@
raise ImportError(
'Using `prefect.tasks.azureml` requires Prefect to be installed with the "azure" extra.'
) from err
+
+__all__ = [
+ "DatasetCreateFromDelimitedFiles",
+ "DatasetCreateFromFiles",
+ "DatasetCreateFromParquetFiles",
+ "DatastoreGet",
+ "DatastoreList",
+ "DatastoreRegisterBlobContainer",
+ "DatastoreUpload",
+]
diff --git a/src/prefect/tasks/census/__init__.py b/src/prefect/tasks/census/__init__.py
index bed56296a8b7..03fe47b37c67 100644
--- a/src/prefect/tasks/census/__init__.py
+++ b/src/prefect/tasks/census/__init__.py
@@ -2,3 +2,5 @@
This module contains a task for starting and monitoring [Census](https://getcensus.com/) sync jobs
"""
from .census import CensusSyncTask
+
+__all__ = ["CensusSyncTask"]
diff --git a/src/prefect/tasks/control_flow/__init__.py b/src/prefect/tasks/control_flow/__init__.py
index 353052a6d622..2d3970c58bdf 100644
--- a/src/prefect/tasks/control_flow/__init__.py
+++ b/src/prefect/tasks/control_flow/__init__.py
@@ -1,3 +1,5 @@
from prefect.tasks.control_flow.conditional import ifelse, switch, merge
from prefect.tasks.control_flow.filter import FilterTask
from prefect.tasks.control_flow.case import case
+
+__all__ = ["FilterTask", "case", "ifelse", "merge", "switch"]
diff --git a/src/prefect/tasks/database/__init__.py b/src/prefect/tasks/database/__init__.py
index 679268957454..8270cadf26c7 100644
--- a/src/prefect/tasks/database/__init__.py
+++ b/src/prefect/tasks/database/__init__.py
@@ -6,3 +6,5 @@
warnings.warn(
"SQLite tasks require sqlite3 to be installed", UserWarning, stacklevel=2
)
+
+__all__ = ["SQLiteQuery", "SQLiteScript"]
diff --git a/src/prefect/tasks/databricks/__init__.py b/src/prefect/tasks/databricks/__init__.py
index 0f46cde657ea..e8167c4b2cf7 100644
--- a/src/prefect/tasks/databricks/__init__.py
+++ b/src/prefect/tasks/databricks/__init__.py
@@ -4,3 +4,5 @@
from prefect.tasks.databricks.databricks_submitjob import DatabricksSubmitRun
from prefect.tasks.databricks.databricks_submitjob import DatabricksRunNow
+
+__all__ = ["DatabricksRunNow", "DatabricksSubmitRun"]
diff --git a/src/prefect/tasks/dbt/__init__.py b/src/prefect/tasks/dbt/__init__.py
index ac5a7fd79e4d..70651ad5d902 100644
--- a/src/prefect/tasks/dbt/__init__.py
+++ b/src/prefect/tasks/dbt/__init__.py
@@ -8,3 +8,5 @@
raise ImportError(
"Using `prefect.tasks.dbt` requires dbt to be installed."
) from err
+
+__all__ = ["DbtShellTask", "DbtCloudRunJob"]
diff --git a/src/prefect/tasks/docker/__init__.py b/src/prefect/tasks/docker/__init__.py
index 0389fae75e24..e1ea2890687f 100644
--- a/src/prefect/tasks/docker/__init__.py
+++ b/src/prefect/tasks/docker/__init__.py
@@ -33,3 +33,19 @@
RemoveContainer,
WaitOnContainer,
)
+
+__all__ = [
+ "BuildImage",
+ "CreateContainer",
+ "GetContainerLogs",
+ "ListContainers",
+ "ListImages",
+ "PullImage",
+ "PushImage",
+ "RemoveContainer",
+ "RemoveImage",
+ "StartContainer",
+ "StopContainer",
+ "TagImage",
+ "WaitOnContainer",
+]
diff --git a/src/prefect/tasks/dremio/__init__.py b/src/prefect/tasks/dremio/__init__.py
index b719aae02d19..08e743669e84 100644
--- a/src/prefect/tasks/dremio/__init__.py
+++ b/src/prefect/tasks/dremio/__init__.py
@@ -8,3 +8,5 @@
raise ImportError(
'Using `prefect.tasks.dremio` requires Prefect to be installed with the "dremio" extra.'
) from import_error
+
+__all__ = ["DremioFetch"]
diff --git a/src/prefect/tasks/dropbox/__init__.py b/src/prefect/tasks/dropbox/__init__.py
index 7c066a578ea3..43580120f68b 100644
--- a/src/prefect/tasks/dropbox/__init__.py
+++ b/src/prefect/tasks/dropbox/__init__.py
@@ -7,3 +7,5 @@
raise ImportError(
'Using `prefect.tasks.dropbox` requires Prefect to be installed with the "dropbox" extra.'
) from err
+
+__all__ = ["DropboxDownload"]
diff --git a/src/prefect/tasks/exasol/__init__.py b/src/prefect/tasks/exasol/__init__.py
index b5b70d6b3247..4400be58d31a 100644
--- a/src/prefect/tasks/exasol/__init__.py
+++ b/src/prefect/tasks/exasol/__init__.py
@@ -13,3 +13,10 @@
raise ImportError(
'Using `prefect.tasks.exasol` requires Prefect to be installed with the "exasol" extra.'
) from exc
+
+__all__ = [
+ "ExasolExecute",
+ "ExasolExportToFile",
+ "ExasolFetch",
+ "ExasolImportFromIterable",
+]
diff --git a/src/prefect/tasks/files/__init__.py b/src/prefect/tasks/files/__init__.py
index 27c2c88a57ae..5265f4203176 100644
--- a/src/prefect/tasks/files/__init__.py
+++ b/src/prefect/tasks/files/__init__.py
@@ -4,3 +4,5 @@
from .compression import Unzip, Zip
from .operations import Copy, Glob, Move, Remove
+
+__all__ = ["Copy", "Glob", "Move", "Remove", "Unzip", "Zip"]
diff --git a/src/prefect/tasks/fivetran/__init__.py b/src/prefect/tasks/fivetran/__init__.py
index 49458a3f8bb9..bc91863ecd69 100644
--- a/src/prefect/tasks/fivetran/__init__.py
+++ b/src/prefect/tasks/fivetran/__init__.py
@@ -2,3 +2,5 @@
This module contains a task for starting and monitoring [Fivetran](https://fivetran.com/) connector sync jobs
"""
from .fivetran import FivetranSyncTask
+
+__all__ = ["FivetranSyncTask"]
diff --git a/src/prefect/tasks/gcp/__init__.py b/src/prefect/tasks/gcp/__init__.py
index aa2fbb1dc2e9..0d5abf962965 100644
--- a/src/prefect/tasks/gcp/__init__.py
+++ b/src/prefect/tasks/gcp/__init__.py
@@ -23,3 +23,16 @@
raise ImportError(
'Using `prefect.tasks.gcp` requires Prefect to be installed with the "gcp" extra.'
) from err
+
+__all__ = [
+ "BigQueryLoadFile",
+ "BigQueryLoadGoogleCloudStorage",
+ "BigQueryStreamingInsert",
+ "BigQueryTask",
+ "CreateBigQueryTable",
+ "GCPSecret",
+ "GCSBlobExists",
+ "GCSCopy",
+ "GCSDownload",
+ "GCSUpload",
+]
diff --git a/src/prefect/tasks/github/__init__.py b/src/prefect/tasks/github/__init__.py
index 4f03c6e485c4..dc92afe72efa 100644
--- a/src/prefect/tasks/github/__init__.py
+++ b/src/prefect/tasks/github/__init__.py
@@ -6,3 +6,11 @@
from .prs import CreateGitHubPR
from .repos import GetRepoInfo, CreateBranch
from .comments import CreateIssueComment
+
+__all__ = [
+ "CreateBranch",
+ "CreateGitHubPR",
+ "CreateIssueComment",
+ "GetRepoInfo",
+ "OpenGitHubIssue",
+]
diff --git a/src/prefect/tasks/great_expectations/__init__.py b/src/prefect/tasks/great_expectations/__init__.py
index ae2e76a2dc2e..7072cd39286b 100644
--- a/src/prefect/tasks/great_expectations/__init__.py
+++ b/src/prefect/tasks/great_expectations/__init__.py
@@ -12,3 +12,5 @@
raise ImportError(
'Using `prefect.tasks.great_expectations` requires Prefect to be installed with the "ge" extra.'
) from err
+
+__all__ = ["RunGreatExpectationsValidation"]
diff --git a/src/prefect/tasks/gsheets/__init__.py b/src/prefect/tasks/gsheets/__init__.py
index b6d7d9617f65..4e6f5a7ce37a 100644
--- a/src/prefect/tasks/gsheets/__init__.py
+++ b/src/prefect/tasks/gsheets/__init__.py
@@ -10,3 +10,5 @@
raise ImportError(
'Using `prefect.tasks.gsheets` requires Prefect to be installed with the "gsheets" extra.'
) from err
+
+__all__ = ["ReadGsheetRow", "WriteGsheetRow"]
diff --git a/src/prefect/tasks/jira/__init__.py b/src/prefect/tasks/jira/__init__.py
index 317272e3556f..796893164f40 100644
--- a/src/prefect/tasks/jira/__init__.py
+++ b/src/prefect/tasks/jira/__init__.py
@@ -1,2 +1,4 @@
from prefect.tasks.jira.jira_task import JiraTask
from prefect.tasks.jira.jira_service_desk import JiraServiceDeskTask
+
+__all__ = ["JiraServiceDeskTask", "JiraTask"]
diff --git a/src/prefect/tasks/jupyter/__init__.py b/src/prefect/tasks/jupyter/__init__.py
index 02ac49f9a7b7..c51b472224fb 100644
--- a/src/prefect/tasks/jupyter/__init__.py
+++ b/src/prefect/tasks/jupyter/__init__.py
@@ -7,3 +7,5 @@
raise ImportError(
'Using `prefect.tasks.jupyter` requires Prefect to be installed with the "jupyter" extra.'
) from import_error
+
+__all__ = ["ExecuteNotebook"]
diff --git a/src/prefect/tasks/kafka/__init__.py b/src/prefect/tasks/kafka/__init__.py
index 250d7700941e..26f4665d1426 100644
--- a/src/prefect/tasks/kafka/__init__.py
+++ b/src/prefect/tasks/kafka/__init__.py
@@ -8,3 +8,5 @@
raise ImportError(
'Using `prefect.tasks.kafka` requires Prefect to be installed with the "kafka" extra.'
) from err
+
+__all__ = ["KafkaBatchConsume", "KafkaBatchProduce"]
diff --git a/src/prefect/tasks/kubernetes/__init__.py b/src/prefect/tasks/kubernetes/__init__.py
index 797206306a1b..e0558bd90df0 100644
--- a/src/prefect/tasks/kubernetes/__init__.py
+++ b/src/prefect/tasks/kubernetes/__init__.py
@@ -46,3 +46,34 @@
raise ImportError(
'Using `prefect.tasks.kubernetes` requires Prefect to be installed with the "kubernetes" extra.'
) from err
+
+__all__ = [
+ "ConnectGetNamespacedPodExec",
+ "CreateNamespacedDeployment",
+ "CreateNamespacedJob",
+ "CreateNamespacedPod",
+ "CreateNamespacedService",
+ "DeleteNamespacedDeployment",
+ "DeleteNamespacedJob",
+ "DeleteNamespacedPod",
+ "DeleteNamespacedService",
+ "KubernetesSecret",
+ "ListNamespacedDeployment",
+ "ListNamespacedJob",
+ "ListNamespacedPod",
+ "ListNamespacedService",
+ "PatchNamespacedDeployment",
+ "PatchNamespacedJob",
+ "PatchNamespacedPod",
+ "PatchNamespacedService",
+ "ReadNamespacedDeployment",
+ "ReadNamespacedJob",
+ "ReadNamespacedPod",
+ "ReadNamespacedPodLogs",
+ "ReadNamespacedService",
+ "ReplaceNamespacedDeployment",
+ "ReplaceNamespacedJob",
+ "ReplaceNamespacedPod",
+ "ReplaceNamespacedService",
+ "RunNamespacedJob",
+]
diff --git a/src/prefect/tasks/monday/__init__.py b/src/prefect/tasks/monday/__init__.py
index a513c52fea95..e8fb3445a2a6 100644
--- a/src/prefect/tasks/monday/__init__.py
+++ b/src/prefect/tasks/monday/__init__.py
@@ -6,3 +6,5 @@
"""
from prefect.tasks.monday.monday import CreateItem
+
+__all__ = ["CreateItem"]
diff --git a/src/prefect/tasks/mysql/__init__.py b/src/prefect/tasks/mysql/__init__.py
index 5ae992d2df55..e58f6c657372 100644
--- a/src/prefect/tasks/mysql/__init__.py
+++ b/src/prefect/tasks/mysql/__init__.py
@@ -8,3 +8,5 @@
raise ImportError(
'Using `prefect.tasks.mysql` requires Prefect to be installed with the "mysql" extra.'
) from import_error
+
+__all__ = ["MySQLExecute", "MySQLFetch"]
diff --git a/src/prefect/tasks/notifications/__init__.py b/src/prefect/tasks/notifications/__init__.py
index f43057155a96..f7c7bc23e831 100644
--- a/src/prefect/tasks/notifications/__init__.py
+++ b/src/prefect/tasks/notifications/__init__.py
@@ -6,3 +6,5 @@
from prefect.tasks.notifications.email_task import EmailTask
from prefect.tasks.notifications.slack_task import SlackTask
from prefect.tasks.notifications.pushbullet_task import PushbulletTask
+
+__all__ = ["EmailTask", "PushbulletTask", "SlackTask"]
diff --git a/src/prefect/tasks/postgres/__init__.py b/src/prefect/tasks/postgres/__init__.py
index 02046ed87064..f5df2702a57c 100644
--- a/src/prefect/tasks/postgres/__init__.py
+++ b/src/prefect/tasks/postgres/__init__.py
@@ -13,3 +13,5 @@
raise ImportError(
'Using `prefect.tasks.postgres` requires Prefect to be installed with the "postgres" extra.'
) from err
+
+__all__ = ["PostgresExecute", "PostgresExecuteMany", "PostgresFetch"]
diff --git a/src/prefect/tasks/prefect/__init__.py b/src/prefect/tasks/prefect/__init__.py
index 0b36c52567d9..52625ab15f5a 100644
--- a/src/prefect/tasks/prefect/__init__.py
+++ b/src/prefect/tasks/prefect/__init__.py
@@ -10,3 +10,12 @@
from prefect.tasks.prefect.flow_run import StartFlowRun
from prefect.tasks.prefect.flow_run_rename import RenameFlowRun
from prefect.tasks.prefect.flow_run_cancel import CancelFlowRun
+
+__all__ = [
+ "CancelFlowRun",
+ "RenameFlowRun",
+ "StartFlowRun",
+ "create_flow_run",
+ "get_task_run_result",
+ "wait_for_flow_run",
+]
diff --git a/src/prefect/tasks/prefect/flow_run.py b/src/prefect/tasks/prefect/flow_run.py
index f98155a7c68f..b2014dcebcfa 100644
--- a/src/prefect/tasks/prefect/flow_run.py
+++ b/src/prefect/tasks/prefect/flow_run.py
@@ -158,7 +158,7 @@ def create_flow_run(
idempotency_key=idempotency_key,
)
- run_url = client.get_cloud_url("flow-run", flow_run_id, as_user=False)
+ run_url = client.get_cloud_url("flow-run", flow_run_id)
logger.info(f"Created flow run {run_name_dsp!r}: {run_url}")
return flow_run_id
@@ -462,7 +462,7 @@ def run(
self.logger.debug(f"Flow Run {flow_run_id} created.")
self.logger.debug(f"Creating link artifact for Flow Run {flow_run_id}.")
- run_link = client.get_cloud_url("flow-run", flow_run_id, as_user=False)
+ run_link = client.get_cloud_url("flow-run", flow_run_id)
create_link_artifact(urlparse(run_link).path)
self.logger.info(f"Flow Run: {run_link}")
diff --git a/src/prefect/tasks/prometheus/__init__.py b/src/prefect/tasks/prometheus/__init__.py
index 15d3ec4d710d..80c16e290f04 100644
--- a/src/prefect/tasks/prometheus/__init__.py
+++ b/src/prefect/tasks/prometheus/__init__.py
@@ -10,3 +10,5 @@
raise ImportError(
'Using `prefect.tasks.prometheus` requires Prefect to be installed with the "prometheus" extra.'
) from err
+
+__all__ = ["PushAddGaugeToGateway", "PushGaugeToGateway"]
diff --git a/src/prefect/tasks/redis/__init__.py b/src/prefect/tasks/redis/__init__.py
index 0736f4755ae2..3620e73b45d3 100644
--- a/src/prefect/tasks/redis/__init__.py
+++ b/src/prefect/tasks/redis/__init__.py
@@ -9,3 +9,5 @@
raise ImportError(
'Using `prefect.tasks.redis` requires Prefect to be installed with the "redis" extra.'
) from err
+
+__all__ = ["RedisExecute", "RedisGet", "RedisSet"]
diff --git a/src/prefect/tasks/rss/__init__.py b/src/prefect/tasks/rss/__init__.py
index 251f6ab2991b..3e28e2108f56 100644
--- a/src/prefect/tasks/rss/__init__.py
+++ b/src/prefect/tasks/rss/__init__.py
@@ -7,3 +7,5 @@
raise ImportError(
'Using `prefect.tasks.rss` requires Prefect to be installed with the "rss" extra.'
) from err
+
+__all__ = ["ParseRSSFeed"]
diff --git a/src/prefect/tasks/secrets/__init__.py b/src/prefect/tasks/secrets/__init__.py
index 07dfb8888943..b923798a6b9f 100644
--- a/src/prefect/tasks/secrets/__init__.py
+++ b/src/prefect/tasks/secrets/__init__.py
@@ -7,3 +7,5 @@ class for interacting with other secret providers. Secrets always use a special
"""
from .base import SecretBase, PrefectSecret
from .env_var import EnvVarSecret
+
+__all__ = ["EnvVarSecret", "PrefectSecret", "SecretBase"]
diff --git a/src/prefect/tasks/sendgrid/__init__.py b/src/prefect/tasks/sendgrid/__init__.py
index b28d2cd48b21..31cab976f3a1 100644
--- a/src/prefect/tasks/sendgrid/__init__.py
+++ b/src/prefect/tasks/sendgrid/__init__.py
@@ -7,3 +7,5 @@
raise ImportError(
'Using `prefect.tasks.sendgrid` requires Prefect to be installed with the "sendgrid" extra.'
) from exc
+
+__all__ = ["SendEmail"]
diff --git a/src/prefect/tasks/snowflake/__init__.py b/src/prefect/tasks/snowflake/__init__.py
index 9025e1866af9..e155a9351c2e 100644
--- a/src/prefect/tasks/snowflake/__init__.py
+++ b/src/prefect/tasks/snowflake/__init__.py
@@ -12,3 +12,5 @@
raise ImportError(
'Using `prefect.tasks.snowflake` requires Prefect to be installed with the "snowflake" extra.'
) from err
+
+__all__ = ["SnowflakeQueriesFromFile", "SnowflakeQuery"]
diff --git a/src/prefect/tasks/sodaspark/__init__.py b/src/prefect/tasks/sodaspark/__init__.py
index 07c6e5292fec..1d56e5cfb143 100644
--- a/src/prefect/tasks/sodaspark/__init__.py
+++ b/src/prefect/tasks/sodaspark/__init__.py
@@ -8,3 +8,5 @@
raise ImportError(
'Using `prefect.tasks.sodaspark` requires Prefect to be installed with the "sodaspark" extra.'
) from err
+
+__all__ = ["SodaSparkScan"]
diff --git a/src/prefect/tasks/sodasql/__init__.py b/src/prefect/tasks/sodasql/__init__.py
index a18a3e067426..0b7057661989 100644
--- a/src/prefect/tasks/sodasql/__init__.py
+++ b/src/prefect/tasks/sodasql/__init__.py
@@ -8,3 +8,5 @@
raise ImportError(
'Using `prefect.tasks.sodasql` requires Prefect to be installed with the "sodasql" extra.'
) from err
+
+__all__ = ["SodaSQLScan"]
diff --git a/src/prefect/tasks/spacy/__init__.py b/src/prefect/tasks/spacy/__init__.py
index 38689be11d05..017ca4cacc68 100644
--- a/src/prefect/tasks/spacy/__init__.py
+++ b/src/prefect/tasks/spacy/__init__.py
@@ -13,3 +13,5 @@
raise ImportError(
'Using `prefect.tasks.spacy` requires Prefect to be installed with the "spacy" extra.'
) from exc
+
+__all__ = ["SpacyComponent", "SpacyNER", "SpacyNLP", "SpacyParser", "SpacyTagger"]
diff --git a/src/prefect/tasks/sql_server/__init__.py b/src/prefect/tasks/sql_server/__init__.py
index 781255e68ff0..500f69fac400 100644
--- a/src/prefect/tasks/sql_server/__init__.py
+++ b/src/prefect/tasks/sql_server/__init__.py
@@ -13,3 +13,5 @@
raise ImportError(
'Using `prefect.tasks.sql_server` requires Prefect to be installed with the "sql_server" extra.'
) from err
+
+__all__ = ["SqlServerExecute", "SqlServerExecuteMany", "SqlServerFetch"]
diff --git a/src/prefect/tasks/templates/__init__.py b/src/prefect/tasks/templates/__init__.py
index 7bbd2eef5e65..6fdbcca35338 100644
--- a/src/prefect/tasks/templates/__init__.py
+++ b/src/prefect/tasks/templates/__init__.py
@@ -4,3 +4,5 @@
from prefect.tasks.templates.jinja2 import JinjaTemplate
except ImportError:
pass
+
+__all__ = ["JinjaTemplate", "StringFormatter"]
diff --git a/src/prefect/tasks/trello/__init__.py b/src/prefect/tasks/trello/__init__.py
index 8a50d6f1285e..7413e7437e76 100644
--- a/src/prefect/tasks/trello/__init__.py
+++ b/src/prefect/tasks/trello/__init__.py
@@ -6,3 +6,5 @@
"""
from prefect.tasks.trello.trello import CreateCard
+
+__all__ = ["CreateCard"]
diff --git a/src/prefect/tasks/twitter/__init__.py b/src/prefect/tasks/twitter/__init__.py
index cc456620421f..cfddbed5d588 100644
--- a/src/prefect/tasks/twitter/__init__.py
+++ b/src/prefect/tasks/twitter/__init__.py
@@ -7,3 +7,5 @@
raise ImportError(
'Using `prefect.tasks.twitter` requires Prefect to be installed with the "twitter" extra.'
) from exc
+
+__all__ = ["LoadTweetReplies"]
diff --git a/src/prefect/utilities/__init__.py b/src/prefect/utilities/__init__.py
index 0d6b238a873b..ba634e0f432c 100644
--- a/src/prefect/utilities/__init__.py
+++ b/src/prefect/utilities/__init__.py
@@ -4,7 +4,6 @@
import prefect.utilities.configuration
import prefect.utilities.datetimes
import prefect.utilities.diagnostics
-import prefect.utilities.exceptions
import prefect.utilities.graphql
import prefect.utilities.notifications
import prefect.utilities.serialization
diff --git a/src/prefect/utilities/agent.py b/src/prefect/utilities/agent.py
index 1438ffde40db..db135b303281 100644
--- a/src/prefect/utilities/agent.py
+++ b/src/prefect/utilities/agent.py
@@ -23,42 +23,33 @@ def get_flow_image(flow_run: GraphQLResult, default: str = None) -> str:
from prefect.storage import Docker
from prefect.serialization.storage import StorageSchema
from prefect.serialization.run_config import RunConfigSchema
- from prefect.serialization.environment import EnvironmentSchema
has_run_config = getattr(flow_run, "run_config", None) is not None
- has_environment = getattr(flow_run.flow, "environment", None) is not None
storage = StorageSchema().load(flow_run.flow.storage)
- # Not having an environment implies run-config based flow, even if
- # run_config is None.
- if has_run_config or not has_environment:
- # Precedence:
- # - Image on docker storage
- # - Image on run_config
- # - Provided default
- # - `prefecthq/prefect` for flow's core version
- if isinstance(storage, Docker):
- return storage.name
- if has_run_config:
- run_config = RunConfigSchema().load(flow_run.run_config)
- if getattr(run_config, "image", None) is not None:
- return run_config.image
- if default is not None:
- return default
- # core_version should always be present, but just in case
- version = flow_run.flow.get("core_version") or "latest"
- cleaned_version = version.split("+")[0]
- return f"prefecthq/prefect:{cleaned_version}"
- else:
- environment = EnvironmentSchema().load(flow_run.flow.environment)
- if hasattr(environment, "metadata") and hasattr(environment.metadata, "image"):
- return environment.metadata.get("image")
- elif isinstance(storage, Docker):
- return storage.name
- raise ValueError(
- f"Storage for flow run {flow_run.id} is not of type Docker and "
- f"environment has no `image` attribute in the metadata field."
- )
+
+ # Precedence:
+ # - Image on docker storage
+ # - Image on run_config
+ # - Provided default
+ # - `prefecthq/prefect` for flow's core version
+
+ if isinstance(storage, Docker):
+ return storage.name
+
+ if has_run_config:
+ run_config = RunConfigSchema().load(flow_run.run_config)
+ if getattr(run_config, "image", None) is not None:
+ return run_config.image
+
+ if default is not None:
+ return default
+
+ # core_version should always be present, but just in case
+ version = flow_run.flow.get("core_version") or "latest"
+ cleaned_version = version.split("+")[0]
+
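+    # e.g. an illustrative core_version of "0.15.0+7.g1a2b3c" is cleaned to
+    # "0.15.0" and falls back to the "prefecthq/prefect:0.15.0" image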
+ return f"prefecthq/prefect:{cleaned_version}"
def get_flow_run_command(flow_run: GraphQLResult) -> str:
diff --git a/src/prefect/utilities/diagnostics.py b/src/prefect/utilities/diagnostics.py
index de3e2857f97c..3d429acb4bb4 100644
--- a/src/prefect/utilities/diagnostics.py
+++ b/src/prefect/utilities/diagnostics.py
@@ -121,7 +121,9 @@ def _replace_values(data: dict) -> Dict[Any, Any]:
return True
# Check presence of environment attributes
- if flow.environment:
+    # These were removed in 0.15.0 but we will include them in a diagnostics
+    # check until 0.16.0 for debugging purposes
+    if hasattr(flow, "environment") and flow.environment:
environment = {
"type": type(flow.environment).__name__, # type: ignore
**_replace_values(flow.environment.__dict__), # type: ignore
diff --git a/src/prefect/utilities/exceptions.py b/src/prefect/utilities/exceptions.py
deleted file mode 100644
index 48cc9d1d0452..000000000000
--- a/src/prefect/utilities/exceptions.py
+++ /dev/null
@@ -1,109 +0,0 @@
-"""
-The exception types in this module have been deprecated in favor of `prefect.exceptions`
-
-Users should not be using these directly but we leave these for a version in case they
-are being used in try/except clauses
-"""
-
-import warnings
-
-
-class PrefectError(Exception):
- def __init__(self, *args: object) -> None:
- if type(self) == PrefectError:
- warnings.warn(
- "`prefect.utilities.exceptions.PrefectError` has been moved to "
- "`prefect.exceptions.PrefectException` and will be removed in a future "
- "release. Please update your imports.",
- stacklevel=2,
- )
- super().__init__(*args)
-
-
-class TaskTimeoutError(PrefectError):
- def __init__(self, *args: object) -> None:
- warnings.warn(
- "`prefect.utilities.exceptions.TaskTimeoutError` has been moved to "
- "`prefect.exceptions.TaskTimeoutSignal` and will be removed in a future "
- "release. Please update your imports.",
- stacklevel=2,
- )
- super().__init__(*args)
-
-
-class ContextError(KeyError, PrefectError):
- def __init__(self, *args: object) -> None:
- warnings.warn(
- "`prefect.utilities.exceptions.ContextError` has been deprecated "
- "and will be removed in a future release.",
- stacklevel=2,
- )
- super().__init__(*args)
-
-
-class SerializationError(PrefectError):
- def __init__(self, *args: object) -> None:
- warnings.warn(
- "`prefect.utilities.exceptions.SerializationError` has been deprecated "
- "and will be removed in a future release.",
- stacklevel=2,
- )
- super().__init__(*args)
-
-
-class PrefectWarning(UserWarning):
- def __init__(self, *args: object) -> None:
- if type(self) == PrefectWarning:
- warnings.warn(
- "`prefect.utilities.exceptions.PrefectWarning` has been deprecated "
- "and will be removed in a future release.",
- stacklevel=2,
- )
- super().__init__(*args)
-
-
-class ClientError(PrefectError):
- def __init__(self, *args: object) -> None:
- if type(self) == ClientError:
- warnings.warn(
- "`prefect.utilities.exceptions.ClientError` has been moved to "
- "`prefect.exceptions.ClientError` and will be removed in a future "
- "release. Please update your imports.",
- stacklevel=2,
- )
- super().__init__(*args)
-
-
-class VersionLockError(PrefectError):
- def __init__(self, *args: object) -> None:
- warnings.warn(
- "`prefect.utilities.exceptions.VersionLockError` has been moved to "
- "`prefect.exceptions.VersionLockMismatchSignal` and will be removed in a "
- "future release. Please update your imports.",
- stacklevel=2,
- )
- super().__init__(*args)
-
-
-class AuthorizationError(ClientError):
- def __init__(self, *args: object) -> None:
- if type(self) == AuthorizationError:
- warnings.warn(
- "`prefect.utilities.exceptions.AuthorizationError` has been moved to "
- "`prefect.exceptions.AuthorizationError` and will be removed in a "
- "future release. Please update your imports.",
- stacklevel=2,
- )
- super().__init__(*args)
-
-
-class StorageError(PrefectError):
- def __init__(self, *args: object) -> None:
- if type(self) == StorageError:
- warnings.warn(
- "`prefect.utilities.exceptions.StorageError` has been moved to "
- "`prefect.exceptions.FlowStorageError` and will be removed in a future "
- "release. Please update your imports.",
- stacklevel=2,
- )
- super().__init__(*args)
diff --git a/src/prefect/utilities/executors.py b/src/prefect/utilities/executors.py
index cf940566d6f5..dc54055bd1f2 100644
--- a/src/prefect/utilities/executors.py
+++ b/src/prefect/utilities/executors.py
@@ -117,7 +117,6 @@ def subprocess_heartbeat(heartbeat_cmd: List[str], logger: Logger) -> Iterator[N
to_environment_variables(
prefect.context.config,
include={
- "cloud.auth_token",
"cloud.api_key",
"cloud.tenant_id",
"cloud.api",
diff --git a/src/prefect/utilities/logging.py b/src/prefect/utilities/logging.py
index 0ec5e80f7994..52b1bd0b094b 100644
--- a/src/prefect/utilities/logging.py
+++ b/src/prefect/utilities/logging.py
@@ -150,11 +150,6 @@ def emit(self, record: logging.LogRecord) -> None: # type: ignore
if not context.config.cloud.send_flow_run_logs:
return
- # backwards compatibility for `PREFECT__LOGGING__LOG_TO_CLOUD` which is
- # a deprecated config variable as of 0.14.20
- if not context.config.logging.get("log_to_cloud", True):
- return
-
# if its not during a backend flow run, don't emit
if not context.get("running_with_backend"):
return
diff --git a/src/prefect/utilities/notifications/__init__.py b/src/prefect/utilities/notifications/__init__.py
index 26928ee5818e..f9b23c63eaf7 100644
--- a/src/prefect/utilities/notifications/__init__.py
+++ b/src/prefect/utilities/notifications/__init__.py
@@ -3,3 +3,11 @@
from prefect.utilities.notifications.notifications import gmail_notifier
from prefect.utilities.notifications.notifications import slack_message_formatter
from prefect.utilities.notifications.jira_notification import jira_notifier
+
+__all__ = [
+ "callback_factory",
+ "gmail_notifier",
+ "jira_notifier",
+ "slack_message_formatter",
+ "slack_notifier",
+]
diff --git a/src/prefect/utilities/notifications/notifications.py b/src/prefect/utilities/notifications/notifications.py
index 99be913853ad..9268542212d5 100644
--- a/src/prefect/utilities/notifications/notifications.py
+++ b/src/prefect/utilities/notifications/notifications.py
@@ -155,11 +155,11 @@ def slack_message_formatter(
if isinstance(tracked_obj, prefect.Flow):
url = prefect.client.Client().get_cloud_url(
- "flow-run", prefect.context["flow_run_id"], as_user=False
+ "flow-run", prefect.context["flow_run_id"]
)
elif isinstance(tracked_obj, prefect.Task):
url = prefect.client.Client().get_cloud_url(
- "task-run", prefect.context.get("task_run_id", ""), as_user=False
+ "task-run", prefect.context.get("task_run_id", "")
)
if url:
diff --git a/src/prefect/utilities/storage.py b/src/prefect/utilities/storage.py
index 9b84c647fdb6..fbc7ee945768 100644
--- a/src/prefect/utilities/storage.py
+++ b/src/prefect/utilities/storage.py
@@ -18,10 +18,15 @@
def get_flow_image(flow: "Flow") -> str:
"""
+ DEPRECATED
+
Retrieve the image to use for this flow deployment. Will start by looking for
- an `image` value in the flow's `environment.metadata`. If not found then it will fall
+ an `image` value in the flow's `run_config`. If not found then it will fall
back to using the `flow.storage`.
+ This function was deprecated in 0.15.0 and `prefect.utilities.agent.get_flow_image`
+ should be used instead.
+
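+    For example (illustrative tag), a flow whose `run_config` carries
+    `image="prefecthq/prefect:0.15.0"` returns that image without consulting
+    storage.
+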
Args:
- flow (Flow): A flow object
@@ -30,21 +35,17 @@ def get_flow_image(flow: "Flow") -> str:
Raises:
- ValueError: if deployment attempted on unsupported Storage type and `image` not
- present in environment metadata
+          present on the run_config
"""
- environment = flow.environment
- if (
- environment is not None
- and hasattr(environment, "metadata")
- and environment.metadata.get("image")
- ):
- return environment.metadata.get("image", "")
+ run_config = flow.run_config
+    if run_config is not None and getattr(run_config, "image", None) is not None:
+ return run_config.image # type: ignore
else:
storage = flow.storage
if not isinstance(storage, prefect.storage.Docker):
raise ValueError(
f"Storage for flow run {flow.name} is not of type Docker and "
- f"environment has no `image` attribute in the metadata field."
+                f"run_config has no `image` attribute."
)
return storage.name
diff --git a/tests/agent/test_agent.py b/tests/agent/test_agent.py
index 095649ed75ab..0af4648d8210 100644
--- a/tests/agent/test_agent.py
+++ b/tests/agent/test_agent.py
@@ -25,13 +25,13 @@ def test_multiple_agent_init_doesnt_duplicate_logs(cloud_api):
def test_agent_config_options(cloud_api):
- with set_temporary_config({"cloud.agent.auth_token": "TEST_TOKEN"}):
+ with set_temporary_config({"cloud.api_key": "TEST_KEY"}):
agent = Agent()
assert agent.agent_config_id == None
assert agent.labels == []
assert agent.env_vars == dict()
assert agent.max_polls is None
- assert agent.client.get_auth_token() == "TEST_TOKEN"
+ assert agent.client.api_key == "TEST_KEY"
assert agent.name == "agent"
assert agent.logger
assert agent.logger.name == "agent"
@@ -56,7 +56,7 @@ def test_agent_name_set_options(monkeypatch, cloud_api):
def test_agent_log_level(cloud_api):
- with set_temporary_config({"cloud.agent.auth_token": "TEST_TOKEN"}):
+ with set_temporary_config({"cloud.api_key": "TEST_KEY"}):
agent = Agent()
assert agent.logger.level == 20
@@ -64,7 +64,7 @@ def test_agent_log_level(cloud_api):
def test_agent_log_level_responds_to_config(cloud_api):
with set_temporary_config(
{
- "cloud.agent.auth_token": "TEST_TOKEN",
+ "cloud.api_key": "TEST_KEY",
"cloud.agent.level": "DEBUG",
"cloud.agent.agent_address": "http://localhost:8000",
}
@@ -77,7 +77,7 @@ def test_agent_log_level_responds_to_config(cloud_api):
@pytest.mark.parametrize("toggle", [True, False])
def test_agent_cloud_logs_responds_to_config_by_default(cloud_api, toggle):
with set_temporary_config(
- {"cloud.agent.auth_token": "TEST_TOKEN", "cloud.send_flow_run_logs": toggle}
+ {"cloud.api_key": "TEST_KEY", "cloud.send_flow_run_logs": toggle}
):
agent = Agent()
assert agent.log_to_cloud is toggle
@@ -87,14 +87,14 @@ def test_agent_cloud_logs_responds_to_config_by_default(cloud_api, toggle):
def test_agent_cloud_logs_allows_explicit_override(cloud_api, toggle):
# Set the config to the opposite so we can ensure it's ignored
with set_temporary_config(
- {"cloud.agent.auth_token": "TEST_TOKEN", "cloud.send_flow_run_logs": not toggle}
+ {"cloud.api_key": "TEST_KEY", "cloud.send_flow_run_logs": not toggle}
):
agent = Agent(no_cloud_logs=not toggle)
assert agent.log_to_cloud is toggle
def test_agent_env_vars(cloud_api):
- with set_temporary_config({"cloud.agent.auth_token": "TEST_TOKEN"}):
+ with set_temporary_config({"cloud.api_key": "TEST_KEY"}):
agent = Agent(env_vars=dict(AUTH_THING="foo"))
assert agent.env_vars == dict(AUTH_THING="foo")
@@ -102,7 +102,7 @@ def test_agent_env_vars(cloud_api):
def test_agent_env_vars_from_config(cloud_api):
with set_temporary_config(
{
- "cloud.agent.auth_token": "TEST_TOKEN",
+ "cloud.api_key": "TEST_KEY",
"cloud.agent.env_vars": {"test1": "test2", "test3": "test4"},
}
):
@@ -111,13 +111,13 @@ def test_agent_env_vars_from_config(cloud_api):
def test_agent_max_polls(cloud_api):
- with set_temporary_config({"cloud.agent.auth_token": "TEST_TOKEN"}):
+ with set_temporary_config({"cloud.api_key": "TEST_KEY"}):
agent = Agent(max_polls=10)
assert agent.max_polls == 10
def test_agent_labels(cloud_api):
- with set_temporary_config({"cloud.agent.auth_token": "TEST_TOKEN"}):
+ with set_temporary_config({"cloud.api_key": "TEST_KEY"}):
agent = Agent(labels=["test", "2"])
assert agent.labels == ["test", "2"]
@@ -130,35 +130,15 @@ def test_agent_labels_from_config_var(cloud_api):
def test_agent_log_level_debug(cloud_api):
with set_temporary_config(
- {"cloud.agent.auth_token": "TEST_TOKEN", "cloud.agent.level": "DEBUG"}
+ {"cloud.api_key": "TEST_KEY", "cloud.agent.level": "DEBUG"}
):
agent = Agent()
assert agent.logger.level == 10
-def test_agent_fails_no_auth_token(cloud_api):
- with pytest.raises(RuntimeError, match="Error while contacting API") as err:
+def test_agent_fails_no_api_key(cloud_api):
+ with pytest.raises(ValueError, match="You have not set an API key"):
Agent().start()
- assert isinstance(err.value.__cause__, AuthorizationError)
-
-
-def test_agent_fails_no_runner_token(monkeypatch, cloud_api):
- post = MagicMock(
- return_value=MagicMock(
- json=MagicMock(
- return_value=dict(
- data=dict(auth_info=MagicMock(api_token_scope="USER"))
- )
- )
- )
- )
- session = MagicMock()
- session.return_value.post = post
- monkeypatch.setattr("requests.Session", session)
-
- with pytest.raises(RuntimeError, match="Error while contacting API") as err:
- Agent().start()
- assert isinstance(err.value.__cause__, AuthorizationError)
def test_get_ready_flow_runs(monkeypatch, cloud_api):
@@ -354,8 +334,7 @@ def test_heartbeat_is_noop_by_default(cloud_api):
def test_setup_api_connection_runs_test_query(test_query_succeeds, cloud_api):
agent = Agent()
- # Ignore the token check and registration
- agent._verify_token = MagicMock()
+ # Ignore registration
agent._register_agent = MagicMock()
if test_query_succeeds:
@@ -549,8 +528,7 @@ def test_setup_api_connection_attaches_agent_id(cloud_api):
# Return a fake id from the "backend"
agent.client.register_agent = MagicMock(return_value="ID")
- # Ignore the token check and test graphql query
- agent._verify_token = MagicMock()
+ # Ignore the test graphql query
agent.client.graphql = MagicMock()
agent._setup_api_connection()
@@ -600,7 +578,7 @@ def test_agent_api_health_check(cloud_api):
assert not agent._api_server_thread.is_alive()
-def test_agent_poke_api(monkeypatch, runner_token, cloud_api):
+def test_agent_poke_api(monkeypatch, cloud_api):
import threading
requests = pytest.importorskip("requests")
@@ -647,6 +625,10 @@ def _poke_agent(agent_address):
agent_start_time = time.time()
agent = Agent(agent_address=agent_address, max_polls=1)
+
+ # Ignore registration
+ agent._register_agent = MagicMock()
+
# Override loop interval to 5 seconds.
agent._loop_intervals = {0: 5.0}
agent.start()
@@ -660,7 +642,7 @@ def _poke_agent(agent_address):
assert setup_api_connection.call_count == 1
-def test_catch_errors_in_heartbeat_thread(monkeypatch, runner_token, cloud_api, caplog):
+def test_catch_errors_in_heartbeat_thread(monkeypatch, cloud_api, caplog):
"""Check that errors in the heartbeat thread are caught, logged, and the thread keeps going"""
monkeypatch.setattr(
"prefect.agent.agent.Agent._submit_deploy_flow_run_jobs", MagicMock()
@@ -668,9 +650,14 @@ def test_catch_errors_in_heartbeat_thread(monkeypatch, runner_token, cloud_api,
monkeypatch.setattr(
"prefect.agent.agent.Agent._setup_api_connection", MagicMock(return_value="id")
)
+
heartbeat = MagicMock(side_effect=ValueError)
monkeypatch.setattr("prefect.agent.agent.Agent.heartbeat", heartbeat)
agent = Agent(max_polls=2)
+
+ # Ignore registration
+ agent._register_agent = MagicMock()
+
agent.heartbeat_period = 0.1
agent.start()
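Every test in this file swaps the removed `cloud.agent.auth_token` setting for
`cloud.api_key`, and the `_verify_token` mocks disappear because the agent no
longer performs token verification. The recurring pattern, as a minimal sketch
(assuming the suite's `cloud_api` fixture):

    from prefect.agent import Agent
    from prefect.utilities.configuration import set_temporary_config

    def test_agent_reads_api_key(cloud_api):
        with set_temporary_config({"cloud.api_key": "TEST_KEY"}):
            agent = Agent()
            # The client exposes the key directly; get_auth_token() is gone.
            assert agent.client.api_key == "TEST_KEY"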
diff --git a/tests/agent/test_docker_agent.py b/tests/agent/test_docker_agent.py
index aa691f912d27..e6ffe504b994 100644
--- a/tests/agent/test_docker_agent.py
+++ b/tests/agent/test_docker_agent.py
@@ -2,12 +2,11 @@
from unittest.mock import MagicMock
import pytest
+import uuid
import prefect
-from prefect.utilities.compatibility import nullcontext
from prefect import context
from prefect.agent.docker.agent import DockerAgent, _stream_container_logs
-from prefect.environments import LocalEnvironment
from prefect.storage import Docker, Local
from prefect.run_configs import DockerRun, LocalRun, UniversalRun
from prefect.utilities.configuration import set_temporary_config
@@ -16,14 +15,6 @@
docker = pytest.importorskip("docker")
-@pytest.fixture
-def config_with_token(cloud_api):
- with set_temporary_config(
- {"cloud.agent.auth_token": "TEST_TOKEN", "cloud.send_flow_run_logs": True}
- ):
- yield
-
-
@pytest.fixture
def api(monkeypatch):
client = MagicMock()
@@ -53,25 +44,25 @@ def test_docker_agent_init(api):
("win32", "npipe:////./pipe/docker_engine"),
],
)
-def test_docker_agent_config_options(platform, url, monkeypatch, config_with_token):
+def test_docker_agent_config_options(platform, url, monkeypatch, config_with_api_key):
api = MagicMock()
monkeypatch.setattr("docker.APIClient", api)
monkeypatch.setattr("prefect.agent.docker.agent.platform", platform)
agent = DockerAgent(name="test")
assert agent.name == "test"
- assert agent.client.get_auth_token() == "TEST_TOKEN"
+ assert agent.client.api_key == config_with_api_key.cloud.api_key
assert agent.logger
assert not agent.no_pull
assert api.call_args[1]["base_url"] == url
-def test_docker_agent_config_options_populated(monkeypatch, config_with_token):
+def test_docker_agent_config_options_populated(monkeypatch, config_with_api_key):
api = MagicMock()
monkeypatch.setattr("docker.APIClient", api)
agent = DockerAgent(base_url="url", no_pull=True, docker_client_timeout=123)
- assert agent.client.get_auth_token() == "TEST_TOKEN"
+ assert agent.client.api_key == config_with_api_key.cloud.api_key
assert agent.logger
assert agent.no_pull
assert api.call_args[1]["base_url"] == "url"
@@ -153,48 +144,52 @@ def test_populate_env_vars(api, backend):
assert env_vars == expected_vars
-def test_environment_has_agent_token_from_config(api, config_with_token):
+def test_environment_has_api_key_from_config(api, config_with_api_key):
agent = DockerAgent()
env_vars = agent.populate_env_vars(
- GraphQLResult({"id": "id", "name": "name", "flow": {"id": "foo"}}), "test-image"
+ GraphQLResult({"id": "id", "name": "name", "flow": {"id": "foo"}}),
+ "test-image",
)
- assert env_vars["PREFECT__CLOUD__AUTH_TOKEN"] == "TEST_TOKEN"
+ assert env_vars["PREFECT__CLOUD__API_KEY"] == config_with_api_key.cloud.api_key
+ assert env_vars["PREFECT__CLOUD__AUTH_TOKEN"] == config_with_api_key.cloud.api_key
+ assert env_vars["PREFECT__CLOUD__TENANT_ID"] == config_with_api_key.cloud.tenant_id
-@pytest.mark.parametrize("tenant_id", ["ID", None])
-def test_environment_has_api_key_from_config(api, tenant_id):
- with set_temporary_config(
- {
- "cloud.api_key": "TEST_KEY",
- "cloud.tenant_id": tenant_id,
- "cloud.agent.auth_token": None,
- }
- ):
+def test_environment_has_tenant_id_from_server(api, config_with_api_key):
+ tenant_id = uuid.uuid4()
+
+ with set_temporary_config({"cloud.tenant_id": None}):
agent = DockerAgent()
- agent.client._get_auth_tenant = MagicMock(return_value="ID")
+ agent.client._get_auth_tenant = MagicMock(return_value=tenant_id)
- env_vars = agent.populate_env_vars(
+ env = agent.populate_env_vars(
GraphQLResult({"id": "id", "name": "name", "flow": {"id": "foo"}}),
"test-image",
)
- assert env_vars["PREFECT__CLOUD__API_KEY"] == "TEST_KEY"
- assert env_vars["PREFECT__CLOUD__AUTH_TOKEN"] == "TEST_KEY"
- assert env_vars["PREFECT__CLOUD__TENANT_ID"] == "ID"
+ assert env["PREFECT__CLOUD__API_KEY"] == config_with_api_key.cloud.api_key
+ assert env["PREFECT__CLOUD__AUTH_TOKEN"] == config_with_api_key.cloud.api_key
+ assert env["PREFECT__CLOUD__TENANT_ID"] == tenant_id
-@pytest.mark.parametrize("tenant_id", ["ID", None])
-def test_environment_has_api_key_from_disk(api, monkeypatch, tenant_id):
+def test_environment_has_api_key_from_disk(api, monkeypatch):
"""Check that the API key is passed through from the on disk cache"""
+
+ tenant_id = str(uuid.uuid4())
+
monkeypatch.setattr(
"prefect.Client.load_auth_from_disk",
- MagicMock(return_value={"api_key": "TEST_KEY", "tenant_id": tenant_id}),
+ MagicMock(
+ return_value={
+ "api_key": "TEST_KEY",
+ "tenant_id": tenant_id,
+ }
+ ),
)
agent = DockerAgent()
- agent.client._get_auth_tenant = MagicMock(return_value="ID")
env = agent.populate_env_vars(
GraphQLResult({"id": "id", "name": "name", "flow": {"id": "foo"}}),
@@ -203,7 +198,7 @@ def test_environment_has_api_key_from_disk(api, monkeypatch, tenant_id):
assert env["PREFECT__CLOUD__API_KEY"] == "TEST_KEY"
assert env["PREFECT__CLOUD__AUTH_TOKEN"] == "TEST_KEY"
- assert env["PREFECT__CLOUD__TENANT_ID"] == "ID"
+ assert env["PREFECT__CLOUD__TENANT_ID"] == tenant_id
def test_populate_env_vars_includes_agent_labels(api):
@@ -216,7 +211,7 @@ def test_populate_env_vars_includes_agent_labels(api):
@pytest.mark.parametrize("flag", [True, False])
-def test_populate_env_vars_sets_log_to_cloud(flag, api, config_with_token):
+def test_populate_env_vars_sets_log_to_cloud(flag, api, config_with_api_key):
agent = DockerAgent(labels=["42", "marvin"], no_cloud_logs=flag)
env_vars = agent.populate_env_vars(
@@ -373,7 +368,6 @@ def test_docker_agent_deploy_flow(core_version, command, api):
"storage": Docker(
registry_url="test", image_name="name", image_tag="tag"
).serialize(),
- "environment": LocalEnvironment().serialize(),
"core_version": core_version,
}
),
@@ -398,38 +392,6 @@ def test_docker_agent_deploy_flow(core_version, command, api):
assert api.start.call_args[1]["container"] == "container_id"
-def test_docker_agent_deploy_flow_uses_environment_metadata(api):
- agent = DockerAgent()
- agent.deploy_flow(
- flow_run=GraphQLResult(
- {
- "flow": GraphQLResult(
- {
- "id": "foo",
- "name": "flow-name",
- "storage": Local().serialize(),
- "environment": LocalEnvironment(
- metadata={"image": "repo/name:tag"}
- ).serialize(),
- "core_version": "0.13.0",
- }
- ),
- "id": "id",
- "name": "name",
- }
- )
- )
-
- assert api.pull.called
- assert api.create_container.called
- assert api.start.called
-
- assert api.create_host_config.call_args[1]["auto_remove"] is True
- assert api.create_container.call_args[1]["command"] == "prefect execute flow-run"
- assert api.create_container.call_args[1]["host_config"]["AutoRemove"] is True
- assert api.start.call_args[1]["container"] == "container_id"
-
-
@pytest.mark.parametrize("collision_count", (0, 1, 5))
def test_docker_agent_deploy_flow_sets_container_name_with_index(api, collision_count):
"""
@@ -463,9 +425,6 @@ def fail_if_name_exists(*args, **kwargs):
"id": "foo",
"name": "flow-name",
"storage": Local().serialize(),
- "environment": LocalEnvironment(
- metadata={"image": "repo/name:tag"}
- ).serialize(),
"core_version": "0.13.0",
}
),
@@ -512,9 +471,6 @@ def test_docker_agent_deploy_flow_sets_container_name_with_slugify(
"id": "foo",
"name": "flow-name",
"storage": Local().serialize(),
- "environment": LocalEnvironment(
- metadata={"image": "repo/name:tag"}
- ).serialize(),
"core_version": "0.13.0",
}
),
@@ -658,34 +614,6 @@ def test_docker_agent_deploy_flow_unsupported_run_config(api):
assert not api.pull.called
-def test_docker_agent_deploy_flow_storage_raises(monkeypatch, api):
- monkeypatch.setattr("prefect.agent.agent.Client", MagicMock())
-
- agent = DockerAgent()
-
- with pytest.raises(ValueError):
- agent.deploy_flow(
- flow_run=GraphQLResult(
- {
- "flow": GraphQLResult(
- {
- "storage": Local().serialize(),
- "id": "foo",
- "name": "flow-name",
- "environment": LocalEnvironment().serialize(),
- "core_version": "0.13.0",
- }
- ),
- "id": "id",
- "name": "name",
- "version": "version",
- }
- )
- )
-
- assert not api.pull.called
-
-
def test_docker_agent_deploy_flow_no_pull(api):
agent = DockerAgent(no_pull=True)
agent.deploy_flow(
@@ -698,34 +626,6 @@ def test_docker_agent_deploy_flow_no_pull(api):
"storage": Docker(
registry_url="test", image_name="name", image_tag="tag"
).serialize(),
- "environment": LocalEnvironment().serialize(),
- "core_version": "0.13.0",
- }
- ),
- "id": "id",
- "name": "name",
- }
- )
- )
-
- assert not api.pull.called
- assert api.create_container.called
- assert api.start.called
-
-
-def test_docker_agent_deploy_flow_no_pull_using_environment_metadata(api):
- agent = DockerAgent(no_pull=True)
- agent.deploy_flow(
- flow_run=GraphQLResult(
- {
- "flow": GraphQLResult(
- {
- "id": "foo",
- "name": "flow-name",
- "storage": Local().serialize(),
- "environment": LocalEnvironment(
- metadata={"image": "name:tag"}
- ).serialize(),
"core_version": "0.13.0",
}
),
@@ -753,7 +653,6 @@ def test_docker_agent_deploy_flow_reg_allow_list_allowed(api):
"storage": Docker(
registry_url="test1", image_name="name", image_tag="tag"
).serialize(),
- "environment": LocalEnvironment().serialize(),
"core_version": "0.13.0",
}
),
@@ -782,7 +681,6 @@ def test_docker_agent_deploy_flow_reg_allow_list_not_allowed(api):
"storage": Docker(
registry_url="test2", image_name="name", image_tag="tag"
).serialize(),
- "environment": LocalEnvironment().serialize(),
"core_version": "0.13.0",
}
),
@@ -818,7 +716,6 @@ def test_docker_agent_deploy_flow_show_flow_logs(api, monkeypatch):
"storage": Docker(
registry_url="test", image_name="name", image_tag="tag"
).serialize(),
- "environment": LocalEnvironment().serialize(),
"core_version": "0.13.0",
}
),
@@ -869,7 +766,6 @@ def test_docker_agent_deploy_flow_no_registry_does_not_pull(api):
"storage": Docker(
registry_url="", image_name="name", image_tag="tag"
).serialize(),
- "environment": LocalEnvironment().serialize(),
"core_version": "0.13.0",
}
),
@@ -1140,7 +1036,6 @@ def test_docker_agent_networks(api):
"storage": Docker(
registry_url="test", image_name="name", image_tag="tag"
).serialize(),
- "environment": LocalEnvironment().serialize(),
"core_version": "0.13.0",
}
),
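The deleted `config_with_token` fixture is replaced throughout by a
`config_with_api_key` fixture defined in the suite's conftest (not shown in
this diff). A hedged sketch of what the tests above require from it, namely
that it yields the temporary config so assertions can read `cloud.api_key`
and `cloud.tenant_id` back:

    import uuid

    import pytest
    from prefect.utilities.configuration import set_temporary_config

    @pytest.fixture
    def config_with_api_key(cloud_api):
        # Yield the patched config so tests can compare against the
        # generated key and tenant id; the real fixture may differ.
        with set_temporary_config(
            {"cloud.api_key": "TEST_KEY", "cloud.tenant_id": str(uuid.uuid4())}
        ) as config:
            yield config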
diff --git a/tests/agent/test_ecs_agent.py b/tests/agent/test_ecs_agent.py
index ee3a8a8a870f..f9feb48c0ad7 100644
--- a/tests/agent/test_ecs_agent.py
+++ b/tests/agent/test_ecs_agent.py
@@ -1,5 +1,7 @@
from unittest.mock import MagicMock
+
+import uuid
import box
import pytest
import yaml
@@ -422,9 +424,10 @@ def test_generate_task_definition_multiple_containers(self):
class TestGetRunTaskKwargs:
- def get_run_task_kwargs(self, run_config, **kwargs):
+ def get_run_task_kwargs(self, run_config, tenant_id: str = None, **kwargs):
agent = ECSAgent(**kwargs)
- agent.client._get_auth_tenant = MagicMock(return_value="ID")
+ if tenant_id:
+ agent.client._get_auth_tenant = MagicMock(return_value=tenant_id)
flow_run = GraphQLResult(
{
"flow": GraphQLResult(
@@ -563,36 +566,34 @@ def test_get_run_task_kwargs_environment(self, tmpdir, backend):
"CUSTOM4": "VALUE4",
}
- def test_environment_has_agent_token_from_config(self):
- with set_temporary_config({"cloud.agent.auth_token": "TEST_TOKEN"}):
- env_list = self.get_run_task_kwargs(ECSRun())["overrides"][
- "containerOverrides"
- ][0]["environment"]
- env = {item["name"]: item["value"] for item in env_list}
+ def test_environment_has_api_key_from_config(self, config_with_api_key):
+ env_list = self.get_run_task_kwargs(ECSRun())["overrides"][
+ "containerOverrides"
+ ][0]["environment"]
+ env = {item["name"]: item["value"] for item in env_list}
- assert env["PREFECT__CLOUD__AUTH_TOKEN"] == "TEST_TOKEN"
+ assert env["PREFECT__CLOUD__API_KEY"] == config_with_api_key.cloud.api_key
+ assert env["PREFECT__CLOUD__AUTH_TOKEN"] == config_with_api_key.cloud.api_key
+ assert env["PREFECT__CLOUD__TENANT_ID"] == config_with_api_key.cloud.tenant_id
- @pytest.mark.parametrize("tenant_id", ["ID", None])
- def test_environment_has_api_key_from_config(self, tenant_id):
- with set_temporary_config(
- {
- "cloud.api_key": "TEST_KEY",
- "cloud.tenant_id": tenant_id,
- "cloud.agent.auth_token": None,
- }
- ):
- env_list = self.get_run_task_kwargs(ECSRun())["overrides"][
- "containerOverrides"
- ][0]["environment"]
+ def test_environment_has_tenant_id_from_server(self, config_with_api_key):
+ tenant_id = uuid.uuid4()
+
+ with set_temporary_config({"cloud.tenant_id": None}):
+
+ env_list = self.get_run_task_kwargs(ECSRun(), tenant_id=tenant_id)[
+ "overrides"
+ ]["containerOverrides"][0]["environment"]
env = {item["name"]: item["value"] for item in env_list}
- assert env["PREFECT__CLOUD__API_KEY"] == "TEST_KEY"
- assert env["PREFECT__CLOUD__AUTH_TOKEN"] == "TEST_KEY"
- assert env["PREFECT__CLOUD__TENANT_ID"] == "ID"
+ assert env["PREFECT__CLOUD__API_KEY"] == config_with_api_key.cloud.api_key
+ assert env["PREFECT__CLOUD__AUTH_TOKEN"] == config_with_api_key.cloud.api_key
+ assert env["PREFECT__CLOUD__TENANT_ID"] == tenant_id
- @pytest.mark.parametrize("tenant_id", ["ID", None])
- def test_environment_has_api_key_from_disk(self, monkeypatch, tenant_id):
+ def test_environment_has_api_key_from_disk(self, monkeypatch):
"""Check that the API key is passed through from the on disk cache"""
+ tenant_id = str(uuid.uuid4())
+
monkeypatch.setattr(
"prefect.Client.load_auth_from_disk",
MagicMock(return_value={"api_key": "TEST_KEY", "tenant_id": tenant_id}),
@@ -605,7 +606,7 @@ def test_environment_has_api_key_from_disk(self, monkeypatch, tenant_id):
assert env["PREFECT__CLOUD__API_KEY"] == "TEST_KEY"
assert env["PREFECT__CLOUD__AUTH_TOKEN"] == "TEST_KEY"
- assert env["PREFECT__CLOUD__TENANT_ID"] == "ID"
+ assert env["PREFECT__CLOUD__TENANT_ID"] == tenant_id
@pytest.mark.parametrize(
"config, agent_env_vars, run_config_env_vars, expected_logging_level",
diff --git a/tests/agent/test_fargate_agent.py b/tests/agent/test_fargate_agent.py
deleted file mode 100644
index 08eae627decf..000000000000
--- a/tests/agent/test_fargate_agent.py
+++ /dev/null
@@ -1,1998 +0,0 @@
-from unittest.mock import MagicMock
-
-import pytest
-
-pytest.importorskip("boto3")
-pytest.importorskip("botocore")
-pytestmark = pytest.mark.filterwarnings("ignore:`FargateAgent` is deprecated")
-
-from botocore.exceptions import ClientError
-
-import prefect
-from prefect.agent.fargate import FargateAgent
-from prefect.environments import LocalEnvironment
-from prefect.storage import Docker, Local
-from prefect.utilities.configuration import set_temporary_config
-from prefect.utilities.graphql import GraphQLResult
-from prefect.utilities.aws import _CLIENT_CACHE
-
-
-@pytest.fixture(autouse=True)
-def clear_boto3_cache():
- _CLIENT_CACHE.clear()
-
-
-def test_fargate_agent_init(monkeypatch, cloud_api):
- boto3_client = MagicMock()
- monkeypatch.setattr("boto3.client", boto3_client)
-
- agent = FargateAgent()
- assert agent
- assert agent.boto3_client
-
-
-def test_fargate_agent_init_with_network_mode(monkeypatch, cloud_api):
- boto3_client = MagicMock()
- monkeypatch.setattr("boto3.client", boto3_client)
-
- agent = FargateAgent(networkMode="bridge")
- assert agent
- assert agent.boto3_client
- assert agent.task_definition_kwargs["networkMode"] == "bridge"
-
-
-def test_fargate_agent_config_options_default(monkeypatch, cloud_api):
- boto3_client = MagicMock()
- monkeypatch.setattr("boto3.client", boto3_client)
-
- agent = FargateAgent()
- assert agent
- assert agent.agent_config_id == None
- assert agent.labels == []
- assert agent.name == "agent"
- assert agent.task_definition_kwargs == {}
- assert agent.task_run_kwargs == {}
- assert agent.boto3_client
-
-
-def test_fargate_agent_config_options(monkeypatch, cloud_api):
- boto3_client = MagicMock()
- monkeypatch.setattr("boto3.client", boto3_client)
-
- botocore_config = MagicMock()
- monkeypatch.setattr("botocore.config.Config", botocore_config)
-
- # Client args
- monkeypatch.setenv("AWS_ACCESS_KEY_ID", "")
- monkeypatch.setenv("AWS_SECRET_ACCESS_KEY", "")
- monkeypatch.setenv("AWS_SESSION_TOKEN", "")
- monkeypatch.setenv("REGION_NAME", "")
-
- monkeypatch.delenv("AWS_ACCESS_KEY_ID")
- monkeypatch.delenv("AWS_SECRET_ACCESS_KEY")
- monkeypatch.delenv("AWS_SESSION_TOKEN")
- monkeypatch.delenv("REGION_NAME")
-
- with set_temporary_config({"cloud.agent.auth_token": "TEST_TOKEN"}):
- agent = FargateAgent(name="test", labels=["test"])
- assert agent
- assert agent.labels == ["test"]
- assert agent.name == "test"
- assert agent.client.get_auth_token() == "TEST_TOKEN"
- assert agent.logger
- assert agent.boto3_client
-
- assert boto3_client.call_args[0][0] == "ecs"
- assert boto3_client.call_args[1]["aws_access_key_id"] == None
- assert boto3_client.call_args[1]["aws_secret_access_key"] == None
- assert boto3_client.call_args[1]["aws_session_token"] == None
- assert boto3_client.call_args[1]["region_name"] == None
-
- assert botocore_config.called
- assert botocore_config.call_args == {}
-
-
-def test_parse_task_definition_kwargs(monkeypatch, cloud_api):
- boto3_client = MagicMock()
- monkeypatch.setattr("boto3.client", boto3_client)
-
- agent = FargateAgent(networkMode="bridge")
-
- kwarg_dict = {
- "taskRoleArn": "test",
- "executionRoleArn": "test",
- "networkMode": "bridge",
- "volumes": "test",
- "placementConstraints": "test",
- "cpu": "test",
- "memory": "test",
- "tags": "test",
- "pidMode": "test",
- "ipcMode": "test",
- "proxyConfiguration": "test",
- "inferenceAccelerators": "test",
- }
-
- (
- task_definition_kwargs,
- task_run_kwargs,
- container_definitions_kwargs,
- ) = agent._parse_kwargs(kwarg_dict)
-
- assert task_definition_kwargs == kwarg_dict
- assert task_run_kwargs == {"placementConstraints": "test", "tags": "test"}
-
-
-def test_parse_task_definition_kwargs_errors(monkeypatch, cloud_api):
- boto3_client = MagicMock()
- monkeypatch.setattr("boto3.client", boto3_client)
-
- agent = FargateAgent()
-
- kwarg_dict = {
- "placementConstraints": "taskRoleArn='arn:aws:iam::543216789012:role/Dev"
- }
-
- (
- task_definition_kwargs,
- task_run_kwargs,
- container_definitions_kwargs,
- ) = agent._parse_kwargs(kwarg_dict)
-
- assert task_definition_kwargs == kwarg_dict
- assert task_run_kwargs == {
- "placementConstraints": "taskRoleArn='arn:aws:iam::543216789012:role/Dev"
- }
-
-
-def test_parse_task_run_kwargs(monkeypatch, cloud_api):
- boto3_client = MagicMock()
- monkeypatch.setattr("boto3.client", boto3_client)
-
- agent = FargateAgent()
-
- kwarg_dict = {
- "cluster": "test",
- "count": "test",
- "startedBy": "test",
- "group": "test",
- "placementConstraints": "test",
- "placementStrategy": "test",
- "platformVersion": "test",
- "networkConfiguration": "test",
- "tags": "test",
- "enableECSManagedTags": "test",
- "propagateTags": "test",
- }
-
- (
- task_definition_kwargs,
- task_run_kwargs,
- container_definitions_kwargs,
- ) = agent._parse_kwargs(kwarg_dict)
-
- assert task_run_kwargs == kwarg_dict
- assert task_definition_kwargs == {"placementConstraints": "test", "tags": "test"}
-
-
-def test_parse_container_definition_kwargs(monkeypatch, cloud_api):
- boto3_client = MagicMock()
- monkeypatch.setattr("boto3.client", boto3_client)
-
- agent = FargateAgent()
-
- kwarg_dict = {
- "containerDefinitions": [
- {
- "environment": "test",
- "secrets": "test",
- "mountPoints": "test",
- "logConfiguration": "test",
- "repositoryCredentials": "repo",
- }
- ]
- }
-
- (
- task_definition_kwargs,
- task_run_kwargs,
- container_definitions_kwargs,
- ) = agent._parse_kwargs(kwarg_dict)
-
- assert container_definitions_kwargs == {
- "environment": "test",
- "secrets": "test",
- "mountPoints": "test",
- "logConfiguration": "test",
- "repositoryCredentials": "repo",
- }
-
-
-def test_parse_container_definition_kwargs_provided_as_string(monkeypatch, cloud_api):
- boto3_client = MagicMock()
- monkeypatch.setattr("boto3.client", boto3_client)
-
- agent = FargateAgent()
-
- kwarg_dict = {
- "containerDefinitions": str(
- [
- {
- "environment": "test",
- "secrets": "test",
- "mountPoints": "test",
- "logConfiguration": "test",
- "repositoryCredentials": "repo",
- }
- ]
- )
- }
-
- (
- task_definition_kwargs,
- task_run_kwargs,
- container_definitions_kwargs,
- ) = agent._parse_kwargs(kwarg_dict)
-
- assert container_definitions_kwargs == {
- "environment": "test",
- "secrets": "test",
- "mountPoints": "test",
- "logConfiguration": "test",
- "repositoryCredentials": "repo",
- }
-
-
-def test_parse_container_definition_kwargs_errors_on_multiple(monkeypatch, cloud_api):
- boto3_client = MagicMock()
- monkeypatch.setattr("boto3.client", boto3_client)
-
- agent = FargateAgent()
-
- kwarg_dict = {
- "containerDefinitions": [
- {
- "environment": "test",
- "secrets": "test",
- "mountPoints": "test",
- "logConfiguration": "test",
- "repositoryCredentials": "repo",
- },
- {"test": "here"},
- ]
- }
-
- with pytest.raises(ValueError):
- (
- task_definition_kwargs,
- task_run_kwargs,
- container_definitions_kwargs,
- ) = agent._parse_kwargs(kwarg_dict)
-
-
-def test_parse_container_definition_kwargs_errors(monkeypatch, cloud_api):
- boto3_client = MagicMock()
- monkeypatch.setattr("boto3.client", boto3_client)
-
- agent = FargateAgent()
-
- kwarg_dict = {
- "containerDefinitions": [
- {
- "secrets": [
- {
- "name": "TEST_SECRET1",
- "valueFrom": "arn:aws:ssm:us-east-1:123456789101:parameter/test/test",
- }
- ],
- }
- ]
- }
-
- (
- task_definition_kwargs,
- task_run_kwargs,
- container_definitions_kwargs,
- ) = agent._parse_kwargs(kwarg_dict)
-
- assert container_definitions_kwargs == {
- "secrets": [
- {
- "name": "TEST_SECRET1",
- "valueFrom": "arn:aws:ssm:us-east-1:123456789101:parameter/test/test",
- }
- ]
- }
-
-
-def test_parse_task_definition_and_run_kwargs(monkeypatch, cloud_api):
- boto3_client = MagicMock()
- monkeypatch.setattr("boto3.client", boto3_client)
-
- agent = FargateAgent()
-
- def_kwarg_dict = {
- "taskRoleArn": "test",
- "executionRoleArn": "test",
- "volumes": "test",
- "placementConstraints": "test",
- "cpu": "test",
- "memory": "test",
- "tags": "test",
- "pidMode": "test",
- "ipcMode": "test",
- "proxyConfiguration": "test",
- "inferenceAccelerators": "test",
- }
-
- run_kwarg_dict = {
- "cluster": "test",
- "count": "test",
- "startedBy": "test",
- "group": "test",
- "placementConstraints": "test",
- "placementStrategy": "test",
- "platformVersion": "test",
- "networkConfiguration": "test",
- "tags": "test",
- "enableECSManagedTags": "test",
- "propagateTags": "test",
- }
-
- kwarg_dict = {
- "taskRoleArn": "test",
- "executionRoleArn": "test",
- "volumes": "test",
- "placementConstraints": "test",
- "cpu": "test",
- "memory": "test",
- "tags": "test",
- "pidMode": "test",
- "ipcMode": "test",
- "proxyConfiguration": "test",
- "inferenceAccelerators": "test",
- "cluster": "test",
- "count": "test",
- "startedBy": "test",
- "group": "test",
- "placementStrategy": "test",
- "platformVersion": "test",
- "networkConfiguration": "test",
- "enableECSManagedTags": "test",
- "propagateTags": "test",
- }
-
- (
- task_definition_kwargs,
- task_run_kwargs,
- container_definitions_kwargs,
- ) = agent._parse_kwargs(kwarg_dict)
-
- assert task_definition_kwargs == def_kwarg_dict
- assert task_run_kwargs == run_kwarg_dict
-
-
-def test_parse_task_kwargs_invalid_value_removed(monkeypatch, cloud_api):
- boto3_client = MagicMock()
- monkeypatch.setattr("boto3.client", boto3_client)
-
- agent = FargateAgent()
-
- kwarg_dict = {
- "test": "not_real",
- "containerDefinitions": [
- {
- "test": "not_real",
- }
- ],
- }
-
- (
- task_definition_kwargs,
- task_run_kwargs,
- container_definitions_kwargs,
- ) = agent._parse_kwargs(kwarg_dict)
-
- assert task_definition_kwargs == {}
- assert task_run_kwargs == {}
- assert container_definitions_kwargs == {}
-
-
-def test_fargate_agent_config_options_init(monkeypatch, cloud_api):
- boto3_client = MagicMock()
- monkeypatch.setattr("boto3.client", boto3_client)
-
- botocore_config = MagicMock()
- monkeypatch.setattr("botocore.config.Config", botocore_config)
-
- def_kwarg_dict = {
- "taskRoleArn": "test",
- "executionRoleArn": "test",
- "volumes": "test",
- "placementConstraints": "test",
- "cpu": "test",
- "memory": "test",
- "tags": "test",
- "pidMode": "test",
- "ipcMode": "test",
- "proxyConfiguration": "test",
- "inferenceAccelerators": "test",
- }
-
- run_kwarg_dict = {
- "cluster": "test",
- "count": "test",
- "startedBy": "test",
- "group": "test",
- "placementConstraints": "test",
- "placementStrategy": "test",
- "platformVersion": "test",
- "networkConfiguration": "test",
- "tags": "test",
- "enableECSManagedTags": "test",
- "propagateTags": "test",
- }
-
- container_def_kwargs_dict = {
- "environment": "test",
- "secrets": "test",
- "mountPoints": "test",
- "logConfiguration": "test",
- }
-
- kwarg_dict = {
- "taskRoleArn": "test",
- "executionRoleArn": "test",
- "volumes": "test",
- "placementConstraints": "test",
- "cpu": "test",
- "memory": "test",
- "tags": "test",
- "pidMode": "test",
- "ipcMode": "test",
- "proxyConfiguration": "test",
- "inferenceAccelerators": "test",
- "cluster": "test",
- "count": "test",
- "startedBy": "test",
- "group": "test",
- "placementStrategy": "test",
- "platformVersion": "test",
- "networkConfiguration": "test",
- "enableECSManagedTags": "test",
- "propagateTags": "test",
- "containerDefinitions": [
- {
- "environment": "test",
- "secrets": "test",
- "mountPoints": "test",
- "logConfiguration": "test",
- }
- ],
- }
-
- agent = FargateAgent(
- name="test",
- aws_access_key_id="id",
- aws_secret_access_key="secret",
- aws_session_token="token",
- region_name="region",
- botocore_config={"test": "config"},
- **kwarg_dict
- )
- assert agent
- assert agent.name == "test"
- assert agent.task_definition_kwargs == def_kwarg_dict
- assert agent.task_run_kwargs == run_kwarg_dict
- assert agent.container_definitions_kwargs == container_def_kwargs_dict
-
- assert boto3_client.call_args[0][0] == "ecs"
- assert boto3_client.call_args[1]["aws_access_key_id"] == "id"
- assert boto3_client.call_args[1]["aws_secret_access_key"] == "secret"
- assert boto3_client.call_args[1]["aws_session_token"] == "token"
- assert boto3_client.call_args[1]["region_name"] == "region"
-
- assert botocore_config.called
- assert botocore_config.call_args[1] == {"test": "config"}
-
-
-def test_fargate_agent_config_env_vars(monkeypatch, cloud_api):
- boto3_client = MagicMock()
- monkeypatch.setattr("boto3.client", boto3_client)
-
- botocore_config = MagicMock()
- monkeypatch.setattr("botocore.config.Config", botocore_config)
-
- def_kwarg_dict = {
- "taskRoleArn": "test",
- "executionRoleArn": "test",
- "volumes": "test",
- "placementConstraints": "test",
- "cpu": "test",
- "memory": "test",
- "tags": "test",
- "pidMode": "test",
- "ipcMode": "test",
- "proxyConfiguration": "test",
- "inferenceAccelerators": "test",
- }
-
- run_kwarg_dict = {
- "cluster": "test",
- "count": "test",
- "startedBy": "test",
- "group": "test",
- "placementConstraints": "test",
- "placementStrategy": "test",
- "platformVersion": "test",
- "networkConfiguration": "test",
- "tags": "test",
- "enableECSManagedTags": "test",
- "propagateTags": "test",
- }
-
- container_def_kwargs_dict = {
- "environment": "test",
- "secrets": "test",
- "mountPoints": "test",
- "logConfiguration": "test",
- }
-
- # Client args
- monkeypatch.setenv("AWS_ACCESS_KEY_ID", "id")
- monkeypatch.setenv("AWS_SECRET_ACCESS_KEY", "secret")
- monkeypatch.setenv("AWS_SESSION_TOKEN", "token")
- monkeypatch.setenv("REGION_NAME", "region")
-
- # Def / run args
- monkeypatch.setenv("taskRoleArn", "test")
- monkeypatch.setenv("executionRoleArn", "test")
- monkeypatch.setenv("volumes", "test")
- monkeypatch.setenv("placementConstraints", "test")
- monkeypatch.setenv("cpu", "test")
- monkeypatch.setenv("memory", "test")
- monkeypatch.setenv("tags", "test")
- monkeypatch.setenv("pidMode", "test")
- monkeypatch.setenv("ipcMode", "test")
- monkeypatch.setenv("proxyConfiguration", "test")
- monkeypatch.setenv("inferenceAccelerators", "test")
- monkeypatch.setenv("cluster", "test")
- monkeypatch.setenv("count", "test")
- monkeypatch.setenv("startedBy", "test")
- monkeypatch.setenv("group", "test")
- monkeypatch.setenv("placementStrategy", "test")
- monkeypatch.setenv("platformVersion", "test")
- monkeypatch.setenv("networkConfiguration", "test")
- monkeypatch.setenv("enableECSManagedTags", "test")
- monkeypatch.setenv("propagateTags", "test")
- monkeypatch.setenv("containerDefinitions_environment", "test")
- monkeypatch.setenv("containerDefinitions_secrets", "test")
- monkeypatch.setenv("containerDefinitions_mountPoints", "test")
- monkeypatch.setenv("containerDefinitions_logConfiguration", "test")
-
- agent = FargateAgent(subnets=["subnet"])
- assert agent
- assert agent.task_definition_kwargs == def_kwarg_dict
- assert agent.task_run_kwargs == run_kwarg_dict
- assert agent.container_definitions_kwargs == container_def_kwargs_dict
-
- assert boto3_client.call_args[0][0] == "ecs"
- assert boto3_client.call_args[1]["aws_access_key_id"] == "id"
- assert boto3_client.call_args[1]["aws_secret_access_key"] == "secret"
- assert boto3_client.call_args[1]["aws_session_token"] == "token"
- assert boto3_client.call_args[1]["region_name"] == "region"
-
- assert botocore_config.called
- assert botocore_config.call_args == {}
-
-
-def test_fargate_agent_config_env_vars_lists_dicts(monkeypatch, cloud_api):
- boto3_client = MagicMock()
- monkeypatch.setattr("boto3.client", boto3_client)
-
- botocore_config = MagicMock()
- monkeypatch.setattr("botocore.config.Config", botocore_config)
-
- def_kwarg_dict = {
- "placementConstraints": ["test"],
- "proxyConfiguration": {"test": "test"},
- }
-
- run_kwarg_dict = {
- "placementConstraints": ["test"],
- "networkConfiguration": {"test": "test"},
- }
-
- container_def_kwargs_dict = {
- "environment": [{"name": "test", "value": "test"}],
- "secrets": [{"name": "test", "valueFrom": "test"}],
- "mountPoints": [
- {"sourceVolume": "myEfsVolume", "containerPath": "/data", "readOnly": False}
- ],
- }
-
- # Client args
- monkeypatch.setenv("AWS_ACCESS_KEY_ID", "id")
- monkeypatch.setenv("AWS_SECRET_ACCESS_KEY", "secret")
- monkeypatch.setenv("AWS_SESSION_TOKEN", "token")
- monkeypatch.setenv("REGION_NAME", "region")
-
- # Def / run args
- monkeypatch.setenv("placementConstraints", "['test']")
- monkeypatch.setenv("proxyConfiguration", "{'test': 'test'}")
- monkeypatch.setenv("networkConfiguration", "{'test': 'test'}")
- monkeypatch.setenv(
- "containerDefinitions_environment", '[{"name": "test", "value": "test"}]'
- )
- monkeypatch.setenv(
- "containerDefinitions_secrets", '[{"name": "test", "valueFrom": "test"}]'
- )
- monkeypatch.setenv(
- "containerDefinitions_mountPoints",
- '[{"sourceVolume": "myEfsVolume", "containerPath": "/data", "readOnly": False}]',
- )
-
- agent = FargateAgent(subnets=["subnet"])
- assert agent
- assert agent.task_definition_kwargs == def_kwarg_dict
- assert agent.task_run_kwargs == run_kwarg_dict
- assert agent.container_definitions_kwargs == container_def_kwargs_dict
-
- assert boto3_client.call_args[0][0] == "ecs"
- assert boto3_client.call_args[1]["aws_access_key_id"] == "id"
- assert boto3_client.call_args[1]["aws_secret_access_key"] == "secret"
- assert boto3_client.call_args[1]["aws_session_token"] == "token"
- assert boto3_client.call_args[1]["region_name"] == "region"
-
- assert botocore_config.called
- assert botocore_config.call_args == {}
-
-
-def test_deploy_flow_local_storage_raises(monkeypatch, cloud_api):
- boto3_client = MagicMock()
-
- boto3_client.describe_task_definition.return_value = {}
- boto3_client.run_task.return_value = {"tasks": [{"taskArn": "test"}]}
-
- monkeypatch.setattr("boto3.client", MagicMock(return_value=boto3_client))
-
- agent = FargateAgent()
-
- with pytest.raises(ValueError):
- agent.deploy_flow(
- flow_run=GraphQLResult(
- {
- "flow": GraphQLResult(
- {
- "storage": Local().serialize(),
- "id": "id",
- "environment": LocalEnvironment().serialize(),
- "core_version": "0.13.0",
- }
- ),
- "id": "id",
- }
- ),
- )
-
- assert not boto3_client.describe_task_definition.called
- assert not boto3_client.run_task.called
-
-
-def test_deploy_flow_docker_storage_raises(monkeypatch, cloud_api):
- boto3_client = MagicMock()
-
- boto3_client.describe_task_definition.return_value = {}
- boto3_client.run_task.return_value = {"tasks": [{"taskArn": "test"}]}
-
- monkeypatch.setattr("boto3.client", MagicMock(return_value=boto3_client))
-
- agent = FargateAgent()
- agent.deploy_flow(
- flow_run=GraphQLResult(
- {
- "flow": GraphQLResult(
- {
- "storage": Docker(
- registry_url="test", image_name="name", image_tag="tag"
- ).serialize(),
- "environment": LocalEnvironment().serialize(),
- "id": "id",
- "core_version": "0.13.0",
- }
- ),
- "id": "id",
- "name": "name",
- }
- )
- )
-
- assert boto3_client.describe_task_definition.called
- assert boto3_client.run_task.called
-
-
-def test_deploy_flow_all_args(monkeypatch, cloud_api):
- boto3_client = MagicMock()
-
- boto3_client.describe_task_definition.return_value = {}
- boto3_client.run_task.return_value = {"tasks": [{"taskArn": "test"}]}
-
- monkeypatch.setattr("boto3.client", MagicMock(return_value=boto3_client))
-
- kwarg_dict = {
- "taskRoleArn": "test",
- "executionRoleArn": "test",
- "volumes": "test",
- "placementConstraints": "test",
- "cpu": "test",
- "memory": "test",
- "tags": "test",
- "pidMode": "test",
- "ipcMode": "test",
- "proxyConfiguration": "test",
- "inferenceAccelerators": "test",
- "cluster": "cluster",
- "count": "test",
- "startedBy": "test",
- "group": "test",
- "placementStrategy": "test",
- "platformVersion": "test",
- "networkConfiguration": {
- "awsvpcConfiguration": {
- "subnets": ["subnet"],
- "assignPublicIp": "DISABLED",
- "securityGroups": ["security_group"],
- }
- },
- "enableECSManagedTags": "test",
- "propagateTags": "test",
- }
-
- agent = FargateAgent(
- aws_access_key_id="id",
- aws_secret_access_key="secret",
- aws_session_token="token",
- region_name="region",
- **kwarg_dict
- )
- agent.deploy_flow(
- flow_run=GraphQLResult(
- {
- "flow": GraphQLResult(
- {
- "storage": Docker(
- registry_url="test", image_name="name", image_tag="tag"
- ).serialize(),
- "environment": LocalEnvironment().serialize(),
- "id": "id",
- "core_version": "0.13.0",
- }
- ),
- "id": "id",
- }
- )
- )
-
- assert boto3_client.describe_task_definition.called
- assert boto3_client.run_task.called
- assert boto3_client.run_task.call_args[1]["cluster"] == "cluster"
- assert boto3_client.run_task.call_args[1]["taskDefinition"] == "prefect-task-id"
- assert boto3_client.run_task.call_args[1]["launchType"] == "FARGATE"
- assert boto3_client.run_task.call_args[1]["overrides"] == {
- "containerOverrides": [
- {
- "name": "flow",
- "environment": [
- {"name": "PREFECT__CLOUD__AUTH_TOKEN", "value": ""},
- {"name": "PREFECT__CONTEXT__FLOW_RUN_ID", "value": "id"},
- {"name": "PREFECT__CONTEXT__FLOW_ID", "value": "id"},
- ],
- }
- ]
- }
- assert boto3_client.run_task.call_args[1]["networkConfiguration"] == {
- "awsvpcConfiguration": {
- "subnets": ["subnet"],
- "assignPublicIp": "DISABLED",
- "securityGroups": ["security_group"],
- }
- }
-
-
-def test_deploy_flow_register_task_definition(monkeypatch, cloud_api):
- boto3_client = MagicMock()
-
- boto3_client.describe_task_definition.side_effect = ClientError({}, None)
- boto3_client.run_task.return_value = {"tasks": [{"taskArn": "test"}]}
- boto3_client.register_task_definition.return_value = {}
-
- monkeypatch.setattr("boto3.client", MagicMock(return_value=boto3_client))
-
- agent = FargateAgent()
- agent.deploy_flow(
- flow_run=GraphQLResult(
- {
- "flow": GraphQLResult(
- {
- "storage": Docker(
- registry_url="test", image_name="name", image_tag="tag"
- ).serialize(),
- "environment": LocalEnvironment().serialize(),
- "id": "id",
- "core_version": "0.13.0",
- }
- ),
- "id": "id",
- }
- )
- )
-
- assert boto3_client.describe_task_definition.called
- assert boto3_client.register_task_definition.called
- assert (
- boto3_client.register_task_definition.call_args[1]["family"]
- == "prefect-task-id"
- )
-
-
-def test_deploy_flow_register_task_definition_uses_environment_metadata(
- monkeypatch, cloud_api
-):
- boto3_client = MagicMock()
-
- boto3_client.describe_task_definition.side_effect = ClientError({}, None)
- boto3_client.run_task.return_value = {"tasks": [{"taskArn": "test"}]}
- boto3_client.register_task_definition.return_value = {}
-
- monkeypatch.setattr("boto3.client", MagicMock(return_value=boto3_client))
-
- agent = FargateAgent()
- agent.deploy_flow(
- flow_run=GraphQLResult(
- {
- "flow": GraphQLResult(
- {
- "storage": Local().serialize(),
- "environment": LocalEnvironment(
- metadata={"image": "repo/name:tag"}
- ).serialize(),
- "id": "id",
- "core_version": "0.13.0",
- }
- ),
- "id": "id",
- }
- )
- )
-
- assert boto3_client.describe_task_definition.called
- assert boto3_client.register_task_definition.called
- assert (
- boto3_client.register_task_definition.call_args[1]["containerDefinitions"][0][
- "image"
- ]
- == "repo/name:tag"
- )
- assert (
- boto3_client.register_task_definition.call_args[1]["family"]
- == "prefect-task-id"
- )
-
-
-def test_deploy_flow_register_task_definition_uses_user_env_vars(
- monkeypatch, cloud_api
-):
- boto3_client = MagicMock()
-
- boto3_client.describe_task_definition.side_effect = ClientError({}, None)
- boto3_client.run_task.return_value = {"tasks": [{"taskArn": "test"}]}
- boto3_client.register_task_definition.return_value = {}
-
- monkeypatch.setattr("boto3.client", MagicMock(return_value=boto3_client))
-
- agent = FargateAgent(env_vars=dict(AUTH_THING="foo", PKG_SETTING="bar"))
- agent.deploy_flow(
- flow_run=GraphQLResult(
- {
- "flow": GraphQLResult(
- {
- "storage": Docker(
- registry_url="test", image_name="name", image_tag="tag"
- ).serialize(),
- "environment": LocalEnvironment().serialize(),
- "id": "id",
- "core_version": "0.13.0",
- }
- ),
- "id": "id",
- }
- )
- )
-
- assert boto3_client.describe_task_definition.called
- assert boto3_client.register_task_definition.called
- assert (
- boto3_client.register_task_definition.call_args[1]["family"]
- == "prefect-task-id"
- )
-
- container_defs = boto3_client.register_task_definition.call_args[1][
- "containerDefinitions"
- ]
-
- user_vars = [
- dict(name="AUTH_THING", value="foo"),
- dict(name="PKG_SETTING", value="bar"),
- ]
- assert container_defs[0]["environment"][-1] in user_vars
- assert container_defs[0]["environment"][-2] in user_vars
-
-
-@pytest.mark.parametrize(
- "core_version,command",
- [
- ("0.10.0", "prefect execute cloud-flow"),
- ("0.6.0+134", "prefect execute cloud-flow"),
- ("0.13.0", "prefect execute flow-run"),
- ("0.13.1+134", "prefect execute flow-run"),
- ],
-)
-def test_deploy_flow_register_task_definition_all_args(
- core_version,
- command,
- monkeypatch,
- backend,
-):
- boto3_client = MagicMock()
-
- boto3_client.describe_task_definition.side_effect = ClientError({}, None)
- boto3_client.run_task.return_value = {"tasks": [{"taskArn": "test"}]}
- boto3_client.register_task_definition.return_value = {}
-
- monkeypatch.setattr("boto3.client", MagicMock(return_value=boto3_client))
-
- kwarg_dict = {
- "taskRoleArn": "test",
- "executionRoleArn": "test",
- "volumes": "test",
- "placementConstraints": "test",
- "cpu": "1",
- "memory": "2",
- "tags": "test",
- "pidMode": "test",
- "ipcMode": "test",
- "proxyConfiguration": "test",
- "inferenceAccelerators": "test",
- "cluster": "cluster",
- "count": "test",
- "startedBy": "test",
- "group": "test",
- "placementStrategy": "test",
- "platformVersion": "test",
- "networkConfiguration": {
- "awsvpcConfiguration": {
- "subnets": ["subnet"],
- "assignPublicIp": "DISABLED",
- "securityGroups": ["security_group"],
- }
- },
- "enableECSManagedTags": "test",
- "propagateTags": "test",
- "containerDefinitions": [
- {
- "environment": [{"name": "TEST_ENV", "value": "Success!"}],
- "secrets": [
- {
- "name": "TEST_SECRET1",
- "valueFrom": "arn:aws:ssm:us-east-1:123456789101:parameter/test/test",
- },
- {
- "name": "TEST_SECRET",
- "valueFrom": "arn:aws:ssm:us-east-1:123456789101:parameter/test/test",
- },
- ],
- "mountPoints": [
- {
- "sourceVolume": "myEfsVolume",
- "containerPath": "/data",
- "readOnly": False,
- }
- ],
- "logConfiguration": {
- "logDriver": "awslogs",
- "options": {
- "awslogs-group": "prefect",
- "awslogs-region": "us-east-1",
- "awslogs-stream-prefix": "flow-runs",
- "awslogs-create-group": "true",
- },
- },
- "repositoryCredentials": ["repo"],
- }
- ],
- }
-
- agent = FargateAgent(
- aws_access_key_id="id",
- aws_secret_access_key="secret",
- aws_session_token="token",
- region_name="region",
- **kwarg_dict
- )
- agent.deploy_flow(
- flow_run=GraphQLResult(
- {
- "flow": GraphQLResult(
- {
- "storage": Docker(
- registry_url="test", image_name="name", image_tag="tag"
- ).serialize(),
- "environment": LocalEnvironment().serialize(),
- "id": "id",
- "core_version": core_version,
- }
- ),
- "id": "id",
- }
- )
- )
-
- assert boto3_client.describe_task_definition.called
- assert boto3_client.register_task_definition.called
- assert (
- boto3_client.register_task_definition.call_args[1]["family"]
- == "prefect-task-id"
- )
- assert boto3_client.register_task_definition.call_args[1][
- "containerDefinitions"
- ] == [
- {
- "name": "flow",
- "image": "test/name:tag",
- "command": ["/bin/sh", "-c", command],
- "environment": [
- {"name": "PREFECT__BACKEND", "value": backend},
- {"name": "PREFECT__CLOUD__API", "value": prefect.config.cloud.api},
- {"name": "PREFECT__CLOUD__AGENT__LABELS", "value": "[]"},
- {"name": "PREFECT__CLOUD__USE_LOCAL_SECRETS", "value": "false"},
- {"name": "PREFECT__CLOUD__SEND_FLOW_RUN_LOGS", "value": "true"},
- {"name": "PREFECT__LOGGING__LEVEL", "value": "INFO"},
- {
- "name": "PREFECT__ENGINE__FLOW_RUNNER__DEFAULT_CLASS",
- "value": "prefect.engine.cloud.CloudFlowRunner",
- },
- {
- "name": "PREFECT__ENGINE__TASK_RUNNER__DEFAULT_CLASS",
- "value": "prefect.engine.cloud.CloudTaskRunner",
- },
- # Backwards compatibility variable for containers on Prefect <0.15.0
- {"name": "PREFECT__LOGGING__LOG_TO_CLOUD", "value": "true"},
- {"name": "TEST_ENV", "value": "Success!"},
- ],
- "essential": True,
- "secrets": [
- {
- "name": "TEST_SECRET1",
- "valueFrom": "arn:aws:ssm:us-east-1:123456789101:parameter/test/test",
- },
- {
- "name": "TEST_SECRET",
- "valueFrom": "arn:aws:ssm:us-east-1:123456789101:parameter/test/test",
- },
- ],
- "mountPoints": [
- {
- "sourceVolume": "myEfsVolume",
- "containerPath": "/data",
- "readOnly": False,
- }
- ],
- "logConfiguration": {
- "logDriver": "awslogs",
- "options": {
- "awslogs-group": "prefect",
- "awslogs-region": "us-east-1",
- "awslogs-stream-prefix": "flow-runs",
- "awslogs-create-group": "true",
- },
- },
- "repositoryCredentials": ["repo"],
- }
- ]
- assert boto3_client.register_task_definition.call_args[1][
- "requiresCompatibilities"
- ] == ["FARGATE"]
- assert boto3_client.register_task_definition.call_args[1]["networkMode"] == "awsvpc"
- assert boto3_client.register_task_definition.call_args[1]["cpu"] == "1"
- assert boto3_client.register_task_definition.call_args[1]["memory"] == "2"
-
-
-@pytest.mark.parametrize("flag", [True, False])
-def test_deploy_flows_includes_agent_labels_in_environment(
- monkeypatch, cloud_api, flag
-):
- boto3_client = MagicMock()
-
- boto3_client.describe_task_definition.side_effect = ClientError({}, None)
- boto3_client.run_task.return_value = {"tasks": [{"taskArn": "test"}]}
- boto3_client.register_task_definition.return_value = {}
-
- monkeypatch.setattr("boto3.client", MagicMock(return_value=boto3_client))
-
- kwarg_dict = {
- "taskRoleArn": "test",
- "executionRoleArn": "test",
- "volumes": "test",
- "placementConstraints": "test",
- "cpu": "1",
- "memory": "2",
- "tags": "test",
- "pidMode": "test",
- "ipcMode": "test",
- "proxyConfiguration": "test",
- "inferenceAccelerators": "test",
- "cluster": "cluster",
- "count": "test",
- "startedBy": "test",
- "group": "test",
- "placementStrategy": "test",
- "platformVersion": "test",
- "networkConfiguration": {
- "awsvpcConfiguration": {
- "subnets": ["subnet"],
- "assignPublicIp": "DISABLED",
- "securityGroups": ["security_group"],
- }
- },
- "enableECSManagedTags": "test",
- "propagateTags": "test",
- }
-
- agent = FargateAgent(
- aws_access_key_id="id",
- aws_secret_access_key="secret",
- aws_session_token="token",
- region_name="region",
- labels=["aws", "staging"],
- no_cloud_logs=flag,
- **kwarg_dict
- )
- agent.deploy_flow(
- flow_run=GraphQLResult(
- {
- "flow": GraphQLResult(
- {
- "storage": Docker(
- registry_url="test", image_name="name", image_tag="tag"
- ).serialize(),
- "environment": LocalEnvironment().serialize(),
- "id": "id",
- "core_version": "0.13.0",
- }
- ),
- "id": "id",
- }
- )
- )
-
- assert boto3_client.describe_task_definition.called
- assert boto3_client.register_task_definition.called
- assert (
- boto3_client.register_task_definition.call_args[1]["family"]
- == "prefect-task-id"
- )
- assert boto3_client.register_task_definition.call_args[1][
- "containerDefinitions"
- ] == [
- {
- "name": "flow",
- "image": "test/name:tag",
- "command": ["/bin/sh", "-c", "prefect execute flow-run"],
- "environment": [
- {"name": "PREFECT__BACKEND", "value": "cloud"},
- {"name": "PREFECT__CLOUD__API", "value": prefect.config.cloud.api},
- {
- "name": "PREFECT__CLOUD__AGENT__LABELS",
- "value": "['aws', 'staging']",
- },
- {"name": "PREFECT__CLOUD__USE_LOCAL_SECRETS", "value": "false"},
- {
- "name": "PREFECT__CLOUD__SEND_FLOW_RUN_LOGS",
- "value": str(not flag).lower(),
- },
- {"name": "PREFECT__LOGGING__LEVEL", "value": "INFO"},
- {
- "name": "PREFECT__ENGINE__FLOW_RUNNER__DEFAULT_CLASS",
- "value": "prefect.engine.cloud.CloudFlowRunner",
- },
- {
- "name": "PREFECT__ENGINE__TASK_RUNNER__DEFAULT_CLASS",
- "value": "prefect.engine.cloud.CloudTaskRunner",
- },
- {
- "name": "PREFECT__LOGGING__LOG_TO_CLOUD",
- "value": str(not flag).lower(),
- },
- ],
- "essential": True,
- }
- ]
- assert boto3_client.register_task_definition.call_args[1][
- "requiresCompatibilities"
- ] == ["FARGATE"]
- assert boto3_client.register_task_definition.call_args[1]["networkMode"] == "awsvpc"
- assert boto3_client.register_task_definition.call_args[1]["cpu"] == "1"
- assert boto3_client.register_task_definition.call_args[1]["memory"] == "2"
-
-
-def test_deploy_flows_require_docker_storage(monkeypatch, cloud_api):
- boto3_client = MagicMock()
-
- boto3_client.describe_task_definition.return_value = {"tags": []}
- boto3_client.run_task.return_value = {}
-
- monkeypatch.setattr("boto3.client", MagicMock(return_value=boto3_client))
- with pytest.raises(Exception) as excinfo:
- agent = FargateAgent()
- agent.deploy_flow(
- GraphQLResult(
- {
- "flow": GraphQLResult(
- {
- "storage": Local().serialize(),
- "environment": LocalEnvironment().serialize(),
- "id": "id",
- "version": 2,
- "name": "name",
- "core_version": "0.13.0",
- }
- ),
- "id": "id",
- "name": "name",
- }
- )
- )
- assert boto3_client.describe_task_definition.not_called
- assert boto3_client.run_task.not_called
-
-
-# test to support task revisions and external kwargs
-
-
-def test_deploy_flows_enable_task_revisions_no_tags(monkeypatch, cloud_api):
- boto3_client = MagicMock()
-
- boto3_client.describe_task_definition.return_value = {"tags": []}
- boto3_client.run_task.return_value = {"tasks": [{"taskArn": "test"}]}
- boto3_client.register_task_definition.return_value = {}
-
- monkeypatch.setattr("boto3.client", MagicMock(return_value=boto3_client))
-
- agent = FargateAgent(enable_task_revisions=True)
- agent.deploy_flow(
- GraphQLResult(
- {
- "flow": GraphQLResult(
- {
- "storage": Docker(
- registry_url="test", image_name="name", image_tag="tag"
- ).serialize(),
- "environment": LocalEnvironment().serialize(),
- "id": "id",
- "version": 2,
- "name": "name",
- "core_version": "0.13.0",
- }
- ),
- "id": "id",
- "name": "name",
- }
- )
- )
- assert boto3_client.describe_task_definition.called
- assert boto3_client.register_task_definition.called
- boto3_client.register_task_definition.assert_called_with(
- containerDefinitions=[
- {
- "name": "flow",
- "image": "test/name:tag",
- "command": ["/bin/sh", "-c", "prefect execute flow-run"],
- "environment": [
- {"name": "PREFECT__BACKEND", "value": "cloud"},
- {"name": "PREFECT__CLOUD__API", "value": prefect.config.cloud.api},
- {"name": "PREFECT__CLOUD__AGENT__LABELS", "value": "[]"},
- {"name": "PREFECT__CLOUD__USE_LOCAL_SECRETS", "value": "false"},
- {"name": "PREFECT__CLOUD__SEND_FLOW_RUN_LOGS", "value": "true"},
- {"name": "PREFECT__LOGGING__LEVEL", "value": "INFO"},
- {
- "name": "PREFECT__ENGINE__FLOW_RUNNER__DEFAULT_CLASS",
- "value": "prefect.engine.cloud.CloudFlowRunner",
- },
- {
- "name": "PREFECT__ENGINE__TASK_RUNNER__DEFAULT_CLASS",
- "value": "prefect.engine.cloud.CloudTaskRunner",
- },
- {"name": "PREFECT__LOGGING__LOG_TO_CLOUD", "value": "true"},
- ],
- "essential": True,
- }
- ],
- family="name",
- networkMode="awsvpc",
- requiresCompatibilities=["FARGATE"],
- tags=[
- {"key": "PrefectFlowId", "value": "id"},
- {"key": "PrefectFlowVersion", "value": "2"},
- ],
- )
- assert boto3_client.run_task.called
- assert boto3_client.run_task.called_with(taskDefinition="name")
-
-
-def test_deploy_flows_enable_task_revisions_tags_current(monkeypatch, cloud_api):
- boto3_client = MagicMock()
-
- boto3_client.describe_task_definition.return_value = {
- "tags": [
- {"key": "PrefectFlowId", "value": "id"},
- {"key": "PrefectFlowVersion", "value": "5"},
- ]
- }
- boto3_client.run_task.return_value = {"tasks": [{"taskArn": "test"}]}
- boto3_client.register_task_definition.return_value = {}
-
- monkeypatch.setattr("boto3.client", MagicMock(return_value=boto3_client))
-
- agent = FargateAgent(enable_task_revisions=True)
- agent.deploy_flow(
- GraphQLResult(
- {
- "flow": GraphQLResult(
- {
- "storage": Docker(
- registry_url="test", image_name="name", image_tag="tag"
- ).serialize(),
- "environment": LocalEnvironment().serialize(),
- "id": "id",
- "version": 5,
- "name": "name #1",
- "core_version": "0.13.0",
- }
- ),
- "id": "id",
- "name": "name",
- }
- )
- )
- assert boto3_client.describe_task_definition.called
- assert boto3_client.register_task_definition.not_called
- assert boto3_client.run_task.called_with(taskDefinition="name-1")
-
-
-def test_deploy_flows_enable_task_revisions_old_version_exists(monkeypatch, cloud_api):
- boto3_client = MagicMock()
-
- boto3_client.describe_task_definition.return_value = {
- "tags": [
- {"key": "PrefectFlowId", "value": "current_id"},
- {"key": "PrefectFlowVersion", "value": "5"},
- ]
- }
- boto3_client.run_task.return_value = {"tasks": [{"taskArn": "test"}]}
- boto3_client.get_resources.return_value = {
- "ResourceTagMappingList": [
- {"ResourceARN": "arn:aws:ecs:us-east-1:12345:task-definition/flow:22"}
- ]
- }
-
- monkeypatch.setattr("boto3.client", MagicMock(return_value=boto3_client))
-
- agent = FargateAgent(enable_task_revisions=True)
- agent.deploy_flow(
- GraphQLResult(
- {
- "flow": GraphQLResult(
- {
- "storage": Docker(
- registry_url="test", image_name="name", image_tag="tag"
- ).serialize(),
- "environment": LocalEnvironment().serialize(),
- "id": "id",
- "version": 3,
- "name": "name",
- "core_version": "0.13.0",
- }
- ),
- "id": "id",
- "name": "name",
- }
- )
- )
- assert boto3_client.describe_task_definition.called
- assert boto3_client.get_resources.called
- assert boto3_client.register_task_definition.not_called
- assert boto3_client.run_task.called_with(
- taskDefinition="arn:aws:ecs:us-east-1:12345:task-definition/flow:22"
- )
-
-
-def test_override_kwargs(monkeypatch, cloud_api):
-
- boto3_resource = MagicMock()
- streaming_body = MagicMock()
- streaming_body.read.return_value.decode.return_value = """{
- "cpu": "256",
- "networkConfiguration": "test",
- "containerDefinitions": [{
- "environment": [{
- "name": "TEST_ENV",
- "value": "Success!"
- }],
- "secrets": [{
- "name": "TEST_SECRET1",
- "valueFrom": "arn:aws:ssm:us-east-1:123456789101:parameter/test/test"
- },
- {
- "name": "TEST_SECRET",
- "valueFrom": "arn:aws:ssm:us-east-1:123456789101:parameter/test/test"
- }
- ],
- "mountPoints": [{
- "sourceVolume": "myEfsVolume",
- "containerPath": "/data",
- "readOnly": false
- }]
- }]
- }"""
- boto3_resource.return_value.Object.return_value.get.return_value = {
- "Body": streaming_body
- }
- monkeypatch.setattr("boto3.resource", boto3_resource)
-
- agent = FargateAgent(
- use_external_kwargs=True,
- external_kwargs_s3_bucket="test-bucket",
- external_kwargs_s3_key="prefect-artifacts/kwargs",
- aws_access_key_id="id",
- aws_secret_access_key="secret",
- aws_session_token="token",
- region_name="region",
- labels=["aws", "staging"],
- )
- definition_kwargs = {}
- run_kwargs = {}
- container_definitions_kwargs = {}
- agent._override_kwargs(
- GraphQLResult(
- {
- "flow": GraphQLResult(
- {
- "storage": Docker(
- registry_url="test", image_name="name", image_tag="tag"
- ).serialize(),
- "environment": LocalEnvironment().serialize(),
- "id": "id",
- "version": 2,
- "name": "name",
- "core_version": "0.13.0",
- }
- ),
- "id": "id",
- "name": "name",
- }
- ),
- definition_kwargs,
- run_kwargs,
- container_definitions_kwargs,
- )
- print(container_definitions_kwargs)
- assert boto3_resource.called
- assert streaming_body.read().decode.called
- assert definition_kwargs == {"cpu": "256"}
- assert run_kwargs == {"networkConfiguration": "test"}
- assert container_definitions_kwargs == {
- "environment": [{"name": "TEST_ENV", "value": "Success!"}],
- "secrets": [
- {
- "name": "TEST_SECRET1",
- "valueFrom": "arn:aws:ssm:us-east-1:123456789101:parameter/test/test",
- },
- {
- "name": "TEST_SECRET",
- "valueFrom": "arn:aws:ssm:us-east-1:123456789101:parameter/test/test",
- },
- ],
- "mountPoints": [
- {"sourceVolume": "myEfsVolume", "containerPath": "/data", "readOnly": False}
- ],
- }
-
-
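The assertions in `test_override_kwargs` above pin down how the external-kwargs JSON is split: the top-level `cpu` feeds the task-definition kwargs, `networkConfiguration` feeds `run_task`, and the contents of the first container definition become container-level overrides. A rough sketch of that routing, with the key sets assumed purely for illustration:

```python
import json

# Key routing inferred from the test's assertions; the real
# FargateAgent._override_kwargs may route differently.
DEFINITION_KEYS = {"cpu", "memory", "tags"}
RUN_KEYS = {"networkConfiguration", "cluster"}

def route_external_kwargs(raw: str):
    blob = json.loads(raw)
    containers = blob.pop("containerDefinitions", [{}]) or [{}]
    definition = {k: v for k, v in blob.items() if k in DEFINITION_KEYS}
    run = {k: v for k, v in blob.items() if k in RUN_KEYS}
    return definition, run, containers[0]
```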
-def test_override_kwargs_exception(monkeypatch, cloud_api):
-
- boto3_resource = MagicMock()
- streaming_body = MagicMock()
- streaming_body.read.return_value.decode.side_effect = ClientError({}, None)
- boto3_resource.return_value.Object.return_value.get.return_value = {
- "Body": streaming_body
- }
- monkeypatch.setattr("boto3.resource", boto3_resource)
-
- agent = FargateAgent(
- use_external_kwargs=True,
- external_kwargs_s3_bucket="test-bucket",
- external_kwargs_s3_key="prefect-artifacts/kwargs",
- aws_access_key_id="id",
- aws_secret_access_key="secret",
- aws_session_token="token",
- region_name="region",
- labels=["aws", "staging"],
- )
- definition_kwargs = {}
- run_kwargs = {}
- container_definitions_kwargs = {}
- agent._override_kwargs(
- GraphQLResult(
- {
- "flow": GraphQLResult(
- {
- "storage": Docker(
- registry_url="test", image_name="name", image_tag="tag"
- ).serialize(),
- "environment": LocalEnvironment().serialize(),
- "id": "id",
- "version": 2,
- "name": "name",
- "core_version": "0.13.0",
- }
- ),
- "id": "id",
- "name": "name",
- }
- ),
- definition_kwargs,
- run_kwargs,
- container_definitions_kwargs,
- )
-
- assert boto3_resource.called
- assert streaming_body.read().decode.called
- assert definition_kwargs == {}
- assert run_kwargs == {}
- assert container_definitions_kwargs == {}
-
-
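The exception test above leans on two `unittest.mock` features at once: chained `return_value` attributes configure the result of `body.read().decode()`, while `side_effect` makes that same call raise. (`ClientError({}, None)` is a legal, if minimal, botocore error; the constructor only `.get()`s fields from the response dict.) A minimal demonstration:

```python
from unittest.mock import MagicMock

body = MagicMock()
body.read.return_value.decode.return_value = "{}"  # configures body.read().decode()
assert body.read().decode() == "{}"

body.read.return_value.decode.side_effect = RuntimeError("boom")
try:
    body.read().decode()
except RuntimeError:
    pass  # with side_effect set, the mocked call raises instead of returning
```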
-def test_deploy_flows_enable_task_revisions_tags_passed_in(monkeypatch, cloud_api):
- boto3_client = MagicMock()
-
- boto3_client.describe_task_definition.return_value = {
- "tags": [{"key": "PrefectFlowId", "value": "id"}]
- }
- boto3_client.run_task.return_value = {"tasks": [{"taskArn": "test"}]}
- boto3_client.register_task_definition.return_value = {}
-
- monkeypatch.setattr("boto3.client", MagicMock(return_value=boto3_client))
-
- agent = FargateAgent(
- enable_task_revisions=True,
- tags=[
- {"key": "PrefectFlowId", "value": "id"},
- {"key": "PrefectFlowVersion", "value": "2"},
- ],
- )
- agent.deploy_flow(
- GraphQLResult(
- {
- "flow": GraphQLResult(
- {
- "storage": Docker(
- registry_url="test", image_name="name", image_tag="tag"
- ).serialize(),
- "environment": LocalEnvironment().serialize(),
- "id": "id",
- "version": 2,
- "name": "name",
- "core_version": "0.13.0",
- }
- ),
- "id": "id",
- "name": "name",
- }
- )
- )
- assert agent.task_definition_kwargs == {
- "tags": [
- {"key": "PrefectFlowId", "value": "id"},
- {"key": "PrefectFlowVersion", "value": "2"},
- ]
- }
- assert boto3_client.describe_task_definition.called
- assert boto3_client.register_task_definition.not_called
- assert boto3_client.run_task.called
- assert boto3_client.run_task.called_with(taskDefinition="name")
-
-
-def test_deploy_flows_enable_task_revisions_with_external_kwargs(
- monkeypatch, cloud_api
-):
- boto3_client = MagicMock()
- boto3_resource = MagicMock()
- streaming_body = MagicMock()
-
- streaming_body.read.return_value.decode.return_value = '{"cpu": "256", "networkConfiguration": "test", "tags": [{"key": "test", "value": "test"}]}'
- boto3_resource.return_value.Object.return_value.get.return_value = {
- "Body": streaming_body
- }
-
- boto3_client.describe_task_definition.return_value = {
- "tags": [
- {"key": "PrefectFlowId", "value": "id"},
- {"key": "PrefectFlowVersion", "value": "5"},
- ]
- }
- boto3_client.run_task.return_value = {"tasks": [{"taskArn": "test"}]}
- boto3_client.register_task_definition.return_value = {}
-
- monkeypatch.setattr("boto3.client", MagicMock(return_value=boto3_client))
- monkeypatch.setattr("boto3.resource", boto3_resource)
-
- agent = FargateAgent(
- enable_task_revisions=True,
- use_external_kwargs=True,
- external_kwargs_s3_bucket="test-bucket",
- external_kwargs_s3_key="prefect-artifacts/kwargs",
- aws_access_key_id="id",
- aws_secret_access_key="secret",
- aws_session_token="token",
- region_name="region",
- cluster="test",
- tags=[{"key": "team", "value": "data"}],
- labels=["aws", "staging"],
- no_cloud_logs=True,
- )
- agent.deploy_flow(
- GraphQLResult(
- {
- "flow": GraphQLResult(
- {
- "storage": Docker(
- registry_url="test", image_name="name", image_tag="tag"
- ).serialize(),
- "environment": LocalEnvironment().serialize(),
- "id": "new_id",
- "version": 6,
- "name": "name",
- "core_version": "0.13.0",
- }
- ),
- "id": "id",
- "name": "name",
- }
- )
- )
- assert boto3_client.describe_task_definition.called
- boto3_client.register_task_definition.assert_called_with(
- containerDefinitions=[
- {
- "name": "flow",
- "image": "test/name:tag",
- "command": ["/bin/sh", "-c", "prefect execute flow-run"],
- "environment": [
- {"name": "PREFECT__BACKEND", "value": "cloud"},
- {"name": "PREFECT__CLOUD__API", "value": prefect.config.cloud.api},
- {
- "name": "PREFECT__CLOUD__AGENT__LABELS",
- "value": "['aws', 'staging']",
- },
- {"name": "PREFECT__CLOUD__USE_LOCAL_SECRETS", "value": "false"},
- {"name": "PREFECT__CLOUD__SEND_FLOW_RUN_LOGS", "value": "false"},
- {"name": "PREFECT__LOGGING__LEVEL", "value": "INFO"},
- {
- "name": "PREFECT__ENGINE__FLOW_RUNNER__DEFAULT_CLASS",
- "value": "prefect.engine.cloud.CloudFlowRunner",
- },
- {
- "name": "PREFECT__ENGINE__TASK_RUNNER__DEFAULT_CLASS",
- "value": "prefect.engine.cloud.CloudTaskRunner",
- },
- {"name": "PREFECT__LOGGING__LOG_TO_CLOUD", "value": "false"},
- ],
- "essential": True,
- }
- ],
- cpu="256",
- family="name",
- networkMode="awsvpc",
- requiresCompatibilities=["FARGATE"],
- tags=[
- {"key": "test", "value": "test"},
- {"key": "PrefectFlowId", "value": "new_id"},
- {"key": "PrefectFlowVersion", "value": "6"},
- ],
- )
- boto3_client.run_task.assert_called_with(
- launchType="FARGATE",
- networkConfiguration="test",
- cluster="test",
- overrides={
- "containerOverrides": [
- {
- "name": "flow",
- "environment": [
- {"name": "PREFECT__CLOUD__AUTH_TOKEN", "value": ""},
- {"name": "PREFECT__CONTEXT__FLOW_RUN_ID", "value": "id"},
- {"name": "PREFECT__CONTEXT__FLOW_ID", "value": "new_id"},
- ],
- }
- ]
- },
- taskDefinition="name",
- tags=[{"key": "test", "value": "test"}],
- )
- assert boto3_client.run_task.called_with(taskDefinition="name")
-
-
-def test_deploy_flows_disable_task_revisions_with_external_kwargs(
- monkeypatch, cloud_api
-):
- boto3_client = MagicMock()
- boto3_resource = MagicMock()
- streaming_body = MagicMock()
-
- streaming_body.read.return_value.decode.return_value = '{"cpu": "256", "networkConfiguration": "test", "tags": [{"key": "test", "value": "test"}]}'
- boto3_resource.return_value.Object.return_value.get.return_value = {
- "Body": streaming_body
- }
-
- boto3_client.describe_task_definition.return_value = {}
- boto3_client.run_task.return_value = {"tasks": [{"taskArn": "test"}]}
- boto3_client.register_task_definition.return_value = {}
-
- monkeypatch.setattr("boto3.client", MagicMock(return_value=boto3_client))
- monkeypatch.setattr("boto3.resource", boto3_resource)
-
- agent = FargateAgent(
- enable_task_revisions=False,
- use_external_kwargs=True,
- external_kwargs_s3_bucket="test-bucket",
- external_kwargs_s3_key="prefect-artifacts/kwargs",
- aws_access_key_id="id",
- aws_secret_access_key="secret",
- aws_session_token="token",
- region_name="region",
- cluster="test",
- labels=["aws", "staging"],
- )
- agent.deploy_flow(
- GraphQLResult(
- {
- "flow": GraphQLResult(
- {
- "storage": Docker(
- registry_url="test", image_name="name", image_tag="tag"
- ).serialize(),
- "environment": LocalEnvironment().serialize(),
- "id": "new_id",
- "version": 6,
- "name": "name",
- "core_version": "0.13.0",
- }
- ),
- "id": "id",
- "name": "name",
- }
- )
- )
- assert agent.task_definition_kwargs == {}
- assert boto3_client.describe_task_definition.called
- assert boto3_client.register_task_definition.not_called
- boto3_client.run_task.assert_called_with(
- launchType="FARGATE",
- networkConfiguration="test",
- cluster="test",
- overrides={
- "containerOverrides": [
- {
- "name": "flow",
- "environment": [
- {"name": "PREFECT__CLOUD__AUTH_TOKEN", "value": ""},
- {"name": "PREFECT__CONTEXT__FLOW_RUN_ID", "value": "id"},
- {"name": "PREFECT__CONTEXT__FLOW_ID", "value": "new_id"},
- ],
- }
- ]
- },
- taskDefinition="prefect-task-new_id",
- tags=[{"key": "test", "value": "test"}],
- )
- assert boto3_client.run_task.called_with(taskDefinition="prefect-task-new_id")
-
-
-def test_deploy_flows_launch_type_ec2(monkeypatch, cloud_api):
- boto3_client = MagicMock()
- boto3_resource = MagicMock()
- streaming_body = MagicMock()
-
- streaming_body.read.return_value.decode.return_value = '{"cpu": "256", "networkConfiguration": "test", "tags": [{"key": "test", "value": "test"}]}'
- boto3_resource.return_value.Object.return_value.get.return_value = {
- "Body": streaming_body
- }
-
- boto3_client.describe_task_definition.return_value = {}
- boto3_client.run_task.return_value = {"tasks": [{"taskArn": "test"}]}
- boto3_client.register_task_definition.return_value = {}
-
- monkeypatch.setattr("boto3.client", MagicMock(return_value=boto3_client))
- monkeypatch.setattr("boto3.resource", boto3_resource)
-
- agent = FargateAgent(
- launch_type="EC2",
- enable_task_revisions=False,
- use_external_kwargs=True,
- external_kwargs_s3_bucket="test-bucket",
- external_kwargs_s3_key="prefect-artifacts/kwargs",
- aws_access_key_id="id",
- aws_secret_access_key="secret",
- aws_session_token="token",
- region_name="region",
- cluster="test",
- labels=["aws", "staging"],
- )
- agent.deploy_flow(
- GraphQLResult(
- {
- "flow": GraphQLResult(
- {
- "storage": Docker(
- registry_url="test", image_name="name", image_tag="tag"
- ).serialize(),
- "environment": LocalEnvironment().serialize(),
- "id": "new_id",
- "version": 6,
- "name": "name",
- "core_version": "0.13.0",
- }
- ),
- "id": "id",
- "name": "name",
- }
- )
- )
- assert agent.task_definition_kwargs == {}
- assert boto3_client.describe_task_definition.called
- assert boto3_client.register_task_definition.not_called
- boto3_client.run_task.assert_called_with(
- launchType="EC2",
- networkConfiguration="test",
- cluster="test",
- overrides={
- "containerOverrides": [
- {
- "name": "flow",
- "environment": [
- {"name": "PREFECT__CLOUD__AUTH_TOKEN", "value": ""},
- {"name": "PREFECT__CONTEXT__FLOW_RUN_ID", "value": "id"},
- {"name": "PREFECT__CONTEXT__FLOW_ID", "value": "new_id"},
- ],
- }
- ]
- },
- taskDefinition="prefect-task-new_id",
- tags=[{"key": "test", "value": "test"}],
- )
- assert boto3_client.run_task.called_with(taskDefinition="prefect-task-new_id")
-
-
-def test_deploy_flows_launch_type_none(monkeypatch, cloud_api):
- boto3_client = MagicMock()
- boto3_resource = MagicMock()
- streaming_body = MagicMock()
-
- streaming_body.read.return_value.decode.return_value = '{"cpu": "256", "networkConfiguration": "test", "tags": [{"key": "test", "value": "test"}]}'
- boto3_resource.return_value.Object.return_value.get.return_value = {
- "Body": streaming_body
- }
-
- boto3_client.describe_task_definition.return_value = {}
- boto3_client.run_task.return_value = {"tasks": [{"taskArn": "test"}]}
- boto3_client.register_task_definition.return_value = {}
-
- monkeypatch.setattr("boto3.client", MagicMock(return_value=boto3_client))
- monkeypatch.setattr("boto3.resource", boto3_resource)
-
- agent = FargateAgent(
- launch_type=None,
- enable_task_revisions=False,
- use_external_kwargs=True,
- external_kwargs_s3_bucket="test-bucket",
- external_kwargs_s3_key="prefect-artifacts/kwargs",
- aws_access_key_id="id",
- aws_secret_access_key="secret",
- aws_session_token="token",
- region_name="region",
- cluster="test",
- labels=["aws", "staging"],
- )
- agent.deploy_flow(
- GraphQLResult(
- {
- "flow": GraphQLResult(
- {
- "storage": Docker(
- registry_url="test", image_name="name", image_tag="tag"
- ).serialize(),
- "environment": LocalEnvironment().serialize(),
- "id": "new_id",
- "version": 6,
- "name": "name",
- "core_version": "0.13.0",
- }
- ),
- "id": "id",
- "name": "name",
- }
- )
- )
- assert agent.task_definition_kwargs == {}
- assert boto3_client.describe_task_definition.called
- assert boto3_client.register_task_definition.not_called
- boto3_client.run_task.assert_called_with(
- networkConfiguration="test",
- cluster="test",
- overrides={
- "containerOverrides": [
- {
- "name": "flow",
- "environment": [
- {"name": "PREFECT__CLOUD__AUTH_TOKEN", "value": ""},
- {"name": "PREFECT__CONTEXT__FLOW_RUN_ID", "value": "id"},
- {"name": "PREFECT__CONTEXT__FLOW_ID", "value": "new_id"},
- ],
- }
- ]
- },
- taskDefinition="prefect-task-new_id",
- tags=[{"key": "test", "value": "test"}],
- )
- assert boto3_client.run_task.called_with(taskDefinition="prefect-task-new_id")
-
-
-def test_agent_configuration_utility(monkeypatch, cloud_api):
- boto3_client = MagicMock()
-
- boto3_client.run_task.return_value = {"tasks": [{"taskArn": "test"}]}
-
- monkeypatch.setattr("boto3.client", MagicMock(return_value=boto3_client))
-
- kwarg_dict = {
- "cluster": "cluster",
- "networkConfiguration": {
- "awsvpcConfiguration": {
- "subnets": ["subnet"],
- "assignPublicIp": "DISABLED",
- "securityGroups": ["security_group"],
- }
- },
- }
-
- agent = FargateAgent(
- aws_access_key_id="id",
- aws_secret_access_key="secret",
- aws_session_token="token",
- region_name="region",
- **kwarg_dict
- )
- agent.validate_configuration()
-
- assert boto3_client.register_task_definition.called
- assert boto3_client.run_task.called
- assert boto3_client.run_task.call_args[1]["cluster"] == "cluster"
- assert "prefect-test-task" in boto3_client.run_task.call_args[1]["taskDefinition"]
- assert boto3_client.run_task.call_args[1]["launchType"] == "FARGATE"
- assert boto3_client.run_task.call_args[1]["networkConfiguration"] == {
- "awsvpcConfiguration": {
- "subnets": ["subnet"],
- "assignPublicIp": "DISABLED",
- "securityGroups": ["security_group"],
- }
- }
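The configuration-utility test above inspects `call_args[1]`, the keyword arguments of the mock's most recent call. Unlike the `called_with` pattern flagged earlier, this is a real, falsifiable check:

```python
from unittest.mock import MagicMock

client = MagicMock()
client.run_task(cluster="cluster", launchType="FARGATE")

args, kwargs = client.run_task.call_args  # the most recent call
assert kwargs["cluster"] == "cluster"
assert client.run_task.call_args[1]["launchType"] == "FARGATE"
# Python 3.8+ also exposes these as call_args.args and call_args.kwargs.
```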
diff --git a/tests/agent/test_k8s_agent.py b/tests/agent/test_k8s_agent.py
index 14a6bdda9fe1..a626648c9d10 100644
--- a/tests/agent/test_k8s_agent.py
+++ b/tests/agent/test_k8s_agent.py
@@ -3,7 +3,7 @@
from dataclasses import dataclass
import datetime
import pendulum
-import logging
+import uuid
import pytest
import re
@@ -13,7 +13,6 @@
import prefect
from prefect.agent.kubernetes.agent import KubernetesAgent, read_bytes_from_path
-from prefect.environments import LocalEnvironment
from prefect.storage import Docker, Local
from prefect.run_configs import KubernetesRun, LocalRun, UniversalRun
from prefect.utilities.configuration import set_temporary_config
@@ -64,7 +63,7 @@ def test_k8s_agent_init_out_of_cluster(monkeypatch, cloud_api, mocked_k8s_config
mocked_k8s_config.load_kube_config.assert_called(), "The out-of-cluster config was tried next"
-def test_k8s_agent_config_options(monkeypatch, cloud_api):
+def test_k8s_agent_config_options(monkeypatch, config_with_api_key):
k8s_client = MagicMock()
monkeypatch.setattr("kubernetes.client", k8s_client)
@@ -74,445 +73,17 @@ def test_k8s_agent_config_options(monkeypatch, cloud_api):
get_jobs,
)
- with set_temporary_config({"cloud.agent.auth_token": "TEST_TOKEN"}):
- agent = KubernetesAgent(name="test", labels=["test"], namespace="namespace")
- assert agent
- assert agent.labels == ["test"]
- assert agent.name == "test"
- assert agent.namespace == "namespace"
- assert agent.client.get_auth_token() == "TEST_TOKEN"
- assert agent.logger
- assert agent.batch_client
-
-
-@pytest.mark.parametrize(
- "core_version,command",
- [
- ("0.10.0", "prefect execute cloud-flow"),
- ("0.6.0+134", "prefect execute cloud-flow"),
- ("0.13.0", "prefect execute flow-run"),
- ("0.13.1+134", "prefect execute flow-run"),
- ],
-)
-def test_k8s_agent_deploy_flow(core_version, command, monkeypatch, cloud_api):
- get_jobs = MagicMock(return_value=[])
- monkeypatch.setattr(
- "prefect.agent.kubernetes.agent.KubernetesAgent.manage_jobs",
- get_jobs,
- )
-
- agent = KubernetesAgent()
- agent.deploy_flow(
- flow_run=GraphQLResult(
- {
- "flow": GraphQLResult(
- {
- "storage": Docker(
- registry_url="test", image_name="name", image_tag="tag"
- ).serialize(),
- "environment": LocalEnvironment().serialize(),
- "id": "id",
- "core_version": core_version,
- }
- ),
- "id": "id",
- }
- )
- )
-
- assert agent.batch_client.create_namespaced_job.called
- assert (
- agent.batch_client.create_namespaced_job.call_args[1]["namespace"] == "default"
- )
- assert (
- agent.batch_client.create_namespaced_job.call_args[1]["body"]["apiVersion"]
- == "batch/v1"
- )
- assert agent.batch_client.create_namespaced_job.call_args[1]["body"]["spec"][
- "template"
- ]["spec"]["containers"][0]["args"] == [command]
-
-
-def test_k8s_agent_deploy_flow_uses_environment_metadata(monkeypatch, cloud_api):
- get_jobs = MagicMock(return_value=[])
- monkeypatch.setattr(
- "prefect.agent.kubernetes.agent.KubernetesAgent.manage_jobs",
- get_jobs,
- )
-
- agent = KubernetesAgent()
- agent.core_client.list_namespaced_pod.return_value = MagicMock(items=[])
-
- agent.deploy_flow(
- flow_run=GraphQLResult(
- {
- "flow": GraphQLResult(
- {
- "storage": Local().serialize(),
- "environment": LocalEnvironment(
- metadata={"image": "repo/name:tag"}
- ).serialize(),
- "id": "id",
- "core_version": "0.13.0",
- }
- ),
- "id": "id",
- }
- )
- )
-
- assert agent.batch_client.create_namespaced_job.called
- assert (
- agent.batch_client.create_namespaced_job.call_args[1]["body"]["spec"][
- "template"
- ]["spec"]["containers"][0]["image"]
- == "repo/name:tag"
- )
-
-
-def test_k8s_agent_deploy_flow_raises(monkeypatch, cloud_api):
- get_jobs = MagicMock(return_value=[])
- monkeypatch.setattr(
- "prefect.agent.kubernetes.agent.KubernetesAgent.manage_jobs",
- get_jobs,
- )
-
- agent = KubernetesAgent()
- agent.core_client.list_namespaced_pod.return_value = MagicMock(items=[])
-
- with pytest.raises(ValueError):
- agent.deploy_flow(
- flow_run=GraphQLResult(
- {
- "flow": GraphQLResult(
- {
- "storage": Local().serialize(),
- "id": "id",
- "environment": LocalEnvironment().serialize(),
- "core_version": "0.13.0",
- }
- ),
- "id": "id",
- }
- )
- )
-
- assert not agent.batch_client.create_namespaced_job.called
-
-
-def test_k8s_agent_replace_yaml_uses_user_env_vars(monkeypatch, cloud_api):
- get_jobs = MagicMock(return_value=[])
- monkeypatch.setattr(
- "prefect.agent.kubernetes.agent.KubernetesAgent.manage_jobs",
- get_jobs,
- )
-
- monkeypatch.setenv("IMAGE_PULL_SECRETS", "my-secret")
- monkeypatch.setenv("JOB_MEM_REQUEST", "mr")
- monkeypatch.setenv("JOB_MEM_LIMIT", "ml")
- monkeypatch.setenv("JOB_CPU_REQUEST", "cr")
- monkeypatch.setenv("JOB_CPU_LIMIT", "cl")
- monkeypatch.setenv("IMAGE_PULL_POLICY", "custom_policy")
- monkeypatch.setenv("SERVICE_ACCOUNT_NAME", "svc_name")
-
- flow_run = GraphQLResult(
- {
- "flow": GraphQLResult(
- {
- "storage": Docker(
- registry_url="test", image_name="name", image_tag="tag"
- ).serialize(),
- "environment": LocalEnvironment().serialize(),
- "id": "new_id",
- "core_version": "0.13.0",
- }
- ),
- "id": "id",
- }
- )
-
- with set_temporary_config(
- {"cloud.agent.auth_token": "token", "cloud.send_flow_run_logs": True}
- ):
- agent = KubernetesAgent(env_vars=dict(AUTH_THING="foo", PKG_SETTING="bar"))
- job = agent.generate_job_spec_from_environment(flow_run, image="test/name:tag")
-
- assert job["metadata"]["labels"]["prefect.io/flow_run_id"] == "id"
- assert job["metadata"]["labels"]["prefect.io/flow_id"] == "new_id"
- assert (
- job["spec"]["template"]["metadata"]["labels"]["prefect.io/flow_run_id"]
- == "id"
- )
- assert (
- job["spec"]["template"]["spec"]["containers"][0]["image"] == "test/name:tag"
- )
-
- env = job["spec"]["template"]["spec"]["containers"][0]["env"]
-
- assert env[0]["value"] == "https://api.prefect.io"
- assert env[1]["value"] == "token"
- assert env[2]["value"] == "id"
- assert env[3]["value"] == "new_id"
- assert env[4]["value"] == "default"
- assert env[5]["value"] == "[]"
- assert env[6]["value"] == "true"
-
- user_vars = [
- dict(name="AUTH_THING", value="foo"),
- dict(name="PKG_SETTING", value="bar"),
- ]
- assert env[-1] in user_vars
- assert env[-2] in user_vars
-
- assert (
- job["spec"]["template"]["spec"]["containers"][0]["imagePullPolicy"]
- == "custom_policy"
- )
- assert job["spec"]["template"]["spec"]["serviceAccountName"] == "svc_name"
-
- assert job["spec"]["template"]["spec"]["imagePullSecrets"] == [
- {"name": "my-secret"}
- ]
-
-
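The removed assertions above index into the container's env list by position (`env[0]` through `env[6]`), which breaks whenever a variable is added or reordered. The rewritten tests later in this diff key by name instead, which is the sturdier pattern:

```python
env_list = [
    {"name": "PREFECT__CLOUD__API", "value": "https://api.prefect.io"},
    {"name": "PREFECT__CLOUD__AGENT__LABELS", "value": "[]"},
]
env = {item["name"]: item["value"] for item in env_list}
assert env["PREFECT__CLOUD__AGENT__LABELS"] == "[]"
```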
-def test_k8s_agent_replace_yaml_respects_multiple_image_secrets(monkeypatch, cloud_api):
- get_jobs = MagicMock(return_value=[])
- monkeypatch.setattr(
- "prefect.agent.kubernetes.agent.KubernetesAgent.manage_jobs",
- get_jobs,
- )
-
- monkeypatch.setenv("IMAGE_PULL_SECRETS", "some-secret,other-secret")
- monkeypatch.setenv("IMAGE_PULL_POLICY", "custom_policy")
-
- flow_run = GraphQLResult(
- {
- "flow": GraphQLResult(
- {
- "storage": Docker(
- registry_url="test", image_name="name", image_tag="tag"
- ).serialize(),
- "environment": LocalEnvironment().serialize(),
- "id": "new_id",
- "core_version": "0.13.0",
- }
- ),
- "id": "id",
- }
- )
-
- with set_temporary_config(
- {"cloud.agent.auth_token": "token", "cloud.send_flow_run_logs": True}
- ):
- agent = KubernetesAgent(env_vars=dict(AUTH_THING="foo", PKG_SETTING="bar"))
- job = agent.generate_job_spec_from_environment(flow_run, image="test/name:tag")
- expected_secrets = [{"name": "some-secret"}, {"name": "other-secret"}]
- assert job["spec"]["template"]["spec"]["imagePullSecrets"] == expected_secrets
-
-
-def test_k8s_agent_replace_yaml(monkeypatch, cloud_api):
- get_jobs = MagicMock(return_value=[])
- monkeypatch.setattr(
- "prefect.agent.kubernetes.agent.KubernetesAgent.manage_jobs",
- get_jobs,
- )
-
- monkeypatch.setenv("IMAGE_PULL_SECRETS", "my-secret")
- monkeypatch.setenv("JOB_MEM_REQUEST", "mr")
- monkeypatch.setenv("JOB_MEM_LIMIT", "ml")
- monkeypatch.setenv("JOB_CPU_REQUEST", "cr")
- monkeypatch.setenv("JOB_CPU_LIMIT", "cl")
-
- flow_run = GraphQLResult(
- {
- "flow": GraphQLResult(
- {
- "storage": Docker(
- registry_url="test", image_name="name", image_tag="tag"
- ).serialize(),
- "environment": LocalEnvironment().serialize(),
- "id": "new_id",
- "core_version": "0.13.0",
- }
- ),
- "id": "id",
- }
- )
-
- with set_temporary_config(
- {"cloud.agent.auth_token": "token", "cloud.send_flow_run_logs": True}
- ):
- volume_mounts = [{"name": "my-vol", "mountPath": "/mnt/my-mount"}]
- volumes = [{"name": "my-vol", "hostPath": "/host/folder"}]
- agent = KubernetesAgent(volume_mounts=volume_mounts, volumes=volumes)
- job = agent.generate_job_spec_from_environment(flow_run, image="test/name:tag")
-
- assert job["metadata"]["labels"]["prefect.io/flow_run_id"] == "id"
- assert job["metadata"]["labels"]["prefect.io/flow_id"] == "new_id"
- assert (
- job["spec"]["template"]["metadata"]["labels"]["prefect.io/flow_run_id"]
- == "id"
- )
- assert (
- job["spec"]["template"]["spec"]["containers"][0]["image"] == "test/name:tag"
- )
-
- env = job["spec"]["template"]["spec"]["containers"][0]["env"]
-
- assert env[0]["value"] == "https://api.prefect.io"
- assert env[1]["value"] == "token"
- assert env[2]["value"] == "id"
- assert env[3]["value"] == "new_id"
- assert env[4]["value"] == "default"
- assert env[5]["value"] == "[]"
- assert env[6]["value"] == "true"
-
- assert (
- job["spec"]["template"]["spec"]["imagePullSecrets"][0]["name"]
- == "my-secret"
- )
-
- resources = job["spec"]["template"]["spec"]["containers"][0]["resources"]
- assert resources["requests"]["memory"] == "mr"
- assert resources["limits"]["memory"] == "ml"
- assert resources["requests"]["cpu"] == "cr"
- assert resources["limits"]["cpu"] == "cl"
-
- volumeMounts = job["spec"]["template"]["spec"]["containers"][0]["volumeMounts"]
- assert volumeMounts[0]["name"] == "my-vol"
- assert volumeMounts[0]["mountPath"] == "/mnt/my-mount"
-
- assert (
- job["spec"]["template"]["spec"]["containers"][0]["imagePullPolicy"]
- == "IfNotPresent"
- )
-
- volumes = job["spec"]["template"]["spec"]["volumes"]
- assert volumes[0]["name"] == "my-vol"
- assert volumes[0]["hostPath"] == "/host/folder"
-
- assert job["spec"]["template"]["spec"].get("serviceAccountName", None) is None
-
-
-@pytest.mark.parametrize("flag", [True, False])
-def test_k8s_agent_replace_yaml_responds_to_logging_config(
- monkeypatch, cloud_api, flag
-):
- get_jobs = MagicMock(return_value=[])
- monkeypatch.setattr(
- "prefect.agent.kubernetes.agent.KubernetesAgent.manage_jobs",
- get_jobs,
- )
-
- flow_run = GraphQLResult(
- {
- "flow": GraphQLResult(
- {
- "storage": Docker(
- registry_url="test", image_name="name", image_tag="tag"
- ).serialize(),
- "environment": LocalEnvironment().serialize(),
- "id": "new_id",
- "core_version": "0.13.0",
- }
- ),
- "id": "id",
- "name": "name",
- }
- )
-
- agent = KubernetesAgent(no_cloud_logs=flag)
- job = agent.generate_job_spec_from_environment(flow_run, image="test/name:tag")
- env = job["spec"]["template"]["spec"]["containers"][0]["env"]
- assert env[6]["value"] == str(not flag).lower()
-
-
-def test_k8s_agent_replace_yaml_no_pull_secrets(monkeypatch, cloud_api):
- get_jobs = MagicMock(return_value=[])
- monkeypatch.setattr(
- "prefect.agent.kubernetes.agent.KubernetesAgent.manage_jobs",
- get_jobs,
- )
-
- flow_run = GraphQLResult(
- {
- "flow": GraphQLResult(
- {
- "storage": Docker(
- registry_url="test", image_name="name", image_tag="tag"
- ).serialize(),
- "environment": LocalEnvironment().serialize(),
- "id": "id",
- "core_version": "0.13.0",
- }
- ),
- "id": "id",
- }
- )
-
- agent = KubernetesAgent()
- job = agent.generate_job_spec_from_environment(flow_run, image="test/name:tag")
-
- assert not job["spec"]["template"]["spec"].get("imagePullSecrets", None)
-
-
-def test_k8s_agent_removes_yaml_no_volume(monkeypatch, cloud_api):
- flow_run = GraphQLResult(
- {
- "flow": GraphQLResult(
- {
- "storage": Docker(
- registry_url="test", image_name="name", image_tag="tag"
- ).serialize(),
- "environment": LocalEnvironment().serialize(),
- "id": "id",
- "core_version": "0.13.0",
- }
- ),
- "id": "id",
- }
- )
-
- agent = KubernetesAgent()
- job = agent.generate_job_spec_from_environment(flow_run, image="test/name:tag")
-
- assert not job["spec"]["template"]["spec"].get("volumes", None)
- assert not job["spec"]["template"]["spec"]["containers"][0].get(
- "volumeMounts", None
- )
-
-
-def test_k8s_agent_includes_agent_labels_in_job(monkeypatch, cloud_api):
- get_jobs = MagicMock(return_value=[])
- monkeypatch.setattr(
- "prefect.agent.kubernetes.agent.KubernetesAgent.manage_jobs",
- get_jobs,
- )
-
- flow_run = GraphQLResult(
- {
- "flow": GraphQLResult(
- {
- "storage": Docker(
- registry_url="test", image_name="name", image_tag="tag"
- ).serialize(),
- "environment": LocalEnvironment().serialize(),
- "id": "new_id",
- "core_version": "0.13.0",
- }
- ),
- "id": "id",
- }
- )
-
- agent = KubernetesAgent(labels=["foo", "bar"])
- job = agent.generate_job_spec_from_environment(flow_run, image="test/name:tag")
- env = job["spec"]["template"]["spec"]["containers"][0]["env"]
- assert env[5]["value"] == "['foo', 'bar']"
+ agent = KubernetesAgent(name="test", labels=["test"], namespace="namespace")
+ assert agent
+ assert agent.labels == ["test"]
+ assert agent.name == "test"
+ assert agent.namespace == "namespace"
+ assert agent.client.api_key == config_with_api_key.cloud.api_key
+ assert agent.logger
+ assert agent.batch_client
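These rewritten assertions depend on a `config_with_api_key` fixture defined in the suite's conftest, which this diff does not show. Judging from the values asserted here and in later hunks ("TEST_KEY" as the API key), it plausibly resembles the sketch below; the real definition may differ:

```python
import pytest
from prefect.utilities.configuration import set_temporary_config

@pytest.fixture()
def config_with_api_key(cloud_api):
    # "TEST_KEY" matches the value the rewritten tests assert;
    # the tenant id here is an assumption.
    with set_temporary_config(
        {"cloud.api_key": "TEST_KEY", "cloud.tenant_id": "TEST_TENANT"}
    ) as config:
        yield config
```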
-@pytest.mark.parametrize("use_token", [True, False])
-def test_k8s_agent_generate_deployment_yaml(monkeypatch, cloud_api, use_token):
+def test_k8s_agent_generate_deployment_yaml(monkeypatch, cloud_api):
get_jobs = MagicMock(return_value=[])
monkeypatch.setattr(
"prefect.agent.kubernetes.agent.KubernetesAgent.manage_jobs",
@@ -521,9 +92,8 @@ def test_k8s_agent_generate_deployment_yaml(monkeypatch, cloud_api, use_token):
agent = KubernetesAgent()
deployment = agent.generate_deployment_yaml(
- token="test_token" if use_token else None,
- key="test-key" if not use_token else None,
- tenant_id="test-tenant" if not use_token else None,
+ key="test-key",
+ tenant_id="test-tenant",
api="test_api",
namespace="test_namespace",
backend="backend-test",
@@ -533,17 +103,17 @@ def test_k8s_agent_generate_deployment_yaml(monkeypatch, cloud_api, use_token):
agent_env = deployment["spec"]["template"]["spec"]["containers"][0]["env"]
- assert agent_env[0]["value"] == ("test_token" if use_token else "")
+ assert agent_env[0]["value"] == "test-key"
assert agent_env[1]["value"] == "test_api"
assert agent_env[2]["value"] == "test_namespace"
assert agent_env[11]["value"] == "backend-test"
assert agent_env[13] == {
"name": "PREFECT__CLOUD__API_KEY",
- "value": "test-key" if not use_token else "",
+ "value": "test-key",
}
assert agent_env[14] == {
"name": "PREFECT__CLOUD__TENANT_ID",
- "value": "test-tenant" if not use_token else "",
+ "value": "test-tenant",
}
@@ -606,7 +176,7 @@ def test_k8s_agent_generate_deployment_yaml_backend_default(monkeypatch, server_
@pytest.mark.parametrize(
"version",
[
- ("0.6.3", "0.6.3-python3.6"),
+ ("0.6.3", "0.6.3-python3.7"),
("0.5.3+114.g35bc7ba4", "latest"),
("0.5.2+999.gr34343.dirty", "latest"),
],
@@ -624,7 +194,7 @@ def test_k8s_agent_generate_deployment_yaml_local_version(
agent = KubernetesAgent()
deployment = agent.generate_deployment_yaml(
- token="test_token",
+ key="test-key",
api="test_api",
namespace="test_namespace",
)
@@ -645,7 +215,7 @@ def test_k8s_agent_generate_deployment_yaml_latest(monkeypatch, cloud_api):
agent = KubernetesAgent()
deployment = agent.generate_deployment_yaml(
- token="test_token",
+ key="test-key",
api="test_api",
namespace="test_namespace",
latest=True,
@@ -667,7 +237,7 @@ def test_k8s_agent_generate_deployment_yaml_labels(monkeypatch, cloud_api):
agent = KubernetesAgent()
deployment = agent.generate_deployment_yaml(
- token="test_token",
+ key="test-key",
api="test_api",
namespace="test_namespace",
labels=["test_label1", "test_label2"],
@@ -677,7 +247,7 @@ def test_k8s_agent_generate_deployment_yaml_labels(monkeypatch, cloud_api):
agent_env = deployment["spec"]["template"]["spec"]["containers"][0]["env"]
- assert agent_env[0]["value"] == "test_token"
+ assert agent_env[0]["value"] == "test-key"
assert agent_env[1]["value"] == "test_api"
assert agent_env[2]["value"] == "test_namespace"
assert agent_env[4]["value"] == "['test_label1', 'test_label2']"
@@ -696,7 +266,7 @@ def test_k8s_agent_generate_deployment_yaml_no_image_pull_secrets(
agent = KubernetesAgent()
deployment = agent.generate_deployment_yaml(
- token="test_token", api="test_api", namespace="test_namespace"
+ key="test-key", api="test_api", namespace="test_namespace"
)
deployment = yaml.safe_load(deployment)
@@ -722,7 +292,6 @@ def test_k8s_agent_generate_deployment_yaml_empty_image_pull_secrets(
agent = KubernetesAgent()
deployment = agent.generate_deployment_yaml(
- token="test_token",
api="test_api",
namespace="test_namespace",
image_pull_secrets="",
@@ -752,7 +321,6 @@ def test_k8s_agent_generate_deployment_yaml_env_contains_empty_image_pull_secret
agent = KubernetesAgent()
deployment = agent.generate_deployment_yaml(
- token="test_token",
api="test_api",
namespace="test_namespace",
)
@@ -775,7 +343,7 @@ def test_k8s_agent_generate_deployment_yaml_contains_image_pull_secrets(
agent = KubernetesAgent()
deployment = agent.generate_deployment_yaml(
- token="test_token",
+ key="test-key",
api="test_api",
namespace="test_namespace",
image_pull_secrets="secrets",
@@ -800,7 +368,7 @@ def test_k8s_agent_generate_deployment_yaml_contains_resources(monkeypatch, clou
agent = KubernetesAgent()
deployment = agent.generate_deployment_yaml(
- token="test_token",
+ key="test-key",
api="test_api",
namespace="test_namespace",
mem_request="mr",
@@ -832,7 +400,7 @@ def test_k8s_agent_generate_deployment_yaml_rbac(monkeypatch, cloud_api):
agent = KubernetesAgent()
deployment = agent.generate_deployment_yaml(
- token="test_token", api="test_api", namespace="test_namespace", rbac=True
+ key="test-key", api="test_api", namespace="test_namespace", rbac=True
)
deployment = yaml.safe_load_all(deployment)
@@ -1309,12 +877,10 @@ def build_flow_run(self, config, storage=None, core_version="0.13.0"):
@pytest.mark.parametrize("run_config", [None, UniversalRun()])
def test_generate_job_spec_null_or_universal_run_config(self, run_config):
- self.agent.generate_job_spec_from_run_config = MagicMock(
- wraps=self.agent.generate_job_spec_from_run_config
- )
+ self.agent.generate_job_spec = MagicMock(wraps=self.agent.generate_job_spec)
flow_run = self.build_flow_run(run_config)
self.agent.generate_job_spec(flow_run)
- assert self.agent.generate_job_spec_from_run_config.called
+ assert self.agent.generate_job_spec.called
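`MagicMock(wraps=obj)` records calls while still delegating to the real implementation. Note, though, that the updated test invokes `generate_job_spec` and then asserts that the very same mock was called, which is vacuously true; a wrap only has teeth when it targets an inner collaborator, as the removed version did. A sketch of the meaningful form (the helper name is hypothetical):

```python
from unittest.mock import MagicMock

class Agent:
    def generate_job_spec(self, run):
        return self._build(run)

    def _build(self, run):  # hypothetical inner helper
        return {"run": run}

agent = Agent()
agent._build = MagicMock(wraps=agent._build)  # spy: real behaviour, recorded calls
agent.generate_job_spec("flow-run")
agent._build.assert_called_once_with("flow-run")
```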
def test_generate_job_spec_errors_if_non_kubernetesrun_run_config(self):
with pytest.raises(
@@ -1496,48 +1062,44 @@ def test_generate_job_spec_environment_variables(self, tmpdir, backend):
"CUSTOM4": "VALUE4",
}
- def test_environment_has_agent_token_from_config(self):
- """Check that the API token is passed through from the config via environ"""
+ def test_environment_has_api_key_from_config(self, config_with_api_key):
+ """Check that the API key is passed through from the config via environ"""
flow_run = self.build_flow_run(KubernetesRun())
- with set_temporary_config({"cloud.agent.auth_token": "TEST_TOKEN"}):
- job = KubernetesAgent(
- namespace="testing",
- ).generate_job_spec(flow_run)
+ agent = KubernetesAgent(
+ namespace="testing",
+ )
+ job = agent.generate_job_spec(flow_run)
env_list = job["spec"]["template"]["spec"]["containers"][0]["env"]
env = {item["name"]: item["value"] for item in env_list}
- assert env["PREFECT__CLOUD__AUTH_TOKEN"] == "TEST_TOKEN"
+ assert env["PREFECT__CLOUD__API_KEY"] == "TEST_KEY"
+ assert env["PREFECT__CLOUD__AUTH_TOKEN"] == "TEST_KEY"
+ assert env["PREFECT__CLOUD__TENANT_ID"] == config_with_api_key.cloud.tenant_id
- @pytest.mark.parametrize("tenant_id", ["ID", None])
- def test_environment_has_api_key_from_config(self, tenant_id):
+ def test_environment_has_tenant_id_from_server(self, config_with_api_key):
"""Check that the API key is passed through from the config via environ"""
flow_run = self.build_flow_run(KubernetesRun())
+ tenant_id = str(uuid.uuid4())
- with set_temporary_config(
- {
- "cloud.api_key": "TEST_KEY",
- "cloud.tenant_id": tenant_id,
- "cloud.agent.auth_token": None,
- }
- ):
- agent = KubernetesAgent(
- namespace="testing",
- )
- agent.client._get_auth_tenant = MagicMock(return_value="ID")
+ with set_temporary_config({"cloud.tenant_id": None}):
+ agent = KubernetesAgent(namespace="testing")
+
+ agent.client._get_auth_tenant = MagicMock(return_value=tenant_id)
job = agent.generate_job_spec(flow_run)
- env_list = job["spec"]["template"]["spec"]["containers"][0]["env"]
- env = {item["name"]: item["value"] for item in env_list}
+ env_list = job["spec"]["template"]["spec"]["containers"][0]["env"]
+ env = {item["name"]: item["value"] for item in env_list}
assert env["PREFECT__CLOUD__API_KEY"] == "TEST_KEY"
assert env["PREFECT__CLOUD__AUTH_TOKEN"] == "TEST_KEY"
- assert env["PREFECT__CLOUD__TENANT_ID"] == "ID"
+ assert env["PREFECT__CLOUD__TENANT_ID"] == tenant_id
- @pytest.mark.parametrize("tenant_id", ["ID", None])
- def test_environment_has_api_key_from_disk(self, monkeypatch, tenant_id):
+ def test_environment_has_api_key_from_disk(self, monkeypatch):
"""Check that the API key is passed through from the on disk cache"""
+ tenant_id = str(uuid.uuid4())
+
monkeypatch.setattr(
"prefect.Client.load_auth_from_disk",
MagicMock(return_value={"api_key": "TEST_KEY", "tenant_id": tenant_id}),
@@ -1547,7 +1109,7 @@ def test_environment_has_api_key_from_disk(self, monkeypatch, tenant_id):
agent = KubernetesAgent(
namespace="testing",
)
- agent.client._get_auth_tenant = MagicMock(return_value="ID")
+ agent.client._get_auth_tenant = MagicMock(return_value=tenant_id)
job = agent.generate_job_spec(flow_run)
env_list = job["spec"]["template"]["spec"]["containers"][0]["env"]
@@ -1555,7 +1117,7 @@ def test_environment_has_api_key_from_disk(self, monkeypatch, tenant_id):
assert env["PREFECT__CLOUD__API_KEY"] == "TEST_KEY"
assert env["PREFECT__CLOUD__AUTH_TOKEN"] == "TEST_KEY"
- assert env["PREFECT__CLOUD__TENANT_ID"] == "ID"
+ assert env["PREFECT__CLOUD__TENANT_ID"] == tenant_id
@pytest.mark.parametrize(
"config, agent_env_vars, run_config_env_vars, expected_logging_level",
diff --git a/tests/agent/test_local_agent.py b/tests/agent/test_local_agent.py
index 9de7958f34fc..66ff37aae4de 100644
--- a/tests/agent/test_local_agent.py
+++ b/tests/agent/test_local_agent.py
@@ -1,6 +1,7 @@
import os
import socket
import sys
+import uuid
from unittest.mock import MagicMock
import pytest
@@ -35,11 +36,8 @@
@pytest.fixture(autouse=True)
-def mock_cloud_config(cloud_api):
- with set_temporary_config(
- {"cloud.agent.auth_token": "TEST_TOKEN", "cloud.send_flow_run_logs": True}
- ):
- yield
+def autouse_api_key_config(config_with_api_key):
+ yield config_with_api_key
def test_local_agent_init():
@@ -54,14 +52,14 @@ def test_local_agent_deduplicates_labels():
assert sorted(agent.labels) == sorted(DEFAULT_AGENT_LABELS)
-def test_local_agent_config_options():
+def test_local_agent_config_options(config_with_api_key):
agent = LocalAgent(
name="test",
labels=["test_label"],
import_paths=["test_path"],
)
assert agent.name == "test"
- assert agent.client.get_auth_token() == "TEST_TOKEN"
+ assert agent.client.api_key == config_with_api_key.cloud.api_key
assert agent.logger
assert agent.log_to_cloud is True
assert agent.processes == set()
@@ -86,7 +84,7 @@ def test_local_agent_uses_ip_if_dockerdesktop_hostname(monkeypatch):
assert "IP" in agent.labels
-def test_populate_env_vars(monkeypatch, backend):
+def test_populate_env_vars(monkeypatch, backend, config_with_api_key):
agent = LocalAgent()
# The python path may be a single item and we want to ensure the correct separator
@@ -102,7 +100,8 @@ def test_populate_env_vars(monkeypatch, backend):
"PYTHONPATH": os.getcwd() + os.pathsep + expected.get("PYTHONPATH", ""),
"PREFECT__BACKEND": backend,
"PREFECT__CLOUD__API": prefect.config.cloud.api,
- "PREFECT__CLOUD__AUTH_TOKEN": "TEST_TOKEN",
+ "PREFECT__CLOUD__API_KEY": config_with_api_key.cloud.api_key,
+ "PREFECT__CLOUD__TENANT_ID": config_with_api_key.cloud.tenant_id,
"PREFECT__CLOUD__AGENT__LABELS": str(DEFAULT_AGENT_LABELS),
"PREFECT__CONTEXT__FLOW_RUN_ID": "id",
"PREFECT__CONTEXT__FLOW_ID": "foo",
@@ -125,51 +124,43 @@ def test_populate_env_vars_sets_log_to_cloud(flag):
assert env_vars["PREFECT__CLOUD__SEND_FLOW_RUN_LOGS"] == str(not flag).lower()
-def test_environment_has_agent_token_from_config():
- """Check that the API token is passed through from the config via environ"""
+def test_environment_has_api_key_from_config(config_with_api_key):
+ """Check that the API key is passed through from the config via environ"""
- with set_temporary_config({"cloud.agent.auth_token": "TEST_TOKEN"}):
- agent = LocalAgent()
- env = agent.populate_env_vars(TEST_FLOW_RUN_DATA)
+ agent = LocalAgent()
+ agent.client._get_auth_tenant = MagicMock(return_value="ID")
+ env = agent.populate_env_vars(TEST_FLOW_RUN_DATA)
- assert env["PREFECT__CLOUD__AUTH_TOKEN"] == "TEST_TOKEN"
+ assert env["PREFECT__CLOUD__API_KEY"] == config_with_api_key.cloud.api_key
+ assert env["PREFECT__CLOUD__TENANT_ID"] == config_with_api_key.cloud.tenant_id
-@pytest.mark.parametrize("tenant_id", ["ID", None])
-def test_environment_has_api_key_from_config(tenant_id):
- """Check that the API key is passed through from the config via environ"""
+def test_environment_has_tenant_id_from_server(config_with_api_key):
+ tenant_id = str(uuid.uuid4())
- with set_temporary_config(
- {
- "cloud.api_key": "TEST_KEY",
- "cloud.tenant_id": tenant_id,
- "cloud.agent.auth_token": None,
- }
- ):
+ with set_temporary_config({"cloud.tenant_id": None}):
agent = LocalAgent()
- agent.client._get_auth_tenant = MagicMock(return_value="ID")
+ agent.client._get_auth_tenant = MagicMock(return_value=tenant_id)
env = agent.populate_env_vars(TEST_FLOW_RUN_DATA)
- assert env["PREFECT__CLOUD__API_KEY"] == "TEST_KEY"
- assert env["PREFECT__CLOUD__AUTH_TOKEN"] == "TEST_KEY"
- assert env["PREFECT__CLOUD__TENANT_ID"] == "ID"
+ assert env["PREFECT__CLOUD__API_KEY"] == config_with_api_key.cloud.api_key
+ assert env["PREFECT__CLOUD__TENANT_ID"] == tenant_id
-@pytest.mark.parametrize("tenant_id", ["ID", None])
-def test_environment_has_api_key_from_disk(monkeypatch, tenant_id):
+def test_environment_has_api_key_from_disk(monkeypatch):
"""Check that the API key is passed through from the on disk cache"""
+ tenant_id = str(uuid.uuid4())
+
monkeypatch.setattr(
"prefect.Client.load_auth_from_disk",
MagicMock(return_value={"api_key": "TEST_KEY", "tenant_id": tenant_id}),
)
- with set_temporary_config({"cloud.agent.auth_token": None}):
+ with set_temporary_config({"cloud.tenant_id": None}):
agent = LocalAgent()
- agent.client._get_auth_tenant = MagicMock(return_value="ID")
- env = agent.populate_env_vars(TEST_FLOW_RUN_DATA)
+ env = agent.populate_env_vars(TEST_FLOW_RUN_DATA)
assert env["PREFECT__CLOUD__API_KEY"] == "TEST_KEY"
- assert env["PREFECT__CLOUD__AUTH_TOKEN"] == "TEST_KEY"
- assert env["PREFECT__CLOUD__TENANT_ID"] == "ID"
+ assert env["PREFECT__CLOUD__TENANT_ID"] == tenant_id
def test_populate_env_vars_from_agent_config():
@@ -485,23 +476,6 @@ def test_local_agent_deploy_run_config_missing_working_dir(monkeypatch, tmpdir):
assert not agent.processes
-def test_generate_supervisor_conf_with_token():
- # Covers deprecated token based auth
- agent = LocalAgent()
-
- conf = agent.generate_supervisor_conf(
- token="token",
- labels=["label"],
- import_paths=["path"],
- env_vars={"TESTKEY": "TESTVAL"},
- )
-
- assert "-t token" in conf
- assert "-l label" in conf
- assert "-p path" in conf
- assert "-e TESTKEY=TESTVAL" in conf
-
-
def test_generate_supervisor_conf_with_key():
agent = LocalAgent()
@@ -520,20 +494,6 @@ def test_generate_supervisor_conf_with_key():
assert "-e TESTKEY=TESTVAL" in conf
-def test_generate_supervisor_conf_with_token_and_key():
- # Covers deprecated token based auth colliding with key based auth
- agent = LocalAgent()
-
- with pytest.raises(ValueError, match="Given both a API token and API key"):
- agent.generate_supervisor_conf(
- token="token",
- key="key",
- labels=["label"],
- import_paths=["path"],
- env_vars={"TESTKEY": "TESTVAL"},
- )
-
-
@pytest.mark.parametrize(
"returncode,show_flow_logs,logs",
(
diff --git a/tests/agent/test_vertex_agent.py b/tests/agent/test_vertex_agent.py
index f8a9ad956aa4..97c71c46fefb 100644
--- a/tests/agent/test_vertex_agent.py
+++ b/tests/agent/test_vertex_agent.py
@@ -2,6 +2,7 @@
import box
import pytest
+import uuid
pytest.importorskip("google.cloud.aiplatform")
@@ -107,33 +108,29 @@ def test_environment_overrides(self, project, region):
expected["c"] = 2
assert env == expected
- @pytest.mark.parametrize("tenant_id", ["ID", None])
- def test_environment_has_api_key_from_config(self, agent, tenant_id):
- with set_temporary_config(
- {
- "cloud.api_key": "TEST_KEY",
- "cloud.tenant_id": tenant_id,
- "cloud.agent.auth_token": None,
- }
- ):
- run_config = UniversalRun()
- flow_run = graphql_result(run_config)
- env = agent.populate_env_vars(flow_run)
+ def test_environment_has_api_key_from_config(self, agent, config_with_api_key):
+ run_config = UniversalRun()
+ flow_run = graphql_result(run_config)
+ env = agent.populate_env_vars(flow_run)
expected = self.DEFAULT.copy()
- expected["PREFECT__CLOUD__API_KEY"] == "TEST_KEY"
- expected["PREFECT__CLOUD__AUTH_TOKEN"] == "TEST_KEY"
- expected["PREFECT__CLOUD__TENANT_ID"] == "ID"
+ expected["PREFECT__CLOUD__API_KEY"] = config_with_api_key.cloud.api_key
+ expected["PREFECT__CLOUD__AUTH_TOKEN"] = config_with_api_key.cloud.api_key
+ expected["PREFECT__CLOUD__TENANT_ID"] = config_with_api_key.cloud.tenant_id
assert env == expected
- def test_environment_has_agent_token_from_config(self, agent):
- with set_temporary_config({"cloud.agent.auth_token": "TEST_TOKEN"}):
+ def test_environment_has_tenant_id_from_server(self, agent, config_with_api_key):
+ tenant_id = str(uuid.uuid4())
+
+ with set_temporary_config({"cloud.tenant_id": None}):
run_config = UniversalRun()
flow_run = graphql_result(run_config)
env = agent.populate_env_vars(flow_run)
expected = self.DEFAULT.copy()
- expected["PREFECT__CLOUD__AUTH_TOKEN"] = "TEST_TOKEN"
+ expected["PREFECT__CLOUD__API_KEY"] = config_with_api_key.cloud.api_key
+ expected["PREFECT__CLOUD__AUTH_TOKEN"] = config_with_api_key.cloud.api_key
+ expected["PREFECT__CLOUD__TENANT_ID"] = tenant_id
assert env == expected
@@ -259,7 +256,6 @@ class TestDeployFlow:
"name": "PREFECT__CLOUD__API",
"value": config.cloud.api,
},
- {"name": "PREFECT__CLOUD__AUTH_TOKEN", "value": ""},
{"name": "PREFECT__CLOUD__API_KEY", "value": ""},
{"name": "PREFECT__CLOUD__TENANT_ID", "value": ""},
{"name": "PREFECT__CLOUD__AGENT__LABELS", "value": "[]"},
@@ -285,6 +281,7 @@ class TestDeployFlow:
"value": "prefect.engine.cloud.CloudTaskRunner",
},
{"name": "PREFECT__LOGGING__LOG_TO_CLOUD", "value": "true"},
+ {"name": "PREFECT__CLOUD__AUTH_TOKEN", "value": ""},
],
},
}
diff --git a/tests/backend/test_execution.py b/tests/backend/test_execution.py
index 68cb8369a314..c793b2412c8f 100644
--- a/tests/backend/test_execution.py
+++ b/tests/backend/test_execution.py
@@ -1,5 +1,6 @@
import pendulum
import pytest
+import uuid
import os
import sys
from unittest.mock import MagicMock, call
@@ -21,6 +22,9 @@
from prefect.utilities.configuration import set_temporary_config
+CONFIG_TENANT_ID = str(uuid.uuid4())
+
+
@pytest.fixture()
def cloud_mocks(monkeypatch):
class CloudMocks:
@@ -83,6 +87,7 @@ def test_creates_subprocess_correctly(self, cloud_mocks, mocks, include_local_en
"PREFECT__CLOUD__API": "https://api.prefect.io",
"PREFECT__CLOUD__TENANT_ID": "",
"PREFECT__CLOUD__API_KEY": cloud_mocks.Client().api_key,
+ "PREFECT__CLOUD__AUTH_TOKEN": cloud_mocks.Client().api_key,
"PREFECT__CONTEXT__FLOW_RUN_ID": "flow-run-id",
"PREFECT__CONTEXT__FLOW_ID": cloud_mocks.FlowRunView.from_flow_run_id().flow_id,
"PREFECT__ENGINE__FLOW_RUNNER__DEFAULT_CLASS": "prefect.engine.cloud.CloudFlowRunner",
@@ -183,10 +188,10 @@ def test_generate_flow_run_environ():
"cloud.send_flow_run_logs": "CONFIG_SEND_RUN_LOGS",
"backend": "CONFIG_BACKEND",
"cloud.api": "CONFIG_API",
- "cloud.tenant_id": "CONFIG_TENANT_ID",
- # Deprecated tokens are included if available but overriden by `run_api_key`
+ "cloud.tenant_id": CONFIG_TENANT_ID,
+ # Deprecated tokens are ignored _always_ since 1.0.0
"cloud.agent.auth_token": "CONFIG_AUTH_TOKEN",
- "cloud.auth_token": None,
+ "cloud.auth_token": "CONFIG_AUTH_TOKEN",
}
):
result = generate_flow_run_environ(
@@ -223,7 +228,7 @@ def test_generate_flow_run_environ():
"PREFECT__CLOUD__SEND_FLOW_RUN_LOGS": "CONFIG_SEND_RUN_LOGS",
"PREFECT__BACKEND": "CONFIG_BACKEND",
"PREFECT__CLOUD__API": "CONFIG_API",
- "PREFECT__CLOUD__TENANT_ID": "CONFIG_TENANT_ID",
+ "PREFECT__CLOUD__TENANT_ID": CONFIG_TENANT_ID,
# Overridden by run config
"A": "RUN_CONFIG",
"B": "RUN_CONFIG",
diff --git a/tests/cli/test_agent.py b/tests/cli/test_agent.py
index 867d5eb19082..ba578a4adc05 100644
--- a/tests/cli/test_agent.py
+++ b/tests/cli/test_agent.py
@@ -52,7 +52,7 @@ def test_help(cmd):
(
"--base-url testurl --no-pull --show-flow-logs --volume volume1 "
"--volume volume2 --network testnetwork1 --network testnetwork2 "
- "--no-docker-interface --docker-client-timeout 123"
+ "--docker-client-timeout 123"
),
{
"base_url": "testurl",
@@ -60,7 +60,6 @@ def test_help(cmd):
"networks": ("testnetwork1", "testnetwork2"),
"no_pull": True,
"show_flow_logs": True,
- "docker_interface": False,
"docker_client_timeout": 123,
},
),
@@ -122,7 +121,7 @@ def test_agent_start(name, import_path, extra_cmd, extra_kwargs, monkeypatch):
command = [name, "start"]
command.extend(
(
- "--token TEST-TOKEN --api TEST-API --agent-config-id TEST-AGENT-CONFIG-ID "
+ "--key TEST-KEY --api TEST-API --agent-config-id TEST-AGENT-CONFIG-ID "
"--name TEST-NAME -l label1 -l label2 -e KEY1=VALUE1 -e KEY2=VALUE2 "
"-e KEY3=VALUE=WITH=EQUALS --max-polls 10 --agent-address 127.0.0.1:8080"
).split()
@@ -150,7 +149,7 @@ def test_agent_start(name, import_path, extra_cmd, extra_kwargs, monkeypatch):
agent_obj = MagicMock()
def check_config(*args, **kwargs):
- assert prefect.config.cloud.agent.auth_token == "TEST-TOKEN"
+ assert prefect.config.cloud.api_key == "TEST-KEY"
assert prefect.config.cloud.agent.level.upper() == "DEBUG"
assert prefect.config.cloud.api == "TEST-API"
return agent_obj
@@ -162,8 +161,7 @@ def check_config(*args, **kwargs):
result = CliRunner().invoke(agent, command)
- if result.exception:
- raise result.exception
+ assert not result.exception, result.stdout
agent_cls.assert_called_once()
kwargs = agent_cls.call_args[1]
@@ -172,16 +170,11 @@ def check_config(*args, **kwargs):
assert agent_obj.start.called
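Two details in this hunk are easy to miss: the agent class is monkeypatched with a `side_effect` function so the config assertions execute while the CLI's temporary settings are still active, and `CliRunner` captures exceptions on the result object rather than letting them propagate, so they must be surfaced explicitly. A minimal, self-contained analogue (the command below is a stand-in, not Prefect's CLI):

```python
import click
from click.testing import CliRunner
from unittest.mock import MagicMock

agent_obj = MagicMock()

@click.command()
@click.option("--key")
def start(key):
    # Stand-in for the agent class: assertions run inside the invocation.
    assert key == "TEST-KEY"
    agent_obj.start()

result = CliRunner().invoke(start, ["--key", "TEST-KEY"])
# CliRunner swallows exceptions; surface them instead of passing silently.
assert not result.exception, result.output
assert agent_obj.start.called
```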
-@pytest.mark.parametrize("use_token", [False, True])
-def test_agent_local_install(monkeypatch, use_token):
+def test_agent_local_install(monkeypatch):
from prefect.agent.local import LocalAgent
command = ["local", "install"]
- command.extend(
- (
- "--token TEST-TOKEN" if use_token else "--key TEST-KEY --tenant-id TENANT"
- ).split()
- )
+ command.extend(("--key TEST-KEY --tenant-id TENANT").split())
command.extend(
(
"-l label1 -l label2 -e KEY1=VALUE1 -e KEY2=VALUE2 "
@@ -190,7 +183,6 @@ def test_agent_local_install(monkeypatch, use_token):
)
expected_kwargs = {
- "token": None, # These will be set below, toggled on 'use_token'
"key": None,
"tenant_id": None,
"labels": ["label1", "label2"],
@@ -200,11 +192,8 @@ def test_agent_local_install(monkeypatch, use_token):
"agent_config_id": "foo",
}
- if use_token:
- expected_kwargs["token"] = "TEST-TOKEN"
- else:
- expected_kwargs["key"] = "TEST-KEY"
- expected_kwargs["tenant_id"] = "TENANT"
+ expected_kwargs["key"] = "TEST-KEY"
+ expected_kwargs["tenant_id"] = "TENANT"
generate = MagicMock(wraps=LocalAgent.generate_supervisor_conf)
monkeypatch.setattr(
@@ -218,16 +207,11 @@ def test_agent_local_install(monkeypatch, use_token):
assert "supervisord" in result.output
-@pytest.mark.parametrize("use_token", [False, True])
-def test_agent_kubernetes_install(monkeypatch, use_token):
+def test_agent_kubernetes_install(monkeypatch):
from prefect.agent.kubernetes import KubernetesAgent
command = ["kubernetes", "install"]
- command.extend(
- (
- "--token TEST-TOKEN" if use_token else "--key TEST-KEY --tenant-id TENANT"
- ).split()
- )
+ command.extend("--key TEST-KEY --tenant-id TENANT".split())
command.extend(
(
"-l label1 -l label2 -e KEY1=VALUE1 -e KEY2=VALUE2 "
@@ -240,9 +224,8 @@ def test_agent_kubernetes_install(monkeypatch, use_token):
)
expected_kwargs = {
- "token": None, # These will be set below, toggled on 'use_token'
- "key": None,
- "tenant_id": None,
+ "key": "TEST-KEY",
+ "tenant_id": "TENANT",
"labels": ["label1", "label2"],
"env_vars": {"KEY1": "VALUE1", "KEY2": "VALUE2"},
"api": "TEST_API",
@@ -260,11 +243,8 @@ def test_agent_kubernetes_install(monkeypatch, use_token):
"agent_config_id": "foo",
}
- if use_token:
- expected_kwargs["token"] = "TEST-TOKEN"
- else:
- expected_kwargs["key"] = "TEST-KEY"
- expected_kwargs["tenant_id"] = "TENANT"
+ expected_kwargs["key"] = "TEST-KEY"
+ expected_kwargs["tenant_id"] = "TENANT"
generate = MagicMock(wraps=KubernetesAgent.generate_deployment_yaml)
monkeypatch.setattr(
diff --git a/tests/cli/test_auth.py b/tests/cli/test_auth.py
index 9d4567f34276..71f9877fe62d 100644
--- a/tests/cli/test_auth.py
+++ b/tests/cli/test_auth.py
@@ -28,59 +28,16 @@ def test_auth_help():
assert "Handle Prefect Cloud authorization." in result.output
-def test_auth_login_with_token(patch_post, monkeypatch, cloud_api):
- patch_post(
- dict(
- data=dict(
- tenant="id",
- user=[dict(default_membership=dict(tenant_id=str(uuid.uuid4())))],
- )
- )
- )
-
- Client = MagicMock()
-
- # Raise an error during treating token as a key to get to token compat code
- Client()._get_auth_tenant = MagicMock(side_effect=AuthorizationError)
- Client().api_key = None
-
- Client().login_to_tenant = MagicMock(return_value=True)
- monkeypatch.setattr("prefect.cli.auth.Client", Client)
-
- runner = CliRunner()
- result = runner.invoke(auth, ["login", "--token", "test"])
- assert result.exit_code == 0
- assert "Login successful" in result.output
-
-
-def test_auth_login_with_token_key_is_not_allowed(patch_post, monkeypatch, cloud_api):
- Client = MagicMock()
-
- # Raise an error during treating token as a key to get to token compat code
- Client()._get_auth_tenant = MagicMock(side_effect=AuthorizationError)
- Client().api_key = "foo"
- monkeypatch.setattr("prefect.cli.auth.Client", Client)
-
- runner = CliRunner()
- result = runner.invoke(auth, ["login", "--token", "test"])
- assert result.exit_code == 1
- assert (
- "You have already logged in with an API key and cannot use a token"
- in result.output
- )
-
-
def test_auth_login_client_error(patch_post, cloud_api):
patch_post(dict(errors=[dict(error={})]))
runner = CliRunner()
- result = runner.invoke(auth, ["login", "--token", "test"])
+ result = runner.invoke(auth, ["login", "--key", "test"])
assert result.exit_code == 1
assert "Error attempting to communicate with Prefect Cloud" in result.output
-@pytest.mark.parametrize("as_token", [True, False])
-def test_auth_login_with_api_key(patch_post, monkeypatch, cloud_api, as_token):
+def test_auth_login_with_api_key(patch_post, monkeypatch, cloud_api):
Client = MagicMock()
Client()._get_auth_tenant = MagicMock(return_value="tenant-id")
TenantView = MagicMock()
@@ -91,9 +48,7 @@ def test_auth_login_with_api_key(patch_post, monkeypatch, cloud_api, as_token):
monkeypatch.setattr("prefect.cli.auth.Client", Client)
runner = CliRunner()
- # All `--token` args are treated as keys first for easier transition
- arg = "--token" if as_token else "--key"
- result = runner.invoke(auth, ["login", arg, "test"])
+ result = runner.invoke(auth, ["login", "--key", "test"])
assert result.exit_code == 0
assert "Logged in to Prefect Cloud tenant 'Name' (tenant-slug)" in result.output
@@ -137,15 +92,17 @@ def test_auth_logout_after_login(patch_post, monkeypatch, cloud_api):
)
Client = MagicMock()
- # Raise an error during treating token as a key to get to token compat code
- Client()._get_auth_tenant = MagicMock(side_effect=AuthorizationError)
- Client().api_key = None
- Client().login_to_tenant = MagicMock(return_value=True)
+ Client()._get_auth_tenant = MagicMock(return_value="tenant-id")
+ TenantView = MagicMock()
+ TenantView.from_tenant_id.return_value = prefect.backend.TenantView(
+ tenant_id="id", name="Name", slug="tenant-slug"
+ )
+ monkeypatch.setattr("prefect.cli.auth.TenantView", TenantView)
monkeypatch.setattr("prefect.cli.auth.Client", Client)
runner = CliRunner()
- result = runner.invoke(auth, ["login", "--token", "test"])
+ result = runner.invoke(auth, ["login", "--key", "test"])
assert result.exit_code == 0
result = runner.invoke(auth, ["logout"], input="Y")
@@ -172,83 +129,12 @@ def test_auth_logout_not_logged_in(patch_post, cloud_api):
assert "not logged in to Prefect Cloud" in result.output
-def test_auth_logout_api_token_removes_api_token(patch_post, cloud_api):
- patch_post(dict(data=dict(tenant="id")))
-
- client = prefect.Client(api_token="foo")
- client._save_local_settings({"api_token": client._api_token})
-
- runner = CliRunner()
- result = runner.invoke(auth, ["logout"], input="Y")
- assert result.exit_code == 0
- assert "This will remove your API token" in result.output
-
- client = prefect.Client()
- assert "api_token" not in client._load_local_settings()
-
-
-def test_auth_logout_api_token_with_tenant_removes_tenant_id(patch_posts, cloud_api):
- patch_posts(
- [
- # Login to tenant call during setup
- dict(data=dict(tenant=[dict(id=str(uuid.uuid4()))])),
- # Access token retrieval call during setup
- dict(
- data=dict(
- switch_tenant=dict(
- access_token="access-token",
- expires_at=pendulum.now().isoformat(),
- refresh_token="refresh-token",
- )
- )
- ),
- # Login to tenant call during logout
- dict(data=dict(tenant=[dict(id=str(uuid.uuid4()))])),
- # Access token retrieval call during logout
- dict(
- data=dict(
- switch_tenant=dict(
- access_token="access-token",
- expires_at=pendulum.now().isoformat(),
- refresh_token="refresh-token",
- )
- )
- ),
- ]
- )
-
- client = prefect.Client()
- client._save_local_settings(
- {"api_token": "token", "active_tenant_id": str(uuid.uuid4())}
- )
-
- runner = CliRunner()
- result = runner.invoke(auth, ["logout"], input="Y")
-
- assert result.exit_code == 0
-
- settings = client._load_local_settings()
-
- # Does not remove the API token
- assert "This will remove your API token" not in result.output
- assert "api_token" in settings
-
- # Removes the tenant id
- assert "Logged out from tenant" in result.output
- assert "active_tenant_id" not in settings
-
-
def test_list_tenants(patch_post, cloud_api):
patch_post(
dict(
data=dict(
auth_info={"tenant_id": "id"},
tenant=[{"id": "id", "slug": "slug", "name": "name"}],
- switch_tenant={
- "access_token": "access_token",
- "expires_in": "expires_in",
- "refresh_token": "refresh_token",
- },
)
)
)
@@ -267,87 +153,19 @@ def test_switch_tenants_success(monkeypatch, cloud_api):
runner = CliRunner()
result = runner.invoke(auth, ["switch-tenants", "--slug", "slug"])
- assert result.exit_code == 0
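+ # Include the CLI output in the assertion message to simplify debugging failures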
+ assert result.exit_code == 0, result.output
assert "Tenant switched" in result.output
def test_switch_tenants_failed(monkeypatch, cloud_api):
client = MagicMock()
- client.return_value.login_to_tenant = MagicMock(return_value=False)
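+ # Failed switches now surface as an AuthorizationError rather than a False return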
+ client.return_value.switch_tenant = MagicMock(side_effect=AuthorizationError())
monkeypatch.setattr("prefect.cli.auth.Client", client)
runner = CliRunner()
result = runner.invoke(auth, ["switch-tenants", "--slug", "slug"])
assert result.exit_code == 1
- assert "Unable to switch tenant" in result.output
-
-
-def test_create_token(patch_post, cloud_api):
- patch_post(dict(data=dict(create_api_token={"token": "token"})))
-
- runner = CliRunner()
- result = runner.invoke(auth, ["create-token", "-n", "name", "-s", "scope"])
- assert result.exit_code == 0
- assert "token" in result.output
-
-
-def test_create_token_fails(patch_post, cloud_api):
- patch_post(dict())
-
- runner = CliRunner()
- result = runner.invoke(auth, ["create-token", "-n", "name", "-s", "scope"])
- assert result.exit_code == 0
- assert "Issue creating API token" in result.output
-
-
-def test_list_tokens(patch_post, cloud_api):
- patch_post(dict(data=dict(api_token=[{"id": "id", "name": "name"}])))
-
- runner = CliRunner()
- result = runner.invoke(auth, ["list-tokens"])
- assert result.exit_code == 0
- assert "id" in result.output
- assert "name" in result.output
-
-
-def test_list_tokens_fails(patch_post, cloud_api):
- patch_post(dict())
-
- runner = CliRunner()
- result = runner.invoke(auth, ["list-tokens"])
- assert result.exit_code == 0
- assert "Unable to list API tokens" in result.output
-
-
-def test_revoke_token(patch_post, cloud_api):
- patch_post(dict(data=dict(delete_api_token={"success": True})))
-
- runner = CliRunner()
- result = runner.invoke(auth, ["revoke-token", "--id", "id"])
- assert result.exit_code == 0
- assert "Token successfully revoked" in result.output
-
-
-def test_revoke_token_fails(patch_post, cloud_api):
- patch_post(dict())
-
- runner = CliRunner()
- result = runner.invoke(auth, ["revoke-token", "--id", "id"])
- assert result.exit_code == 0
- assert "Unable to revoke token with ID id" in result.output
-
-
-def test_check_override_function():
- with set_temporary_config({"cloud.auth_token": "TOKEN"}):
- with pytest.raises(click.exceptions.Abort):
- prefect.cli.auth.check_override_auth_token()
-
-
-def test_override_functions_on_commands(cloud_api):
- with set_temporary_config({"cloud.auth_token": "TOKEN"}):
- runner = CliRunner()
- result = runner.invoke(auth, ["revoke-token", "--id", "id"])
- assert result.exit_code == 1
+ assert "Unauthorized. Your API key is not valid for that tenant" in result.output
@pytest.mark.parametrize(
diff --git a/tests/cli/test_build_register.py b/tests/cli/test_build_register.py
index 08eab38d1ffa..1e8fffab1e84 100644
--- a/tests/cli/test_build_register.py
+++ b/tests/cli/test_build_register.py
@@ -23,95 +23,11 @@
expand_paths,
)
from prefect.engine.results import LocalResult
-from prefect.environments.execution import LocalEnvironment
from prefect.run_configs import UniversalRun
from prefect.storage import S3, Local, Module
from prefect.utilities.graphql import GraphQLResult
-def test_register_flow_help():
- runner = CliRunner()
- result = runner.invoke(cli, ["register", "flow", "--help"])
- assert result.exit_code == 0
- assert "Register a flow" in result.output
-
-
-@pytest.mark.parametrize("labels", [[], ["b", "c"]])
-@pytest.mark.parametrize("kind", ["run_config", "environment", "neither"])
-def test_register_flow_call(monkeypatch, tmpdir, kind, labels):
- client = MagicMock()
- monkeypatch.setattr("prefect.Client", MagicMock(return_value=client))
-
- if kind == "environment":
- contents = (
- "from prefect import Flow\n"
- "from prefect.environments.execution import LocalEnvironment\n"
- "from prefect.storage import Local\n"
- "f = Flow('test-flow', environment=LocalEnvironment(labels=['a']),\n"
- " storage=Local(add_default_labels=False))"
- )
- elif kind == "run_config":
- contents = (
- "from prefect import Flow\n"
- "from prefect.run_configs import KubernetesRun\n"
- "from prefect.storage import Local\n"
- "f = Flow('test-flow', run_config=KubernetesRun(labels=['a']),\n"
- " storage=Local(add_default_labels=False))"
- )
- else:
- contents = (
- "from prefect import Flow\n"
- "from prefect.storage import Local\n"
- "f = Flow('test-flow', storage=Local(add_default_labels=False))"
- )
-
- full_path = str(tmpdir.join("flow.py"))
- with open(full_path, "w") as f:
- f.write(contents)
-
- args = [
- "register",
- "flow",
- "--file",
- full_path,
- "--name",
- "test-flow",
- "--project",
- "project",
- "--skip-if-flow-metadata-unchanged",
- ]
- for l in labels:
- args.extend(["-l", l])
-
- runner = CliRunner()
- result = runner.invoke(cli, args)
- assert client.register.called
- assert client.register.call_args[1]["project_name"] == "project"
- assert client.register.call_args[1]["idempotency_key"] is not None
-
- # Check additional labels are set if specified
- flow = client.register.call_args[1]["flow"]
- if kind == "run_config":
- assert flow.run_config.labels == {"a", *labels}
- elif kind == "environment":
- assert flow.environment.labels == {"a", *labels}
- else:
- assert flow.run_config.labels == {*labels}
-
- assert "Warning: `prefect register flow` is deprecated" in result.stdout
- assert result.exit_code == 0
-
-
-def register_flow_errors_if_pass_options_to_register_group():
- """Since we deprecated a subcommand, we need to manually check that
- subcommand options are valid"""
- result = CliRunner().invoke(
- cli, ["register", "--project", "my-project", "flow", "--file", "some_path.py"]
- )
- assert result.exit_code == 1
- assert "Got unexpected extra argument (flow)" in result.stdout
-
-
def test_expand_paths_glob(tmpdir):
glob_path = str(tmpdir.join("**").join("*.py"))
@@ -531,7 +447,7 @@ def build(self):
flow2 = Flow(
"flow 2",
storage=MyModule("testing"),
- environment=LocalEnvironment(labels=["a"]),
+ run_config=UniversalRun(labels=["a"]),
)
storage2 = MyModule("testing")
flow3 = Flow("flow 3", storage=storage2)
@@ -543,9 +459,7 @@ def build(self):
Flow("flow 7", run_config=UniversalRun(labels=["a"])).serialize(build=False)
)
flow8 = box.Box(
- Flow("flow 8", environment=LocalEnvironment(labels=["a"])).serialize(
- build=False
- )
+ Flow("flow 8", run_config=UniversalRun(labels=["a"])).serialize(build=False)
)
flows = [flow1, flow2, flow3, flow4, flow5, flow6, flow7, flow8]
@@ -571,13 +485,13 @@ def build(self):
# Flows are properly configured
assert flow1.result is storage1.result
assert flow1.run_config.labels == {"a", "b", "c"}
- assert flow2.environment.labels == {"a", "b", "c"}
+ assert flow2.run_config.labels == {"a", "b", "c"}
assert isinstance(flow3.run_config, UniversalRun)
assert flow3.run_config.labels == {"b", "c"}
assert isinstance(flow4.run_config, UniversalRun)
assert flow4.run_config.labels == {"b", "c"}
assert set(flow7["run_config"]["labels"]) == {"a", "b", "c"}
- assert set(flow8["environment"]["labels"]) == {"a", "b", "c"}
+ assert set(flow8["run_config"]["labels"]) == {"a", "b", "c"}
# The output contains a traceback, which will vary between machines
# We only check that the following fixed sections exist in the output
diff --git a/tests/cli/test_get.py b/tests/cli/test_get.py
index 93edc3be0f6b..7d8249c2f461 100644
--- a/tests/cli/test_get.py
+++ b/tests/cli/test_get.py
@@ -31,33 +31,32 @@ def test_get_flows_cloud(monkeypatch, cloud_api):
session.return_value.post = post
monkeypatch.setattr("requests.Session", session)
- with set_temporary_config({"cloud.auth_token": "secret_token"}):
- runner = CliRunner()
- result = runner.invoke(get, ["flows"])
- assert result.exit_code == 0
- assert (
- "NAME" in result.output
- and "VERSION" in result.output
- and "AGE" in result.output
- and "PROJECT NAME" in result.output
- )
+ runner = CliRunner()
+ result = runner.invoke(get, ["flows"])
+ assert result.exit_code == 0
+ assert (
+ "NAME" in result.output
+ and "VERSION" in result.output
+ and "AGE" in result.output
+ and "PROJECT NAME" in result.output
+ )
- query = """
- query {
- flow(where: { _and: { name: { _eq: null }, version: { _eq: null }, project: { name: { _eq: null } } } }, order_by: { name: asc, version: desc }, distinct_on: name, limit: 10) {
+ query = """
+ query {
+ flow(where: { _and: { name: { _eq: null }, version: { _eq: null }, project: { name: { _eq: null } } } }, order_by: { name: asc, version: desc }, distinct_on: name, limit: 10) {
+ name
+ version
+ created
+ id
+ project {
name
- version
- created
- id
- project {
- name
- }
}
}
- """
+ }
+ """
- assert post.called
- assert post.call_args[1]["json"]["query"].split() == query.split()
+ assert post.called
+ assert post.call_args[1]["json"]["query"].split() == query.split()
def test_get_flows_populated(monkeypatch, cloud_api):
@@ -68,41 +67,40 @@ def test_get_flows_populated(monkeypatch, cloud_api):
session.return_value.post = post
monkeypatch.setattr("requests.Session", session)
- with set_temporary_config({"cloud.auth_token": "secret_token"}):
- runner = CliRunner()
- result = runner.invoke(
- get,
- [
- "flows",
- "--name",
- "name",
- "--version",
- "2",
- "--project",
- "project",
- "--limit",
- "100",
- "--all-versions",
- ],
- )
- assert result.exit_code == 0
+ runner = CliRunner()
+ result = runner.invoke(
+ get,
+ [
+ "flows",
+ "--name",
+ "name",
+ "--version",
+ "2",
+ "--project",
+ "project",
+ "--limit",
+ "100",
+ "--all-versions",
+ ],
+ )
+ assert result.exit_code == 0
- query = """
- query {
- flow(where: { _and: { name: { _eq: "name" }, version: { _eq: 2 }, project: { name: { _eq: "project" } } } }, order_by: { name: asc, version: desc }, distinct_on: null, limit: 100) {
+ query = """
+ query {
+ flow(where: { _and: { name: { _eq: "name" }, version: { _eq: 2 }, project: { name: { _eq: "project" } } } }, order_by: { name: asc, version: desc }, distinct_on: null, limit: 100) {
+ name
+ version
+ created
+ id
+ project {
name
- version
- created
- id
- project {
- name
- }
}
}
- """
+ }
+ """
- assert post.called
- assert post.call_args[1]["json"]["query"].split() == query.split()
+ assert post.called
+ assert post.call_args[1]["json"]["query"].split() == query.split()
def test_get_projects(monkeypatch, cloud_api):
@@ -113,34 +111,33 @@ def test_get_projects(monkeypatch, cloud_api):
session.return_value.post = post
monkeypatch.setattr("requests.Session", session)
- with set_temporary_config({"cloud.auth_token": "secret_token"}):
- runner = CliRunner()
- result = runner.invoke(get, ["projects"])
- assert result.exit_code == 0
- assert (
- "NAME" in result.output
- and "FLOW COUNT" in result.output
- and "AGE" in result.output
- and "DESCRIPTION" in result.output
- )
+ runner = CliRunner()
+ result = runner.invoke(get, ["projects"])
+ assert result.exit_code == 0
+ assert (
+ "NAME" in result.output
+ and "FLOW COUNT" in result.output
+ and "AGE" in result.output
+ and "DESCRIPTION" in result.output
+ )
- query = """
- query {
- project(where: { _and: { name: { _eq: null } } }, order_by: { name: asc }) {
- name
- created
- description
- flows_aggregate(distinct_on: name) {
- aggregate {
- count
- }
+ query = """
+ query {
+ project(where: { _and: { name: { _eq: null } } }, order_by: { name: asc }) {
+ name
+ created
+ description
+ flows_aggregate(distinct_on: name) {
+ aggregate {
+ count
}
}
}
- """
+ }
+ """
- assert post.called
- assert post.call_args[1]["json"]["query"].split() == query.split()
+ assert post.called
+ assert post.call_args[1]["json"]["query"].split() == query.split()
def test_get_projects_populated(monkeypatch, cloud_api):
@@ -151,28 +148,27 @@ def test_get_projects_populated(monkeypatch, cloud_api):
session.return_value.post = post
monkeypatch.setattr("requests.Session", session)
- with set_temporary_config({"cloud.auth_token": "secret_token"}):
- runner = CliRunner()
- result = runner.invoke(get, ["projects", "--name", "name"])
- assert result.exit_code == 0
+ runner = CliRunner()
+ result = runner.invoke(get, ["projects", "--name", "name"])
+ assert result.exit_code == 0
- query = """
- query {
- project(where: { _and: { name: { _eq: "name" } } }, order_by: { name: asc }) {
- name
- created
- description
- flows_aggregate(distinct_on: name) {
- aggregate {
- count
- }
+ query = """
+ query {
+ project(where: { _and: { name: { _eq: "name" } } }, order_by: { name: asc }) {
+ name
+ created
+ description
+ flows_aggregate(distinct_on: name) {
+ aggregate {
+ count
}
}
}
- """
+ }
+ """
- assert post.called
- assert post.call_args[1]["json"]["query"].split() == query.split()
+ assert post.called
+ assert post.call_args[1]["json"]["query"].split() == query.split()
def test_get_flow_runs_cloud(monkeypatch, cloud_api):
@@ -185,35 +181,34 @@ def test_get_flow_runs_cloud(monkeypatch, cloud_api):
session.return_value.post = post
monkeypatch.setattr("requests.Session", session)
- with set_temporary_config({"cloud.auth_token": "secret_token"}):
- runner = CliRunner()
- result = runner.invoke(get, ["flow-runs"])
- assert result.exit_code == 0
- assert (
- "NAME" in result.output
- and "FLOW NAME" in result.output
- and "STATE" in result.output
- and "AGE" in result.output
- and "START TIME" in result.output
- )
+ runner = CliRunner()
+ result = runner.invoke(get, ["flow-runs"])
+ assert result.exit_code == 0
+ assert (
+ "NAME" in result.output
+ and "FLOW NAME" in result.output
+ and "STATE" in result.output
+ and "AGE" in result.output
+ and "START TIME" in result.output
+ )
- query = """
- query {
- flow_run(where: { flow: { _and: { name: { _eq: null }, project: { name: { _eq: null } } } } }, limit: 10, order_by: { created: desc }) {
- flow {
- name
- }
- id
- created
- state
+ query = """
+ query {
+ flow_run(where: { flow: { _and: { name: { _eq: null }, project: { name: { _eq: null } } } } }, limit: 10, order_by: { created: desc }) {
+ flow {
name
- start_time
}
+ id
+ created
+ state
+ name
+ start_time
}
- """
+ }
+ """
- assert post.called
- assert post.call_args[1]["json"]["query"].split() == query.split()
+ assert post.called
+ assert post.call_args[1]["json"]["query"].split() == query.split()
def test_get_flow_runs_populated(monkeypatch, cloud_api):
@@ -226,40 +221,39 @@ def test_get_flow_runs_populated(monkeypatch, cloud_api):
session.return_value.post = post
monkeypatch.setattr("requests.Session", session)
- with set_temporary_config({"cloud.auth_token": "secret_token"}):
- runner = CliRunner()
- result = runner.invoke(
- get,
- [
- "flow-runs",
- "--limit",
- "100",
- "--flow",
- "flow",
- "--project",
- "project",
- "--started",
- ],
- )
- assert result.exit_code == 0
+ runner = CliRunner()
+ result = runner.invoke(
+ get,
+ [
+ "flow-runs",
+ "--limit",
+ "100",
+ "--flow",
+ "flow",
+ "--project",
+ "project",
+ "--started",
+ ],
+ )
+ assert result.exit_code == 0
- query = """
- query {
- flow_run(where: { _and: { flow: { _and: { name: { _eq: "flow" }, project: { name: { _eq: "project" } } } }, start_time: { _is_null: false } } }, limit: 100, order_by: { start_time: desc }) {
- flow {
- name
- }
- id
- created
- state
+ query = """
+ query {
+ flow_run(where: { _and: { flow: { _and: { name: { _eq: "flow" }, project: { name: { _eq: "project" } } } }, start_time: { _is_null: false } } }, limit: 100, order_by: { start_time: desc }) {
+ flow {
name
- start_time
}
+ id
+ created
+ state
+ name
+ start_time
}
- """
+ }
+ """
- assert post.called
- assert post.call_args[1]["json"]["query"].split() == query.split()
+ assert post.called
+ assert post.call_args[1]["json"]["query"].split() == query.split()
def test_get_tasks_cloud(monkeypatch, cloud_api):
@@ -270,36 +264,35 @@ def test_get_tasks_cloud(monkeypatch, cloud_api):
session.return_value.post = post
monkeypatch.setattr("requests.Session", session)
- with set_temporary_config({"cloud.auth_token": "secret_token"}):
- runner = CliRunner()
- result = runner.invoke(get, ["tasks"])
- assert result.exit_code == 0
- assert (
- "NAME" in result.output
- and "FLOW NAME" in result.output
- and "FLOW VERSION" in result.output
- and "AGE" in result.output
- and "MAPPED" in result.output
- and "TYPE" in result.output
- )
+ runner = CliRunner()
+ result = runner.invoke(get, ["tasks"])
+ assert result.exit_code == 0
+ assert (
+ "NAME" in result.output
+ and "FLOW NAME" in result.output
+ and "FLOW VERSION" in result.output
+ and "AGE" in result.output
+ and "MAPPED" in result.output
+ and "TYPE" in result.output
+ )
- query = """
- query {
- task(where: { _and: { name: { _eq: null }, flow: { name: { _eq: null }, version: { _eq: null }, project: { name: { _eq: null } } } } }, limit: 10, order_by: { created: desc }) {
+ query = """
+ query {
+ task(where: { _and: { name: { _eq: null }, flow: { name: { _eq: null }, version: { _eq: null }, project: { name: { _eq: null } } } } }, limit: 10, order_by: { created: desc }) {
+ name
+ created
+ flow {
name
- created
- flow {
- name
- version
- }
- mapped
- type
+ version
}
+ mapped
+ type
}
- """
+ }
+ """
- assert post.called
- assert post.call_args[1]["json"]["query"].split() == query.split()
+ assert post.called
+ assert post.call_args[1]["json"]["query"].split() == query.split()
def test_get_tasks_populated(monkeypatch, cloud_api):
@@ -310,43 +303,42 @@ def test_get_tasks_populated(monkeypatch, cloud_api):
session.return_value.post = post
monkeypatch.setattr("requests.Session", session)
- with set_temporary_config({"cloud.auth_token": "secret_token"}):
- runner = CliRunner()
- result = runner.invoke(
- get,
- [
- "tasks",
- "--name",
- "task",
- "--flow-name",
- "flow",
- "--flow-version",
- "2",
- "--project",
- "project",
- "--limit",
- "100",
- ],
- )
- assert result.exit_code == 0
+ runner = CliRunner()
+ result = runner.invoke(
+ get,
+ [
+ "tasks",
+ "--name",
+ "task",
+ "--flow-name",
+ "flow",
+ "--flow-version",
+ "2",
+ "--project",
+ "project",
+ "--limit",
+ "100",
+ ],
+ )
+ assert result.exit_code == 0
- query = """
- query {
- task(where: { _and: { name: { _eq: "task" }, flow: { name: { _eq: "flow" }, version: { _eq: 2 }, project: { name: { _eq: "project" } } } } }, limit: 100, order_by: { created: desc }) {
+ query = """
+ query {
+ task(where: { _and: { name: { _eq: "task" }, flow: { name: { _eq: "flow" }, version: { _eq: 2 }, project: { name: { _eq: "project" } } } } }, limit: 100, order_by: { created: desc }) {
+ name
+ created
+ flow {
name
- created
- flow {
- name
- version
- }
- mapped
- type
+ version
}
+ mapped
+ type
}
- """
+ }
+ """
- assert post.called
- assert post.call_args[1]["json"]["query"].split() == query.split()
+ assert post.called
+ assert post.call_args[1]["json"]["query"].split() == query.split()
def test_get_logs(monkeypatch, cloud_api):
@@ -375,32 +367,31 @@ def test_get_logs(monkeypatch, cloud_api):
session.return_value.post = post
monkeypatch.setattr("requests.Session", session)
- with set_temporary_config({"cloud.auth_token": "secret_token"}):
- runner = CliRunner()
- result = runner.invoke(get, ["logs", "--name", "flow_run"])
- assert result.exit_code == 0
- assert (
- "TIMESTAMP" in result.output
- and "LEVEL" in result.output
- and "MESSAGE" in result.output
- and "level" in result.output
- )
+ runner = CliRunner()
+ result = runner.invoke(get, ["logs", "--name", "flow_run"])
+ assert result.exit_code == 0
+ assert (
+ "TIMESTAMP" in result.output
+ and "LEVEL" in result.output
+ and "MESSAGE" in result.output
+ and "level" in result.output
+ )
- query = """
- query {
- flow_run(where: { name: { _eq: "flow_run" }, id: { _eq: null } }, order_by: { start_time: desc }) {
- logs(order_by: { timestamp: asc }) {
- timestamp
- message
- level
- }
- start_time
+ query = """
+ query {
+ flow_run(where: { name: { _eq: "flow_run" }, id: { _eq: null } }, order_by: { start_time: desc }) {
+ logs(order_by: { timestamp: asc }) {
+ timestamp
+ message
+ level
}
+ start_time
}
- """
+ }
+ """
- assert post.called
- assert post.call_args[1]["json"]["query"].split() == query.split()
+ assert post.called
+ assert post.call_args[1]["json"]["query"].split() == query.split()
def test_get_logs_info(monkeypatch, cloud_api):
@@ -415,26 +406,25 @@ def test_get_logs_info(monkeypatch, cloud_api):
session.return_value.post = post
monkeypatch.setattr("requests.Session", session)
- with set_temporary_config({"cloud.auth_token": "secret_token"}):
- runner = CliRunner()
- result = runner.invoke(get, ["logs", "--name", "flow_run", "--info"])
- assert result.exit_code == 0
- assert "OUTPUT" in result.output
-
- query = """
- query {
- flow_run(where: { name: { _eq: "flow_run" }, id: { _eq: null } }, order_by: { start_time: desc }) {
- logs(order_by: { timestamp: asc }) {
- timestamp
- info
- }
- start_time
+ runner = CliRunner()
+ result = runner.invoke(get, ["logs", "--name", "flow_run", "--info"])
+ assert result.exit_code == 0
+ assert "OUTPUT" in result.output
+
+ query = """
+ query {
+ flow_run(where: { name: { _eq: "flow_run" }, id: { _eq: null } }, order_by: { start_time: desc }) {
+ logs(order_by: { timestamp: asc }) {
+ timestamp
+ info
}
+ start_time
}
- """
+ }
+ """
- assert post.called
- assert post.call_args[1]["json"]["query"].split() == query.split()
+ assert post.called
+ assert post.call_args[1]["json"]["query"].split() == query.split()
def test_get_logs_fails(monkeypatch, cloud_api):
@@ -447,11 +437,10 @@ def test_get_logs_fails(monkeypatch, cloud_api):
session.return_value.post = post
monkeypatch.setattr("requests.Session", session)
- with set_temporary_config({"cloud.auth_token": "secret_token"}):
- runner = CliRunner()
- result = runner.invoke(get, ["logs", "--name", "flow_run"])
- assert result.exit_code == 0
- assert "flow_run not found" in result.output
+ runner = CliRunner()
+ result = runner.invoke(get, ["logs", "--name", "flow_run"])
+ assert result.exit_code == 0
+ assert "flow_run not found" in result.output
def test_get_logs_by_id(monkeypatch, cloud_api):
@@ -480,32 +469,31 @@ def test_get_logs_by_id(monkeypatch, cloud_api):
session.return_value.post = post
monkeypatch.setattr("requests.Session", session)
- with set_temporary_config({"cloud.auth_token": "secret_token"}):
- runner = CliRunner()
- result = runner.invoke(get, ["logs", "--id", "id"])
- assert result.exit_code == 0
- assert (
- "TIMESTAMP" in result.output
- and "LEVEL" in result.output
- and "MESSAGE" in result.output
- and "level" in result.output
- )
+ runner = CliRunner()
+ result = runner.invoke(get, ["logs", "--id", "id"])
+ assert result.exit_code == 0
+ assert (
+ "TIMESTAMP" in result.output
+ and "LEVEL" in result.output
+ and "MESSAGE" in result.output
+ and "level" in result.output
+ )
- query = """
- query {
- flow_run(where: { name: { _eq: null }, id: { _eq: "id" } }, order_by: { start_time: desc }) {
- logs(order_by: { timestamp: asc }) {
- timestamp
- message
- level
- }
- start_time
+ query = """
+ query {
+ flow_run(where: { name: { _eq: null }, id: { _eq: "id" } }, order_by: { start_time: desc }) {
+ logs(order_by: { timestamp: asc }) {
+ timestamp
+ message
+ level
}
+ start_time
}
- """
+ }
+ """
- assert post.called
- assert post.call_args[1]["json"]["query"].split() == query.split()
+ assert post.called
+ assert post.call_args[1]["json"]["query"].split() == query.split()
def test_get_logs_fails_no_name_or_id(monkeypatch, cloud_api):
@@ -518,8 +506,7 @@ def test_get_logs_fails_no_name_or_id(monkeypatch, cloud_api):
session.return_value.post = post
monkeypatch.setattr("requests.Session", session)
- with set_temporary_config({"cloud.auth_token": "secret_token"}):
- runner = CliRunner()
- result = runner.invoke(get, ["logs"])
- assert result.exit_code == 0
- assert "must be provided" in result.output
+ runner = CliRunner()
+ result = runner.invoke(get, ["logs"])
+ assert result.exit_code == 0
+ assert "must be provided" in result.output
diff --git a/tests/cli/test_run_flow_old.py b/tests/cli/test_run_flow_old.py
deleted file mode 100644
index 3ec05098fe55..000000000000
--- a/tests/cli/test_run_flow_old.py
+++ /dev/null
@@ -1,619 +0,0 @@
-"""
-This file contains tests for the deprecated command `prefect run flow`
-This command is replaced by `prefect run` which has tests at `test_run.py`
-"""
-import json
-import os
-import re
-import tempfile
-from unittest.mock import MagicMock
-
-import pytest
-from click.testing import CliRunner
-from prefect.cli.run import run
-from prefect.utilities.configuration import set_temporary_config
-
-
-def test_run_help():
- runner = CliRunner()
- result = runner.invoke(run, ["flow", "--help"])
- assert result.exit_code == 0
- assert "Usage: run flow" in result.output
-
-
-def test_run_flow(monkeypatch, cloud_api):
- post = MagicMock(
- return_value=MagicMock(
- json=MagicMock(return_value=dict(data=dict(flow=[{"id": "flow"}])))
- )
- )
- session = MagicMock()
- session.return_value.post = post
- monkeypatch.setattr("requests.Session", session)
-
- monkeypatch.setattr(
- "prefect.client.Client.create_flow_run", MagicMock(return_value="id")
- )
- monkeypatch.setattr(
- "prefect.client.Client.get_default_tenant_slug", MagicMock(return_value="tslug")
- )
-
- runner = CliRunner()
- result = runner.invoke(
- run, ["flow", "--name", "flow", "--project", "project", "--version", "2"]
- )
- assert result.exit_code == 0
- assert "Flow Run" in result.output
-
- query = """
- query {
- flow(where: { _and: { name: { _eq: "flow" }, version: { _eq: 2 }, project: { name: { _eq: "project" } } } }, order_by: { name: asc, version: desc }, distinct_on: name) {
- id
- }
- }
- """
-
- assert post.called
- assert post.call_args[1]["json"]["query"].split() == query.split()
-
-
-def test_run_flow_watch(monkeypatch, cloud_api):
- post = MagicMock(
- return_value=MagicMock(
- json=MagicMock(
- return_value=dict(
- data=dict(
- flow=[{"id": "flow"}],
- flow_run_by_pk=dict(
- states=[
- {"state": "Running", "timestamp": None},
- {"state": "Success", "timestamp": None},
- ]
- ),
- )
- )
- )
- )
- )
- session = MagicMock()
- session.return_value.post = post
- monkeypatch.setattr("requests.Session", session)
-
- monkeypatch.setattr(
- "prefect.client.Client.create_flow_run", MagicMock(return_value="id")
- )
- monkeypatch.setattr(
- "prefect.client.Client.get_default_tenant_slug", MagicMock(return_value="tslug")
- )
-
- runner = CliRunner()
- result = runner.invoke(
- run,
- [
- "flow",
- "--name",
- "flow",
- "--project",
- "project",
- "--version",
- "2",
- "--watch",
- ],
- )
- assert result.exit_code == 0
- assert "Running" in result.output
- assert "Success" in result.output
- assert post.called
-
-
-def test_run_flow_logs(monkeypatch, cloud_api):
- post = MagicMock(
- return_value=MagicMock(
- json=MagicMock(
- return_value=dict(
- data=dict(
- flow=[{"id": "flow"}],
- flow_run=[
- {
- "logs": [
- {
- "timestamp": "test_timestamp",
- "message": "test_message",
- "level": "test_level",
- }
- ],
- "state": "Success",
- }
- ],
- )
- )
- )
- )
- )
- session = MagicMock()
- session.return_value.post = post
- monkeypatch.setattr("requests.Session", session)
-
- monkeypatch.setattr(
- "prefect.client.Client.create_flow_run", MagicMock(return_value="id")
- )
- monkeypatch.setattr(
- "prefect.client.Client.get_default_tenant_slug", MagicMock(return_value="tslug")
- )
-
- runner = CliRunner()
- result = runner.invoke(
- run,
- ["flow", "--name", "flow", "--project", "project", "--version", "2", "--logs"],
- )
- assert result.exit_code == 0
- assert "test_timestamp" in result.output
- assert "test_message" in result.output
- assert "test_level" in result.output
- assert post.called
-
-
-def test_run_flow_fails(monkeypatch, cloud_api):
- post = MagicMock(
- return_value=MagicMock(json=MagicMock(return_value=dict(data=dict(flow=[]))))
- )
- session = MagicMock()
- session.return_value.post = post
- monkeypatch.setattr("requests.Session", session)
-
- runner = CliRunner()
- result = runner.invoke(
- run, ["flow", "--name", "flow", "--project", "project", "--version", "2"]
- )
- assert result.exit_code == 0
- assert "flow not found" in result.output
-
-
-def test_run_flow_no_param_file(monkeypatch, cloud_api):
- runner = CliRunner()
- result = runner.invoke(
- run,
- [
- "flow",
- "--name",
- "flow",
- "--project",
- "project",
- "--version",
- "2",
- "--parameters-file",
- "no_file.json",
- ],
- )
- assert result.exit_code == 2
- # note: click changed the output format for errors between 7.0 & 7.1, this test should be agnostic to which click version is used.
- # ensure message ~= Invalid value for "--parameters-file" / "-pf": Path "no_file.json" does not exist
- assert re.search(
- r"Invalid value for [\"']--parameters-file", result.output, re.MULTILINE
- )
- assert re.search(
- r"Path [\"']no_file.json[\"'] does not exist", result.output, re.MULTILINE
- )
-
-
-def test_run_flow_param_file(monkeypatch, cloud_api):
- post = MagicMock(
- return_value=MagicMock(
- json=MagicMock(return_value=dict(data=dict(flow=[{"id": "flow"}])))
- )
- )
- session = MagicMock()
- session.return_value.post = post
- monkeypatch.setattr("requests.Session", session)
-
- create_flow_run_mock = MagicMock(return_value="id")
- monkeypatch.setattr("prefect.client.Client.create_flow_run", create_flow_run_mock)
- monkeypatch.setattr(
- "prefect.client.Client.get_default_tenant_slug", MagicMock(return_value="tslug")
- )
-
- with tempfile.TemporaryDirectory() as directory:
- file_path = os.path.join(directory, "file.json")
- with open(file_path, "w") as tmp:
- json.dump({"test": 42}, tmp)
-
- runner = CliRunner()
- result = runner.invoke(
- run,
- [
- "flow",
- "--name",
- "flow",
- "--project",
- "project",
- "--version",
- "2",
- "--parameters-file",
- file_path,
- ],
- )
- assert result.exit_code == 0
- assert "Flow Run" in result.output
- assert create_flow_run_mock.called
- assert create_flow_run_mock.call_args[1]["parameters"] == {"test": 42}
-
-
-def test_run_flow_no_labels_provided(monkeypatch, cloud_api):
- post = MagicMock(
- return_value=MagicMock(
- json=MagicMock(return_value=dict(data=dict(flow=[{"id": "flow"}])))
- )
- )
- session = MagicMock()
- session.return_value.post = post
- monkeypatch.setattr("requests.Session", session)
-
- create_flow_run_mock = MagicMock(return_value="id")
- monkeypatch.setattr("prefect.client.Client.create_flow_run", create_flow_run_mock)
- monkeypatch.setattr(
- "prefect.client.Client.get_default_tenant_slug", MagicMock(return_value="tslug")
- )
-
- runner = CliRunner()
- result = runner.invoke(
- run,
- [
- "flow",
- "--name",
- "flow",
- "--project",
- "project",
- ],
- )
- assert result.exit_code == 0
- assert "Flow Run" in result.output
- assert create_flow_run_mock.called
- assert create_flow_run_mock.call_args[1]["labels"] is None
-
-
-def test_run_flow_param_string(monkeypatch, cloud_api):
- post = MagicMock(
- return_value=MagicMock(
- json=MagicMock(return_value=dict(data=dict(flow=[{"id": "flow"}])))
- )
- )
- session = MagicMock()
- session.return_value.post = post
- monkeypatch.setattr("requests.Session", session)
-
- create_flow_run_mock = MagicMock(return_value="id")
- monkeypatch.setattr("prefect.client.Client.create_flow_run", create_flow_run_mock)
- monkeypatch.setattr(
- "prefect.client.Client.get_default_tenant_slug", MagicMock(return_value="tslug")
- )
-
- runner = CliRunner()
- result = runner.invoke(
- run,
- [
- "flow",
- "--name",
- "flow",
- "--project",
- "project",
- "--version",
- "2",
- "--parameters-string",
- '{"test": 42}',
- ],
- )
- assert result.exit_code == 0
- assert "Flow Run" in result.output
- assert create_flow_run_mock.called
- assert create_flow_run_mock.call_args[1]["parameters"] == {"test": 42}
-
-
-def test_run_flow_context_string(monkeypatch, cloud_api):
- post = MagicMock(
- return_value=MagicMock(
- json=MagicMock(return_value=dict(data=dict(flow=[{"id": "flow"}])))
- )
- )
- session = MagicMock()
- session.return_value.post = post
- monkeypatch.setattr("requests.Session", session)
-
- create_flow_run_mock = MagicMock(return_value="id")
- monkeypatch.setattr("prefect.client.Client.create_flow_run", create_flow_run_mock)
- monkeypatch.setattr(
- "prefect.client.Client.get_default_tenant_slug", MagicMock(return_value="tslug")
- )
-
- runner = CliRunner()
- result = runner.invoke(
- run,
- [
- "flow",
- "--name",
- "flow",
- "--project",
- "project",
- "--version",
- "2",
- "--context",
- '{"test": 42}',
- ],
- )
- assert result.exit_code == 0
- assert "Flow Run" in result.output
- assert create_flow_run_mock.called
- assert create_flow_run_mock.call_args[1]["context"] == {"test": 42}
-
-
-def test_run_flow_run_name(monkeypatch, cloud_api):
- post = MagicMock(
- return_value=MagicMock(
- json=MagicMock(return_value=dict(data=dict(flow=[{"id": "flow"}])))
- )
- )
- session = MagicMock()
- session.return_value.post = post
- monkeypatch.setattr("requests.Session", session)
-
- create_flow_run_mock = MagicMock(return_value="id")
- monkeypatch.setattr("prefect.client.Client.create_flow_run", create_flow_run_mock)
- monkeypatch.setattr(
- "prefect.client.Client.get_default_tenant_slug", MagicMock(return_value="tslug")
- )
-
- runner = CliRunner()
- result = runner.invoke(
- run,
- [
- "flow",
- "--name",
- "flow",
- "--project",
- "project",
- "--version",
- "2",
- "--run-name",
- "NAME",
- ],
- )
- assert result.exit_code == 0
- assert "Flow Run" in result.output
- assert create_flow_run_mock.called
- assert create_flow_run_mock.call_args[1]["run_name"] == "NAME"
-
-
-def test_run_flow_param_string_overwrites(monkeypatch, cloud_api):
- post = MagicMock(
- return_value=MagicMock(
- json=MagicMock(return_value=dict(data=dict(flow=[{"id": "flow"}])))
- )
- )
- session = MagicMock()
- session.return_value.post = post
- monkeypatch.setattr("requests.Session", session)
-
- create_flow_run_mock = MagicMock(return_value="id")
- monkeypatch.setattr("prefect.client.Client.create_flow_run", create_flow_run_mock)
- monkeypatch.setattr(
- "prefect.client.Client.get_default_tenant_slug", MagicMock(return_value="tslug")
- )
-
- with tempfile.TemporaryDirectory() as directory:
- file_path = os.path.join(directory, "file.json")
- with open(file_path, "w") as tmp:
- json.dump({"test": 42}, tmp)
-
- runner = CliRunner()
- result = runner.invoke(
- run,
- [
- "flow",
- "--name",
- "flow",
- "--project",
- "project",
- "--version",
- "2",
- "--parameters-file",
- file_path,
- "--parameters-string",
- '{"test": 43}',
- ],
- )
- assert result.exit_code == 0
- assert "Flow Run" in result.output
- assert create_flow_run_mock.called
- assert create_flow_run_mock.call_args[1]["parameters"] == {"test": 43}
-
-
-@pytest.mark.parametrize(
- "api,expected",
- [
- ("https://api.prefect.io", "https://cloud.prefect.io/tslug/flow-run/id"),
- ("https://api-foo.prefect.io", "https://foo.prefect.io/tslug/flow-run/id"),
- ],
-)
-def test_run_flow_flow_run_id_link(monkeypatch, api, expected, cloud_api):
- post = MagicMock(
- return_value=MagicMock(
- json=MagicMock(
- return_value=dict(
- data=dict(flow=[{"id": "flow"}], tenant=[{"id": "id"}])
- )
- )
- )
- )
- session = MagicMock()
- session.return_value.post = post
- monkeypatch.setattr("requests.Session", session)
-
- create_flow_run_mock = MagicMock(return_value="id")
- monkeypatch.setattr("prefect.client.Client.create_flow_run", create_flow_run_mock)
- monkeypatch.setattr(
- "prefect.client.Client.get_default_tenant_slug", MagicMock(return_value="tslug")
- )
-
- with set_temporary_config({"cloud.api": api, "cloud.auth_token": "secret_token"}):
- runner = CliRunner()
- result = runner.invoke(
- run, ["flow", "--name", "flow", "--project", "project", "--version", "2"]
- )
- assert result.exit_code == 0
- assert "Flow Run" in result.output
- assert expected in result.output
-
-
-def test_run_flow_flow_run_id_no_link(monkeypatch, cloud_api):
- post = MagicMock(
- return_value=MagicMock(
- json=MagicMock(return_value=dict(data=dict(flow=[{"id": "flow"}])))
- )
- )
- session = MagicMock()
- session.return_value.post = post
- monkeypatch.setattr("requests.Session", session)
-
- create_flow_run_mock = MagicMock(return_value="id")
- monkeypatch.setattr("prefect.client.Client.create_flow_run", create_flow_run_mock)
- monkeypatch.setattr(
- "prefect.client.Client.get_default_tenant_slug", MagicMock(return_value="tslug")
- )
-
- runner = CliRunner()
- result = runner.invoke(
- run,
- [
- "flow",
- "--name",
- "flow",
- "--project",
- "project",
- "--version",
- "2",
- "--no-url",
- ],
- )
- assert result.exit_code == 0
- assert "Flow Run ID" in result.output
-
-
-def test_run_flow_using_id(monkeypatch, cloud_api):
- post = MagicMock(
- return_value=MagicMock(
- json=MagicMock(return_value=dict(data=dict(flow=[{"id": "flow"}])))
- )
- )
- session = MagicMock()
- session.return_value.post = post
- monkeypatch.setattr("requests.Session", session)
-
- create_flow_run_mock = MagicMock(return_value="id")
- monkeypatch.setattr("prefect.client.Client.create_flow_run", create_flow_run_mock)
- monkeypatch.setattr(
- "prefect.client.Client.get_default_tenant_slug", MagicMock(return_value="tslug")
- )
-
- runner = CliRunner()
- result = runner.invoke(
- run,
- ["flow", "--id", "id"],
- )
- assert result.exit_code == 0
- assert "Flow Run" in result.output
- assert create_flow_run_mock.called
-
-
-def test_run_flow_labels(monkeypatch, cloud_api):
- post = MagicMock(
- return_value=MagicMock(
- json=MagicMock(return_value=dict(data=dict(flow=[{"id": "flow"}])))
- )
- )
- session = MagicMock()
- session.return_value.post = post
- monkeypatch.setattr("requests.Session", session)
-
- create_flow_run_mock = MagicMock(return_value="id")
- monkeypatch.setattr("prefect.client.Client.create_flow_run", create_flow_run_mock)
- monkeypatch.setattr(
- "prefect.client.Client.get_default_tenant_slug", MagicMock(return_value="tslug")
- )
-
- runner = CliRunner()
- result = runner.invoke(
- run,
- [
- "flow",
- "--name",
- "flow",
- "--project",
- "project",
- "--label",
- "label1",
- "--label",
- "label2",
- ],
- )
- assert result.exit_code == 0
- assert "Flow Run" in result.output
- assert create_flow_run_mock.called
-
-
-def test_run_flow_using_version_group_id(monkeypatch, cloud_api):
- post = MagicMock(
- return_value=MagicMock(
- json=MagicMock(return_value=dict(data=dict(flow=[{"id": "flow"}])))
- )
- )
- session = MagicMock()
- session.return_value.post = post
- monkeypatch.setattr("requests.Session", session)
-
- create_flow_run_mock = MagicMock(return_value="id")
- monkeypatch.setattr("prefect.client.Client.create_flow_run", create_flow_run_mock)
- monkeypatch.setattr(
- "prefect.client.Client.get_default_tenant_slug", MagicMock(return_value="tslug")
- )
-
- runner = CliRunner()
- result = runner.invoke(
- run,
- ["flow", "--version-group-id", "v_id"],
- )
- assert result.exit_code == 0
- assert "Flow Run" in result.output
- assert create_flow_run_mock.called
-
-
-def test_run_flow_no_id_or_name_and_project():
- runner = CliRunner()
- result = runner.invoke(
- run,
- [
- "flow",
- ],
- )
- assert (
- "A flow ID, version group ID, or a combination of flow name and project must be provided."
- in result.output
- )
-
-
-def test_run_flow_no_id_or_name_and_project():
- runner = CliRunner()
- result = runner.invoke(
- run,
- [
- "flow",
- "--id",
- "id",
- "--name",
- "flow",
- "--project",
- "project",
- ],
- )
- assert (
- "Only one of flow ID, version group ID, or a name/project combination can be provided."
- in result.output
- )
diff --git a/tests/client/test_client.py b/tests/client/test_client.py
index 2ca3c16643e7..79724951ab05 100644
--- a/tests/client/test_client.py
+++ b/tests/client/test_client.py
@@ -15,13 +15,16 @@
from prefect.utilities.graphql import GraphQLResult
from prefect.engine.result import Result
from prefect.engine.state import Pending, Running, State
-from prefect.environments.execution import LocalEnvironment
from prefect.storage import Local
from prefect.run_configs import LocalRun
from prefect.utilities.configuration import set_temporary_config
from prefect.exceptions import ClientError, AuthorizationError, ObjectNotFoundError
from prefect.utilities.graphql import decompress
+# Note: Because we're running tests in parallel, this must be hard-coded instead of
+# dynamically generated as a new UUID
+TEST_TENANT_ID = "b6b350a8-cd8f-4f45-a211-b80469497052"
+
class TestClientAuthentication:
"""
@@ -60,72 +63,80 @@ def test_client_determines_tenant_id_in_expected_order(self):
# 1. Directly passed
# 2. From the config
# 3. From the disk
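+ # Distinct UUIDs let the assertions below tell which source supplied the id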
+ disk_tenant = str(uuid.uuid4())
+ config_tenant = str(uuid.uuid4())
+ direct_tenant = str(uuid.uuid4())
# No key should be present yet
client = Client()
assert client._tenant_id is None
# Save to disk (and set an API key so we don't enter API token logic)
- client = Client(api_key="KEY", tenant_id="DISK_TENANT")
+ client = Client(api_key="KEY", tenant_id=disk_tenant)
client.save_auth_to_disk()
# Set in config
- with set_temporary_config({"cloud.tenant_id": "CONFIG_TENANT"}):
+ with set_temporary_config({"cloud.tenant_id": config_tenant}):
# Should ignore config/disk
- client = Client(tenant_id="DIRECT_TENANT")
- assert client._tenant_id == "DIRECT_TENANT"
+ client = Client(tenant_id=direct_tenant)
+ assert client._tenant_id == direct_tenant
# Should load from config
client = Client()
- assert client._tenant_id == "CONFIG_TENANT"
+ assert client._tenant_id == config_tenant
# Should load from disk
client = Client()
- assert client._tenant_id == "DISK_TENANT"
+ assert client._tenant_id == disk_tenant
def test_client_save_auth_to_disk(self):
# Ensure saving is robust to a missing directory
Path(prefect.context.config.home_dir).rmdir()
- client = Client(api_key="KEY", tenant_id="ID")
+ client = Client(api_key="KEY", tenant_id=TEST_TENANT_ID)
client.save_auth_to_disk()
data = toml.loads(client._auth_file.read_text())
assert set(data.keys()) == {client._api_server_slug}
- assert data[client._api_server_slug] == dict(api_key="KEY", tenant_id="ID")
+ assert data[client._api_server_slug] == dict(
+ api_key="KEY", tenant_id=TEST_TENANT_ID
+ )
old_key = client._api_server_slug
client.api_server = "foo"
client.api_key = "NEW_KEY"
- client.tenant_id = "NEW_ID"
+ new_tenant_id = str(uuid.uuid4())
+ client.tenant_id = new_tenant_id
client.save_auth_to_disk()
data = toml.loads(client._auth_file.read_text())
assert set(data.keys()) == {client._api_server_slug, old_key}
assert data[client._api_server_slug] == dict(
- api_key="NEW_KEY", tenant_id="NEW_ID"
+ api_key="NEW_KEY", tenant_id=new_tenant_id
)
# Old data is unchanged
- assert data[old_key] == dict(api_key="KEY", tenant_id="ID")
+ assert data[old_key] == dict(api_key="KEY", tenant_id=TEST_TENANT_ID)
def test_client_load_auth_from_disk(self):
- client = Client(api_key="KEY", tenant_id="ID")
+ client = Client(api_key="KEY", tenant_id=TEST_TENANT_ID)
client.save_auth_to_disk()
client = Client()
assert client.api_key == "KEY"
- assert client.tenant_id == "ID"
+ assert client.tenant_id == TEST_TENANT_ID
+
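+ # Write different credentials to disk to verify the loaded client is not mutated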
+ new_tenant_id = str(uuid.uuid4())
client._auth_file.write_text(
toml.dumps(
{
client._api_server_slug: {
"api_key": "NEW_KEY",
- "tenant_id": "NEW_ID",
+ "tenant_id": new_tenant_id,
}
}
)
@@ -134,10 +145,10 @@ def test_client_load_auth_from_disk(self):
# Does not mutate the client!
assert client.api_key == "KEY"
- assert client.tenant_id == "ID"
+ assert client.tenant_id == TEST_TENANT_ID
assert data["api_key"] == "NEW_KEY"
- assert data["tenant_id"] == "NEW_ID"
+ assert data["tenant_id"] == new_tenant_id
def test_client_sets_api_key_in_header(self, monkeypatch):
Session = MagicMock()
@@ -157,46 +168,28 @@ def test_client_sets_tenant_id_in_header(self, monkeypatch):
Session = MagicMock()
monkeypatch.setattr("requests.Session", Session)
- client = Client(api_key="foo", tenant_id="bar")
+ client = Client(api_key="foo", tenant_id=TEST_TENANT_ID)
client.get("path")
headers = Session().get.call_args[1]["headers"]
assert "Authorization" in headers
assert headers["Authorization"] == "Bearer foo"
assert "X-PREFECT-TENANT-ID" in headers
- assert headers["X-PREFECT-TENANT-ID"] == "bar"
-
- def test_client_does_not_set_tenant_id_in_header_when_using_api_token(
- self, monkeypatch
- ):
- Session = MagicMock()
- monkeypatch.setattr("requests.Session", Session)
-
- client = Client(api_token="foo", tenant_id="bar")
- client.get("path")
+ assert headers["X-PREFECT-TENANT-ID"] == TEST_TENANT_ID
- headers = Session().get.call_args[1]["headers"]
- assert "X-PREFECT-TENANT-ID" not in headers
-
- @pytest.mark.parametrize("tenant_id", [None, "id"])
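+ # Cover both the "tenant preset" and "tenant must be queried" paths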
+ @pytest.mark.parametrize("tenant_id", [None, TEST_TENANT_ID])
def test_client_tenant_id_returns_set_tenant_or_queries(self, tenant_id):
client = Client(api_key="foo", tenant_id=tenant_id)
- client._get_auth_tenant = MagicMock(return_value="id")
+ client._get_auth_tenant = MagicMock(return_value=TEST_TENANT_ID)
- assert client.tenant_id == "id"
+ assert client.tenant_id == TEST_TENANT_ID
if not tenant_id:
client._get_auth_tenant.assert_called_once()
else:
client._get_auth_tenant.assert_not_called()
- def test_client_tenant_id_backwards_compat_for_api_tokens(self, monkeypatch):
- client = Client(api_token="foo")
- client._init_tenant = MagicMock()
- client.tenant_id
- client._init_tenant.assert_called_once()
-
def test_client_tenant_id_gets_default_tenant_for_server(self):
with set_temporary_config({"backend": "server"}):
client = Client()
@@ -279,8 +272,8 @@ def test_client_posts_to_api_server(patch_post):
with set_temporary_config(
{
- "cloud.graphql": "http://my-cloud.foo",
- "cloud.auth_token": "secret_token",
+ "cloud.api": "http://my-cloud.foo",
+ "cloud.api_key": "key",
"backend": "cloud",
"backend": "cloud",
}
@@ -300,7 +293,7 @@ def test_version_header(monkeypatch):
with set_temporary_config(
{
"cloud.api": "http://my-cloud.foo",
- "cloud.auth_token": "secret_token",
+ "cloud.api_key": "key",
"backend": "cloud",
}
):
@@ -319,7 +312,7 @@ def test_version_header_cant_be_overridden(monkeypatch):
with set_temporary_config(
{
"cloud.api": "http://my-cloud.foo",
- "cloud.auth_token": "secret_token",
+ "cloud.api_key": "key",
"backend": "cloud",
}
):
@@ -335,7 +328,9 @@ def test_client_attached_headers(monkeypatch, cloud_api):
session = MagicMock()
session.return_value.get = get
monkeypatch.setattr("requests.Session", session)
- with set_temporary_config({"cloud.auth_token": "secret_token", "backend": "cloud"}):
+ with set_temporary_config(
+ {"cloud.api_key": "key", "cloud.tenant_id": TEST_TENANT_ID, "backend": "cloud"}
+ ):
client = Client()
assert client._attached_headers == {}
@@ -351,8 +346,8 @@ def test_client_posts_graphql_to_api_server(patch_post):
with set_temporary_config(
{
- "cloud.graphql": "http://my-cloud.foo",
- "cloud.auth_token": "secret_token",
+ "cloud.api": "http://my-cloud.foo",
+ "cloud.api_key": "key",
"backend": "cloud",
}
):
@@ -370,7 +365,7 @@ def test_graphql_errors_get_raised(patch_post):
with set_temporary_config(
{
"cloud.api": "http://my-cloud.foo",
- "cloud.auth_token": "secret_token",
+ "cloud.api_key": "key",
"backend": "cloud",
}
):
@@ -392,7 +387,7 @@ def get_client(self):
with set_temporary_config(
{
"cloud.api": "http://my-cloud.foo",
- "cloud.auth_token": "secret_token",
+ "cloud.api_key": "key",
"backend": "cloud",
}
):
@@ -446,7 +441,7 @@ def test_client_register_raises_if_required_param_isnt_scheduled(
with set_temporary_config(
{
"cloud.api": "http://my-cloud.foo",
- "cloud.auth_token": "secret_token",
+ "cloud.api_key": "key",
"backend": "cloud",
}
):
@@ -504,7 +499,7 @@ def test_client_register_doesnt_raise_for_scheduled_params(
with set_temporary_config(
{
"cloud.api": "http://my-cloud.foo",
- "cloud.auth_token": "secret_token",
+ "cloud.api_key": "key",
"backend": "cloud",
}
):
@@ -558,7 +553,7 @@ def test_client_register(patch_post, compressed, monkeypatch, tmpdir):
with set_temporary_config(
{
"cloud.api": "http://my-cloud.foo",
- "cloud.auth_token": "secret_token",
+ "cloud.api_key": "key",
"backend": "cloud",
}
):
@@ -605,7 +600,7 @@ def a(x):
with set_temporary_config(
{
"cloud.api": "http://my-cloud.foo",
- "cloud.auth_token": "secret_token",
+ "cloud.api_key": "key",
"backend": "cloud",
}
):
@@ -649,7 +644,7 @@ def test_client_register_doesnt_raise_if_no_keyed_edges(
with set_temporary_config(
{
"cloud.api": "http://my-cloud.foo",
- "cloud.auth_token": "secret_token",
+ "cloud.api_key": "key",
"backend": "cloud",
}
):
@@ -689,7 +684,7 @@ def test_client_register_builds_flow(patch_post, compressed, monkeypatch, tmpdir
with set_temporary_config(
{
"cloud.api": "http://my-cloud.foo",
- "cloud.auth_token": "secret_token",
+ "cloud.api_key": "key",
"backend": "cloud",
}
):
@@ -742,7 +737,7 @@ def test_client_register_docker_image_name(patch_post, compressed, monkeypatch,
with set_temporary_config(
{
"cloud.api": "http://my-cloud.foo",
- "cloud.auth_token": "secret_token",
+ "cloud.api_key": "key",
"backend": "cloud",
}
):
@@ -750,68 +745,6 @@ def test_client_register_docker_image_name(patch_post, compressed, monkeypatch,
flow = prefect.Flow(
name="test",
storage=prefect.storage.Docker(image_name="test_image"),
- environment=LocalEnvironment(),
- )
- flow.result = flow.storage.result
-
- client.register(
- flow,
- project_name="my-default-project",
- compressed=compressed,
- build=True,
- no_url=True,
- set_schedule_active=False,
- )
-
- # extract POST info
- if compressed:
- serialized_flow = decompress(
- json.loads(post.call_args_list[1][1]["json"]["variables"])["input"][
- "serialized_flow"
- ]
- )
- else:
- serialized_flow = json.loads(post.call_args_list[1][1]["json"]["variables"])[
- "input"
- ]["serialized_flow"]
- assert serialized_flow["storage"] is not None
- assert "test_image" in serialized_flow["environment"]["metadata"]["image"]
-
-
-@pytest.mark.parametrize("compressed", [True, False])
-def test_client_register_default_prefect_image(
- patch_post, compressed, monkeypatch, tmpdir
-):
- if compressed:
- response = {
- "data": {
- "project": [{"id": "proj-id"}],
- "create_flow_from_compressed_string": {"id": "long-id"},
- }
- }
- else:
- response = {
- "data": {"project": [{"id": "proj-id"}], "create_flow": {"id": "long-id"}}
- }
- post = patch_post(response)
-
- monkeypatch.setattr(
- "prefect.client.Client.get_default_tenant_slug", MagicMock(return_value="tslug")
- )
- monkeypatch.setattr("prefect.storage.Docker._build_image", MagicMock())
-
- with set_temporary_config(
- {
- "cloud.api": "http://my-cloud.foo",
- "cloud.auth_token": "secret_token",
- "backend": "cloud",
- }
- ):
- client = Client()
- flow = prefect.Flow(
- name="test",
- storage=prefect.storage.Local(tmpdir),
- environment=LocalEnvironment(),
)
flow.result = flow.storage.result
@@ -836,7 +769,7 @@ def test_client_register_default_prefect_image(
"input"
]["serialized_flow"]
assert serialized_flow["storage"] is not None
- assert "prefecthq/prefect" in serialized_flow["environment"]["metadata"]["image"]
+ assert serialized_flow["storage"]["image_name"] == "test_image"
@pytest.mark.parametrize("compressed", [True, False])
@@ -863,7 +796,7 @@ def test_client_register_optionally_avoids_building_flow(
with set_temporary_config(
{
"cloud.api": "http://my-cloud.foo",
- "cloud.auth_token": "secret_token",
+ "cloud.api_key": "key",
"backend": "cloud",
}
):
@@ -901,7 +834,9 @@ def test_client_register_with_bad_proj_name(patch_post, monkeypatch, cloud_api):
"prefect.client.Client.get_default_tenant_slug", MagicMock(return_value="tslug")
)
- with set_temporary_config({"cloud.auth_token": "secret_token", "backend": "cloud"}):
+ with set_temporary_config(
+ {"cloud.api_key": "key", "cloud.tenant_id": TEST_TENANT_ID, "backend": "cloud"}
+ ):
client = Client()
flow = prefect.Flow(name="test")
flow.result = Result()
@@ -929,7 +864,9 @@ def test_client_create_project_that_already_exists(patch_posts, monkeypatch):
"prefect.client.Client.get_default_tenant_slug", MagicMock(return_value="tslug")
)
- with set_temporary_config({"cloud.auth_token": "secret_token", "backend": "cloud"}):
+ with set_temporary_config(
+ {"cloud.api_key": "key", "cloud.tenant_id": TEST_TENANT_ID, "backend": "cloud"}
+ ):
client = Client()
project_id = client.create_project(project_name="my-default-project")
assert project_id == "proj-id"
@@ -944,7 +881,9 @@ def test_client_delete_project(patch_post, monkeypatch):
"prefect.client.Client.get_default_tenant_slug", MagicMock(return_value="tslug")
)
- with set_temporary_config({"cloud.auth_token": "secret_token", "backend": "cloud"}):
+ with set_temporary_config(
+ {"cloud.api_key": "key", "cloud.tenant_id": TEST_TENANT_ID, "backend": "cloud"}
+ ):
client = Client()
result = client.delete_project(project_name="my-default-project")
assert result is True
@@ -965,7 +904,9 @@ def test_client_delete_project_error(patch_post, monkeypatch):
"prefect.client.Client.get_default_tenant_slug", MagicMock(return_value="tslug")
)
- with set_temporary_config({"cloud.auth_token": "secret_token", "backend": "cloud"}):
+ with set_temporary_config(
+ {"cloud.api_key": "key", "cloud.tenant_id": TEST_TENANT_ID, "backend": "cloud"}
+ ):
client = Client()
with pytest.raises(ValueError, match="Project {} not found".format(project_name)):
@@ -982,7 +923,7 @@ def test_client_register_with_flow_that_cant_be_deserialized(patch_post, monkeyp
with set_temporary_config(
{
"cloud.api": "http://my-cloud.foo",
- "cloud.auth_token": "secret_token",
+ "cloud.api_key": "key",
"backend": "cloud",
}
):
@@ -1007,10 +948,9 @@ def test_client_register_with_flow_that_cant_be_deserialized(patch_post, monkeyp
)
-@pytest.mark.parametrize("use_run_config", [True, False])
@pytest.mark.parametrize("compressed", [True, False])
def test_client_register_flow_id_output(
- patch_post, use_run_config, compressed, monkeypatch, capsys, cloud_api, tmpdir
+ patch_post, compressed, monkeypatch, capsys, cloud_api, tmpdir
):
if compressed:
response = {
@@ -1037,7 +977,7 @@ def test_client_register_flow_id_output(
with set_temporary_config(
{
"cloud.api": "http://my-cloud.foo",
- "cloud.auth_token": "secret_token",
+ "cloud.api_key": "key",
"backend": "cloud",
}
):
@@ -1045,15 +985,9 @@ def test_client_register_flow_id_output(
labels = ["test1", "test2"]
storage = Local(tmpdir)
- if use_run_config:
- flow = prefect.Flow(
- name="test", storage=storage, run_config=LocalRun(labels=labels)
- )
- flow.environment = None
- else:
- flow = prefect.Flow(
- name="test", storage=storage, environment=LocalEnvironment(labels=labels)
- )
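+ # Only the run_config path remains now that environment-based flows are removed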
+ flow = prefect.Flow(
+ name="test", storage=storage, run_config=LocalRun(labels=labels)
+ )
flow.result = flow.storage.result
flow_id = client.register(
@@ -1093,7 +1027,7 @@ def test_client_register_flow_id_no_output(
with set_temporary_config(
{
"cloud.api": "http://my-cloud.foo",
- "cloud.auth_token": "secret_token",
+ "cloud.api_key": "key",
"backend": "cloud",
}
):
@@ -1181,7 +1115,7 @@ def test_get_flow_run_info(patch_post):
with set_temporary_config(
{
"cloud.api": "http://my-cloud.foo",
- "cloud.auth_token": "secret_token",
+ "cloud.api_key": "key",
"backend": "cloud",
}
):
@@ -1246,7 +1180,7 @@ def test_get_flow_run_info_with_nontrivial_payloads(patch_post):
with set_temporary_config(
{
"cloud.api": "http://my-cloud.foo",
- "cloud.auth_token": "secret_token",
+ "cloud.api_key": "key",
"backend": "cloud",
}
):
@@ -1275,7 +1209,7 @@ def test_get_flow_run_info_raises_informative_error(patch_post):
with set_temporary_config(
{
"cloud.api": "http://my-cloud.foo",
- "cloud.auth_token": "secret_token",
+ "cloud.api_key": "key",
"backend": "cloud",
}
):
@@ -1284,7 +1218,7 @@ def test_get_flow_run_info_raises_informative_error(patch_post):
client.get_flow_run_info(flow_run_id="74-salt")
-def test_get_flow_run_state(patch_posts, cloud_api, runner_token):
+def test_get_flow_run_state(patch_posts, cloud_api):
query_resp = {
"flow_run_by_pk": {
"serialized_state": {
@@ -1309,7 +1243,7 @@ def test_get_flow_run_state(patch_posts, cloud_api, runner_token):
assert state.message is None
-def test_get_flow_run_state_object_not_found(patch_posts, cloud_api, runner_token):
+def test_get_flow_run_state_object_not_found(patch_posts, cloud_api):
query_resp = {"flow_run_by_pk": {}}
patch_posts([dict(data=query_resp)])
@@ -1332,7 +1266,7 @@ def test_set_flow_run_state(patch_post):
with set_temporary_config(
{
"cloud.api": "http://my-cloud.foo",
- "cloud.auth_token": "secret_token",
+ "cloud.api_key": "key",
"backend": "cloud",
}
):
@@ -1356,7 +1290,7 @@ def test_set_flow_run_state_gets_queued(patch_post):
with set_temporary_config(
{
"cloud.api": "http://my-cloud.foo",
- "cloud.auth_token": "secret_token",
+ "cloud.api_key": "key",
"backend": "cloud",
}
):
@@ -1385,7 +1319,7 @@ def test_set_flow_run_state_uses_config_queue_interval(
with set_temporary_config(
{
"cloud.api": "http://my-cloud.foo",
- "cloud.auth_token": "secret_token",
+ "cloud.api_key": "key",
"backend": "cloud",
"cloud.queue_interval": interval_seconds,
}
@@ -1415,7 +1349,7 @@ def test_set_flow_run_state_with_error(patch_post):
with set_temporary_config(
{
"cloud.api": "http://my-cloud.foo",
- "cloud.auth_token": "secret_token",
+ "cloud.api_key": "key",
"backend": "cloud",
}
):
@@ -1446,7 +1380,7 @@ def test_get_task_run_info(patch_posts):
with set_temporary_config(
{
"cloud.api": "http://my-cloud.foo",
- "cloud.auth_token": "secret_token",
+ "cloud.api_key": "key",
"backend": "cloud",
}
):
@@ -1472,7 +1406,7 @@ def test_get_task_run_info_with_error(patch_post):
with set_temporary_config(
{
"cloud.api": "http://my-cloud.foo",
- "cloud.auth_token": "secret_token",
+ "cloud.api_key": "key",
"backend": "cloud",
}
):
@@ -1495,7 +1429,7 @@ def test_set_task_run_name(patch_posts, cloud_api):
assert result is True
-def test_get_task_run_state(patch_posts, cloud_api, runner_token):
+def test_get_task_run_state(patch_posts, cloud_api):
query_resp = {
"get_task_run_info": {
"serialized_state": {
@@ -1528,7 +1462,7 @@ def test_set_task_run_state(patch_post):
with set_temporary_config(
{
"cloud.api": "http://my-cloud.foo",
- "cloud.auth_token": "secret_token",
+ "cloud.api_key": "key",
"backend": "cloud",
}
):
@@ -1546,7 +1480,7 @@ def test_set_task_run_state_responds_to_status(patch_post):
with set_temporary_config(
{
"cloud.api": "http://my-cloud.foo",
- "cloud.auth_token": "secret_token",
+ "cloud.api_key": "key",
"backend": "cloud",
}
):
@@ -1571,7 +1505,7 @@ def test_set_task_run_state_responds_to_config_when_queued(patch_post):
with set_temporary_config(
{
"cloud.api": "http://my-cloud.foo",
- "cloud.auth_token": "secret_token",
+ "cloud.api_key": "key",
"backend": "cloud",
"cloud.queue_interval": 750,
}
@@ -1597,7 +1531,7 @@ def test_set_task_run_state_with_error(patch_post):
with set_temporary_config(
{
"cloud.api": "http://my-cloud.foo",
- "cloud.auth_token": "secret_token",
+ "cloud.api_key": "key",
"backend": "cloud",
}
):
@@ -1611,7 +1545,7 @@ def test_create_flow_run_requires_flow_id_or_version_group_id():
with set_temporary_config(
{
"cloud.api": "http://my-cloud.foo",
- "cloud.auth_token": "secret_token",
+ "cloud.api_key": "key",
"backend": "cloud",
}
):
@@ -1634,7 +1568,7 @@ def test_create_flow_run_with_input(patch_post, use_flow_id, use_extra_args):
with set_temporary_config(
{
"cloud.api": "http://my-cloud.foo",
- "cloud.auth_token": "secret_token",
+ "cloud.api_key": "key",
"backend": "cloud",
}
):
@@ -1672,31 +1606,11 @@ def test_create_flow_run_with_input(patch_post, use_flow_id, use_extra_args):
assert variables["input"] == expected
-def test_get_default_tenant_slug_as_user(patch_post):
- response = {
- "data": {"user": [{"default_membership": {"tenant": {"slug": "tslug"}}}]}
- }
-
- patch_post(response)
-
- with set_temporary_config(
- {
- "cloud.api": "http://my-cloud.foo",
- "cloud.auth_token": "secret_token",
- "backend": "cloud",
- }
- ):
- client = Client()
- slug = client.get_default_tenant_slug(as_user=True)
-
- assert slug == "tslug"
-
-
-def test_get_default_tenant_slug_not_as_user(patch_post):
+def test_get_default_tenant_slug(patch_post):
response = {
"data": {
"tenant": [
- {"slug": "tslug", "id": "tenant-id"},
+ {"slug": "tslug", "id": TEST_TENANT_ID},
{"slug": "wrongslug", "id": "foo"},
]
}
@@ -1707,23 +1621,22 @@ def test_get_default_tenant_slug_not_as_user(patch_post):
with set_temporary_config(
{
"cloud.api": "http://my-cloud.foo",
- "cloud.auth_token": "secret_token",
- "cloud.tenant_id": "tenant-id",
+ "cloud.api_key": "key",
+ "cloud.tenant_id": TEST_TENANT_ID,
"backend": "cloud",
}
):
client = Client()
- slug = client.get_default_tenant_slug(as_user=False)
+ slug = client.get_default_tenant_slug()
assert slug == "tslug"
-def test_get_default_tenant_slug_not_as_user_with_no_tenant_id(patch_post):
- # Generally, this would occur when using a RUNNER API token
+def test_get_cloud_url(patch_post, cloud_api):
response = {
"data": {
"tenant": [
- {"slug": "firstslug", "id": "tenant-id"},
+ {"slug": "tslug", "id": TEST_TENANT_ID},
{"slug": "wrongslug", "id": "foo"},
]
}
@@ -1731,48 +1644,27 @@ def test_get_default_tenant_slug_not_as_user_with_no_tenant_id(patch_post):
patch_post(response)
- with set_temporary_config(
- {
- "cloud.api": "http://my-cloud.foo",
- "cloud.auth_token": "secret_token",
- "backend": "cloud",
- }
- ):
- client = Client()
- client._tenant_id = None # Ensure tenant id is not set
- slug = client.get_default_tenant_slug(as_user=False)
-
- assert slug == "firstslug"
-
-
-def test_get_cloud_url_as_user(patch_post, cloud_api):
- response = {
- "data": {"user": [{"default_membership": {"tenant": {"slug": "tslug"}}}]}
- }
-
- patch_post(response)
-
with set_temporary_config(
{
"cloud.api": "http://api.prefect.io",
- "cloud.auth_token": "secret_token",
+ "cloud.tenant_id": TEST_TENANT_ID,
"backend": "cloud",
}
):
client = Client()
- url = client.get_cloud_url(subdirectory="flow", id="id", as_user=True)
+ url = client.get_cloud_url(subdirectory="flow", id="id")
assert url == "http://cloud.prefect.io/tslug/flow/id"
- url = client.get_cloud_url(subdirectory="flow-run", id="id2", as_user=True)
+ url = client.get_cloud_url(subdirectory="flow-run", id="id2")
assert url == "http://cloud.prefect.io/tslug/flow-run/id2"
-def test_get_cloud_url_not_as_user(patch_post, cloud_api):
+def test_get_cloud_url_different_regex(patch_post, cloud_api):
response = {
"data": {
"tenant": [
- {"slug": "tslug", "id": "tenant-id"},
+ {"slug": "tslug", "id": TEST_TENANT_ID},
{"slug": "wrongslug", "id": "foo"},
]
}
@@ -1780,33 +1672,11 @@ def test_get_cloud_url_not_as_user(patch_post, cloud_api):
patch_post(response)
- with set_temporary_config(
- {
- "cloud.api": "http://api.prefect.io",
- "backend": "cloud",
- }
- ):
- client = Client()
- client._tenant_id = "tenant-id"
-
- url = client.get_cloud_url(subdirectory="flow", id="id", as_user=False)
- assert url == "http://cloud.prefect.io/tslug/flow/id"
-
- url = client.get_cloud_url(subdirectory="flow-run", id="id2", as_user=False)
- assert url == "http://cloud.prefect.io/tslug/flow-run/id2"
-
-
-def test_get_cloud_url_different_regex(patch_post, cloud_api):
- response = {
- "data": {"user": [{"default_membership": {"tenant": {"slug": "tslug"}}}]}
- }
-
- patch_post(response)
-
with set_temporary_config(
{
"cloud.api": "http://api-hello.prefect.io",
- "cloud.auth_token": "secret_token",
+ "cloud.api_key": "key",
+ "cloud.tenant_id": TEST_TENANT_ID,
"backend": "cloud",
}
):
@@ -1827,7 +1697,7 @@ def test_register_agent(cloud_api):
{
"data": {
"register_agent": {"id": "AGENT-ID"},
- "auth_info": {"tenant_id": "TENANT-ID"},
+ "auth_info": {"tenant_id": TEST_TENANT_ID},
}
}
)
@@ -1848,7 +1718,7 @@ def test_register_agent(cloud_api):
"type": "type",
"name": "name",
"labels": ["1", "2"],
- "tenant_id": "TENANT-ID",
+ "tenant_id": TEST_TENANT_ID,
"agent_config_id": "asdf",
}
},
@@ -1861,7 +1731,9 @@ def test_register_agent_raises_error(patch_post, cloud_api):
patch_post(response)
- with set_temporary_config({"cloud.auth_token": "secret_token", "backend": "cloud"}):
+ with set_temporary_config(
+ {"cloud.api_key": "key", "cloud.tenant_id": TEST_TENANT_ID, "backend": "cloud"}
+ ):
client = Client()
with pytest.raises(ValueError):
@@ -1873,7 +1745,9 @@ def test_get_agent_config(patch_post, cloud_api):
patch_post(response)
- with set_temporary_config({"cloud.auth_token": "secret_token", "backend": "cloud"}):
+ with set_temporary_config(
+ {"cloud.api_key": "key", "cloud.tenant_id": TEST_TENANT_ID, "backend": "cloud"}
+ ):
client = Client()
agent_config = client.get_agent_config(agent_config_id="id")
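
Reviewer note: the hunks above apply one mechanical substitution throughout tests/client/test_client.py — `cloud.auth_token` becomes `cloud.api_key`, usually paired with an explicit `cloud.tenant_id`. A rough sketch of the configuration shape the updated tests assume; `TEST_TENANT_ID` stands in for the module-level constant these hunks reference, and the value below is a placeholder:

# Illustrative sketch only; not part of this diff.
import uuid

from prefect import Client
from prefect.utilities.configuration import set_temporary_config

TEST_TENANT_ID = str(uuid.uuid4())  # placeholder for the module-level constant

with set_temporary_config(
    {
        "cloud.api": "http://my-cloud.foo",
        "cloud.api_key": "key",
        "cloud.tenant_id": TEST_TENANT_ID,
        "backend": "cloud",
    }
):
    client = Client()  # reads the key/tenant pair from the temporary config
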
diff --git a/tests/client/test_client_auth_compat.py b/tests/client/test_client_auth_compat.py
deleted file mode 100644
index 452a9b7e775d..000000000000
--- a/tests/client/test_client_auth_compat.py
+++ /dev/null
@@ -1,644 +0,0 @@
-"""
-This file tests Client auth handling with API tokens which have been deprecated in
-favor of API keys. All of these tests are for backwards compatibility and can be removed
-in 0.16.0 when API tokens are dropped.
-"""
-import json
-import os
-import tempfile
-import uuid
-from pathlib import Path
-from unittest.mock import MagicMock
-
-import pendulum
-import pytest
-import requests
-import toml
-
-import prefect
-from prefect.client.client import Client, FlowRunInfoResult, TaskRunInfoResult
-from prefect.engine.result import Result
-from prefect.engine.state import Pending
-from prefect.utilities.configuration import set_temporary_config
-from prefect.exceptions import AuthorizationError, ClientError
-from prefect.utilities.graphql import GraphQLResult, decompress
-
-
-class TestClientConfig:
- def test_client_initializes_from_config(self):
- with set_temporary_config(
- {
- "cloud.graphql": "api_server",
- "cloud.auth_token": "token",
- "backend": "cloud",
- }
- ):
- client = Client()
- assert client.api_server == "api_server"
- assert client._api_token == "token"
-
- def test_client_initializes_and_prioritizes_kwargs(self):
- with set_temporary_config(
- {
- "cloud.graphql": "api_server",
- "cloud.auth_token": "token",
- "backend": "cloud",
- }
- ):
- client = Client(api_server="my-graphql")
- assert client.api_server == "my-graphql"
- assert client._api_token == "token"
-
- def test_client_settings_path_is_path_object(self, cloud_api):
- assert isinstance(Client()._api_token_settings_path, Path)
-
- def test_client_settings_path_depends_on_api_server(
- self, prefect_home_dir, cloud_api
- ):
- path = Client(
- api_server="https://a-test-api.prefect.test/subdomain"
- )._api_token_settings_path
- expected = os.path.join(
- prefect_home_dir,
- "client",
- "https-a-test-api.prefect.test-subdomain",
- "settings.toml",
- )
- assert str(path) == expected
-
- def test_client_settings_path_depends_on_home_dir(self, cloud_api):
- with set_temporary_config(dict(home_dir="abc/def")):
- path = Client(api_server="xyz")._api_token_settings_path
- expected = os.path.join("abc", "def", "client", "xyz", "settings.toml")
- assert str(path) == os.path.expanduser(expected)
-
- def test_client_token_initializes_from_file(self, monkeypatch, cloud_api):
- with tempfile.TemporaryDirectory() as tmp:
- with set_temporary_config({"home_dir": tmp, "cloud.graphql": "xyz"}):
- path = Path(tmp) / "client" / "xyz" / "settings.toml"
- path.parent.mkdir(parents=True)
- with path.open("w") as f:
- toml.dump(dict(api_token="FILE_TOKEN"), f)
-
- client = Client()
- client._init_tenant()
- assert client._api_token == "FILE_TOKEN"
-
- def test_client_token_prioritizes_config_over_file(self, monkeypatch, cloud_api):
- with tempfile.TemporaryDirectory() as tmp:
- with set_temporary_config(
- {
- "home_dir": tmp,
- "cloud.graphql": "xyz",
- "cloud.auth_token": "CONFIG_TOKEN",
- }
- ):
- path = Path(tmp) / "client" / "xyz" / "settings.toml"
- path.parent.mkdir(parents=True)
- with path.open("w") as f:
- toml.dump(dict(api_token="FILE_TOKEN"), f)
-
- client = Client()
- assert client._api_token == "CONFIG_TOKEN"
-
- def test_client_token_prioritizes_arg_over_config(self, cloud_api):
- with set_temporary_config({"cloud.auth_token": "CONFIG_TOKEN"}):
- client = Client(api_token="ARG_TOKEN")
- assert client._api_token == "ARG_TOKEN"
-
- def test_save_local_settings(self, cloud_api):
- with tempfile.TemporaryDirectory() as tmp:
- with set_temporary_config({"home_dir": tmp, "cloud.graphql": "xyz"}):
- path = Path(tmp) / "client" / "xyz" / "settings.toml"
-
- client = Client(api_token="a")
- client.save_api_token()
- with path.open("r") as f:
- assert toml.load(f)["api_token"] == "a"
-
- client = Client(api_token="b")
- client.save_api_token()
- with path.open("r") as f:
- assert toml.load(f)["api_token"] == "b"
-
- def test_load_local_api_token_is_called_when_the_client_is_initialized_without_token(
- self, cloud_api
- ):
- with tempfile.TemporaryDirectory() as tmp:
- with set_temporary_config({"home_dir": tmp}):
- client = Client(api_token="a")
- client._init_tenant()
- client.save_api_token()
-
- client = Client(api_token="b")
- assert client._api_token == "b"
- client_local_api = Client()
- client_local_api._init_tenant()
- assert client_local_api._api_token == "a"
-
-
-class TestTenantAuth:
- def test_login_to_tenant_requires_argument(self, cloud_api):
- client = Client()
- with pytest.raises(ValueError, match="At least one"):
- client.login_to_tenant()
-
- def test_login_to_tenant_requires_valid_uuid(self, cloud_api):
- client = Client()
- with pytest.raises(ValueError, match="valid UUID"):
- client.login_to_tenant(tenant_id="a")
-
- def test_login_to_client_sets_access_token(self, patch_post, cloud_api):
- tenant_id = str(uuid.uuid4())
- post = patch_post(
- {
- "data": {
- "tenant": [{"id": tenant_id}],
- "switch_tenant": {
- "access_token": "ACCESS_TOKEN",
- "expires_at": "2100-01-01",
- "refresh_token": "REFRESH_TOKEN",
- },
- }
- }
- )
- client = Client()
- assert client._access_token is None
- assert client._refresh_token is None
- client.login_to_tenant(tenant_id=tenant_id)
- assert client._access_token == "ACCESS_TOKEN"
- assert client._refresh_token == "REFRESH_TOKEN"
-
- def test_login_uses_api_token(self, patch_post, cloud_api):
- tenant_id = str(uuid.uuid4())
- post = patch_post(
- {
- "data": {
- "tenant": [{"id": tenant_id}],
- "switch_tenant": {
- "access_token": "ACCESS_TOKEN",
- "expires_at": "2100-01-01",
- "refresh_token": "REFRESH_TOKEN",
- },
- }
- }
- )
- client = Client(api_token="api")
- client.login_to_tenant(tenant_id=tenant_id)
- assert post.call_args[1]["headers"] == {
- "Authorization": "Bearer api",
- "X-PREFECT-CORE-VERSION": str(prefect.__version__),
- }
-
- def test_login_uses_api_token_when_access_token_is_set(self, patch_post, cloud_api):
- tenant_id = str(uuid.uuid4())
- post = patch_post(
- {
- "data": {
- "tenant": [{"id": tenant_id}],
- "switch_tenant": {
- "access_token": "ACCESS_TOKEN",
- "expires_at": "2100-01-01",
- "refresh_token": "REFRESH_TOKEN",
- },
- }
- }
- )
- client = Client(api_token="api")
- client._access_token = "access"
- client.login_to_tenant(tenant_id=tenant_id)
- assert client.get_auth_token() == "ACCESS_TOKEN"
- assert post.call_args[1]["headers"] == {
- "Authorization": "Bearer api",
- "X-PREFECT-CORE-VERSION": str(prefect.__version__),
- }
-
- def test_graphql_uses_access_token_after_login(self, patch_post, cloud_api):
- tenant_id = str(uuid.uuid4())
- post = patch_post(
- {
- "data": {
- "tenant": [{"id": tenant_id}],
- "switch_tenant": {
- "access_token": "ACCESS_TOKEN",
- "expires_at": "2100-01-01",
- "refresh_token": "REFRESH_TOKEN",
- },
- }
- }
- )
- client = Client(api_token="api")
- client.graphql({})
- assert client.get_auth_token() == "api"
- assert post.call_args[1]["headers"] == {
- "Authorization": "Bearer api",
- "X-PREFECT-CORE-VERSION": str(prefect.__version__),
- }
-
- client.login_to_tenant(tenant_id=tenant_id)
- client.graphql({})
- assert client.get_auth_token() == "ACCESS_TOKEN"
- assert post.call_args[1]["headers"] == {
- "Authorization": "Bearer ACCESS_TOKEN",
- "X-PREFECT-CORE-VERSION": str(prefect.__version__),
- }
-
- def test_login_to_tenant_writes_tenant_and_reloads_it_when_token_is_reloaded(
- self, patch_post, cloud_api
- ):
- tenant_id = str(uuid.uuid4())
- post = patch_post(
- {
- "data": {
- "tenant": [{"id": tenant_id}],
- "switch_tenant": {
- "access_token": "ACCESS_TOKEN",
- "expires_at": "2100-01-01",
- "refresh_token": "REFRESH_TOKEN",
- },
- }
- }
- )
-
- client = Client(api_token="abc")
- assert client.tenant_id is None
- client.login_to_tenant(tenant_id=tenant_id)
- client.save_api_token()
- assert client.active_tenant_id == tenant_id
-
- # new client loads the active tenant and token
- client_load_active_tenant = Client()
- # The tenant is initialized by calling the property active_tenant_id
- assert client_load_active_tenant.active_tenant_id == tenant_id
- assert client_load_active_tenant._api_token == "abc"
-
- def test_login_to_client_doesnt_reload_active_tenant_when_token_isnt_loaded(
- self, patch_post, cloud_api
- ):
- tenant_id = str(uuid.uuid4())
- post = patch_post(
- {
- "data": {
- "tenant": [{"id": tenant_id}],
- "switch_tenant": {
- "access_token": "ACCESS_TOKEN",
- "expires_at": "2100-01-01",
- "refresh_token": "REFRESH_TOKEN",
- },
- }
- }
- )
-
- client = Client(api_token="abc")
- assert client.tenant_id is None
- client.login_to_tenant(tenant_id=tenant_id)
- assert client.tenant_id == tenant_id
-
- # new client doesn't load the active tenant because there's no api token loaded
- client = Client()
- client._init_tenant()
- assert client._tenant_id is None
- # Note: Using `.tenant_id` here would activate api_key logic
-
- def test_logout_clears_access_token_and_tenant(self, patch_post, cloud_api):
- tenant_id = str(uuid.uuid4())
- post = patch_post(
- {
- "data": {
- "tenant": [{"id": tenant_id}],
- "switch_tenant": {
- "access_token": "ACCESS_TOKEN",
- "expires_at": "2100-01-01",
- "refresh_token": "REFRESH_TOKEN",
- },
- }
- }
- )
- client = Client(api_token="TOKEN")
- client.login_to_tenant(tenant_id=tenant_id)
-
- assert client._access_token is not None
- assert client._refresh_token is not None
- assert client.tenant_id is not None
-
- client.logout_from_tenant()
-
- assert client._access_token is None
- assert client._refresh_token is None
- assert client.tenant_id is None
-
- # new client doesn't load the active tenant
- assert Client(api_token="TOKEN").tenant_id is None
-
- def test_refresh_token_sets_attributes(self, patch_post, cloud_api):
- patch_post(
- {
- "data": {
- "refresh_token": {
- "access_token": "ACCESS_TOKEN",
- "expires_at": "2100-01-01",
- "refresh_token": "REFRESH_TOKEN",
- }
- }
- }
- )
- client = Client()
- assert client._access_token is None
- assert client._refresh_token is None
-
- # add a buffer because Windows clocks don't resolve milliseconds
- assert client._access_token_expires_at < pendulum.now().add(seconds=1)
- client._refresh_access_token()
- assert client._access_token == "ACCESS_TOKEN"
- assert client._refresh_token == "REFRESH_TOKEN"
- assert client._access_token_expires_at > pendulum.now().add(seconds=599)
-
- def test_refresh_token_passes_access_token_as_arg(self, patch_post, cloud_api):
- post = patch_post(
- {
- "data": {
- "refresh_token": {
- "access_token": "ACCESS_TOKEN",
- "expires_at": "2100-01-01",
- "refresh_token": "REFRESH_TOKEN",
- }
- }
- }
- )
- client = Client()
- client._access_token = "access"
- client._refresh_access_token()
- variables = json.loads(post.call_args[1]["json"]["variables"])
- assert variables["input"]["access_token"] == "access"
-
- def test_refresh_token_passes_refresh_token_as_header(self, patch_post, cloud_api):
- post = patch_post(
- {
- "data": {
- "refresh_token": {
- "access_token": "ACCESS_TOKEN",
- "expires_at": "2100-01-01",
- "refresh_token": "REFRESH_TOKEN",
- }
- }
- }
- )
- client = Client()
- client._refresh_token = "refresh"
- client._refresh_access_token()
- assert post.call_args[1]["headers"] == {
- "Authorization": "Bearer refresh",
- "X-PREFECT-CORE-VERSION": str(prefect.__version__),
- }
-
- def test_get_available_tenants(self, patch_post, cloud_api):
- tenants = [
- {"id": "a", "name": "a-name", "slug": "a-slug"},
- {"id": "b", "name": "b-name", "slug": "b-slug"},
- {"id": "c", "name": "c-name", "slug": "c-slug"},
- ]
- post = patch_post({"data": {"tenant": tenants}})
- client = Client()
- gql_tenants = client.get_available_tenants()
- assert gql_tenants == tenants
-
- def test_get_auth_token_returns_api_if_access_token_not_set(self, cloud_api):
- client = Client(api_token="api")
- assert client._access_token is None
- assert client.get_auth_token() == "api"
-
- def test_get_auth_token_returns_access_token_if_set(self, cloud_api):
- client = Client(api_token="api")
- client._access_token = "access"
- assert client.get_auth_token() == "access"
-
- def test_get_auth_token_refreshes_if_refresh_token_and_expiration_within_30_seconds(
- self, monkeypatch, cloud_api
- ):
- refresh_token = MagicMock()
- monkeypatch.setattr("prefect.Client._refresh_access_token", refresh_token)
- client = Client(api_token="api")
- client._access_token = "access"
- client._refresh_token = "refresh"
- client._access_token_expires_at = pendulum.now().add(seconds=29)
- client.get_auth_token()
- assert refresh_token.called
-
- def test_get_auth_token_refreshes_if_refresh_token_and_no_expiration(
- self, monkeypatch, cloud_api
- ):
- refresh_token = MagicMock()
- monkeypatch.setattr("prefect.Client._refresh_access_token", refresh_token)
- client = Client(api_token="api")
- client._access_token = "access"
- client._refresh_token = "refresh"
- client._access_token_expires_at = None
- client.get_auth_token()
- assert refresh_token.called
-
- def test_get_auth_token_doesnt_refresh_if_refresh_token_and_future_expiration(
- self, monkeypatch, cloud_api
- ):
- refresh_token = MagicMock()
- monkeypatch.setattr("prefect.Client._refresh_access_token", refresh_token)
- client = Client(api_token="api")
- client._access_token = "access"
- client._refresh_token = "refresh"
- client._access_token_expires_at = pendulum.now().add(minutes=10)
- assert client.get_auth_token() == "access"
- refresh_token.assert_not_called()
-
- def test_client_clears_active_tenant_if_login_fails_on_initialization(
- self, patch_post, cloud_api
- ):
- post = patch_post(
- {
- "errors": [
- {
- "message": "",
- "locations": [],
- "path": ["tenant"],
- "extensions": {"code": "UNAUTHENTICATED"},
- }
- ]
- }
- )
-
- # create a client just so we can use its settings methods to store settings
- client = Client()
- settings = client._load_local_settings()
- settings.update(api_token="API_TOKEN", active_tenant_id=str(uuid.uuid4()))
- client._save_local_settings(settings)
-
- # this initialization will fail with the patched error
- client = Client()
- client._init_tenant()
- settings = client._load_local_settings()
- assert "active_tenant_id" not in settings
-
- def test_client_infers_correct_tenant_if_a_token_is_not_user_scoped(
- self, patch_posts, cloud_api
- ):
- patch_posts(
- [
- # First, raise an UNAUTHENTICATED error
- {
- "errors": [
- {
- "message": "",
- "locations": [],
- "path": ["tenant"],
- "extensions": {"code": "UNAUTHENTICATED"},
- }
- ]
- },
- # Then, return a tenant id
- {"data": {"tenant": [{"id": "tenant-id"}]}},
- ]
- )
-
- # create a client just so we can use its settings methods to store settings
- disk_tenant = str(uuid.uuid4())
- client = Client()
- client._save_local_settings(
- dict(api_token="API_TOKEN", active_tenant_id=disk_tenant)
- )
-
- # this initialization will fail to login to the active tenant then load the
- # correct tenant from the API
- client = Client(api_token="API_TOKEN")
- client._init_tenant()
- assert client._tenant_id == "tenant-id"
-
- # Disk is unchanged
- settings = client._load_local_settings()
- assert settings["active_tenant_id"] == disk_tenant
-
- @pytest.mark.parametrize("tenants", ([], [{"id": "1"}, {"id": "2"}]))
- def test_client_throws_error_during_inference_if_non_single_tenant_is_returned(
- self, patch_posts, cloud_api, tenants
- ):
- patch_posts(
- [
- # First, raise an UNAUTHENTICATED error
- {
- "errors": [
- {
- "message": "",
- "locations": [],
- "path": ["tenant"],
- "extensions": {"code": "UNAUTHENTICATED"},
- }
- ]
- },
- # Then, return tenant ids
- {"data": {"tenant": tenants}},
- ]
- )
-
- # create a client just so we can use its settings methods to store settings
- client = Client()
- client._save_local_settings(
- dict(api_token="API_TOKEN", active_tenant_id=str(uuid.uuid4()))
- )
-
- # this initialization will fail to login to the active tenant then load the
- # correct tenant from the API
- client = Client(api_token="API_TOKEN")
- with pytest.raises(ValueError, match="Failed to authorize"):
- client._init_tenant()
-
-
-class TestPassingHeadersAndTokens:
- def test_headers_are_passed_to_get(self, monkeypatch, cloud_api):
- get = MagicMock()
- session = MagicMock()
- session.return_value.get = get
- monkeypatch.setattr("requests.Session", session)
- with set_temporary_config(
- {"cloud.graphql": "http://my-cloud.foo", "cloud.auth_token": "secret_token"}
- ):
- client = Client()
- client.get("/foo/bar", headers={"x": "y", "Authorization": "z"})
- assert get.called
- assert get.call_args[1]["headers"] == {
- "x": "y",
- "Authorization": "Bearer secret_token",
- "X-PREFECT-CORE-VERSION": str(prefect.__version__),
- }
-
- def test_headers_are_passed_to_post(self, monkeypatch, cloud_api):
- post = MagicMock()
- session = MagicMock()
- session.return_value.post = post
- monkeypatch.setattr("requests.Session", session)
- with set_temporary_config(
- {"cloud.graphql": "http://my-cloud.foo", "cloud.auth_token": "secret_token"}
- ):
- client = Client()
- client.post("/foo/bar", headers={"x": "y", "Authorization": "z"})
- assert post.called
- assert post.call_args[1]["headers"] == {
- "x": "y",
- "Authorization": "Bearer secret_token",
- "X-PREFECT-CORE-VERSION": str(prefect.__version__),
- }
-
- def test_headers_are_passed_to_graphql(self, monkeypatch, cloud_api):
- post = MagicMock()
- session = MagicMock()
- session.return_value.post = post
- monkeypatch.setattr("requests.Session", session)
- with set_temporary_config(
- {"cloud.graphql": "http://my-cloud.foo", "cloud.auth_token": "secret_token"}
- ):
- client = Client()
- client.graphql("query {}", headers={"x": "y", "Authorization": "z"})
- assert post.called
- assert post.call_args[1]["headers"] == {
- "x": "y",
- "Authorization": "Bearer secret_token",
- "X-PREFECT-CORE-VERSION": str(prefect.__version__),
- }
-
- def test_tokens_are_passed_to_get(self, monkeypatch, cloud_api):
- get = MagicMock()
- session = MagicMock()
- session.return_value.get = get
- monkeypatch.setattr("requests.Session", session)
- with set_temporary_config({"cloud.graphql": "http://my-cloud.foo"}):
- client = Client()
- client.get("/foo/bar", token="secret_token")
- assert get.called
- assert get.call_args[1]["headers"] == {
- "Authorization": "Bearer secret_token",
- "X-PREFECT-CORE-VERSION": str(prefect.__version__),
- }
-
- def test_tokens_are_passed_to_post(self, monkeypatch, cloud_api):
- post = MagicMock()
- session = MagicMock()
- session.return_value.post = post
- monkeypatch.setattr("requests.Session", session)
- with set_temporary_config({"cloud.graphql": "http://my-cloud.foo"}):
- client = Client()
- client.post("/foo/bar", token="secret_token")
- assert post.called
- assert post.call_args[1]["headers"] == {
- "Authorization": "Bearer secret_token",
- "X-PREFECT-CORE-VERSION": str(prefect.__version__),
- }
-
- def test_tokens_are_passed_to_graphql(self, monkeypatch, cloud_api):
- post = MagicMock()
- session = MagicMock()
- session.return_value.post = post
- monkeypatch.setattr("requests.Session", session)
- with set_temporary_config({"cloud.graphql": "http://my-cloud.foo"}):
- client = Client()
- client.graphql("query {}", token="secret_token")
- assert post.called
- assert post.call_args[1]["headers"] == {
- "Authorization": "Bearer secret_token",
- "X-PREFECT-CORE-VERSION": str(prefect.__version__),
- }
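
Reviewer note: deleting test_client_auth_compat.py retires the token-era auth surface wholesale — per-API-server settings files, `login_to_tenant`, and access/refresh-token rotation. A hedged before/after sketch of what replaces it; the constructor kwargs are an assumption inferred from the new config keys, not shown verbatim in this diff:

from prefect import Client

# Before (removed): token-based auth with an explicit tenant login.
#   client = Client(api_token="TOKEN")
#   client.login_to_tenant(tenant_id="...")

# After: one API key, optionally pinned to a tenant up front. The kwargs here
# are an assumption based on the new config keys; the tenant id reuses the
# value from the conftest.py fixture added later in this diff.
client = Client(api_key="key", tenant_id="fa68f96e-0c80-4e0d-9c2a-e11452f1d786")
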
diff --git a/tests/client/test_secrets.py b/tests/client/test_secrets.py
index 759dbd09e582..17a49f25b4ef 100644
--- a/tests/client/test_secrets.py
+++ b/tests/client/test_secrets.py
@@ -51,7 +51,7 @@ def test_secret_value_depends_on_use_local_secrets(monkeypatch):
secret = Secret(name="test")
with set_temporary_config(
- {"cloud.use_local_secrets": False, "cloud.auth_token": None}
+ {"cloud.use_local_secrets": False, "cloud.api_key": None}
):
with prefect.context(secrets=dict()):
with pytest.raises(ClientError):
@@ -65,7 +65,7 @@ def test_secrets_use_client(monkeypatch, cloud_api):
session.return_value.post = post
monkeypatch.setattr("requests.Session", session)
with set_temporary_config(
- {"cloud.auth_token": "secret_token", "cloud.use_local_secrets": False}
+ {"cloud.api_key": "api-key", "cloud.use_local_secrets": False}
):
my_secret = Secret(name="the-key")
val = my_secret.get()
@@ -79,7 +79,7 @@ def test_cloud_secrets_use_context_first(monkeypatch):
session.return_value.post = post
monkeypatch.setattr("requests.Session", session)
with set_temporary_config(
- {"cloud.auth_token": "secret_token", "cloud.use_local_secrets": False}
+ {"cloud.api_key": "api-key", "cloud.use_local_secrets": False}
):
with prefect.context(secrets={"the-key": "foo"}):
my_secret = Secret(name="the-key")
@@ -94,7 +94,7 @@ def test_cloud_secrets_use_context_first_but_fallback_to_client(monkeypatch, clo
session.return_value.post = post
monkeypatch.setattr("requests.Session", session)
with set_temporary_config(
- {"cloud.auth_token": "secret_token", "cloud.use_local_secrets": False}
+ {"cloud.api_key": "api-key", "cloud.use_local_secrets": False}
):
with prefect.context(secrets={}):
my_secret = Secret(name="the-key")
@@ -109,7 +109,7 @@ def test_cloud_secrets_remain_plain_dictionaries(monkeypatch, cloud_api):
session.return_value.post = post
monkeypatch.setattr("requests.Session", session)
with set_temporary_config(
- {"cloud.auth_token": "secret_token", "cloud.use_local_secrets": False}
+ {"cloud.api_key": "api-key", "cloud.use_local_secrets": False}
):
my_secret = Secret(name="the-key")
val = my_secret.get()
@@ -128,7 +128,7 @@ def test_cloud_secrets_auto_load_json_strings(monkeypatch, cloud_api):
session.return_value.post = post
monkeypatch.setattr("requests.Session", session)
with set_temporary_config(
- {"cloud.auth_token": "secret_token", "cloud.use_local_secrets": False}
+ {"cloud.api_key": "api-key", "cloud.use_local_secrets": False}
):
my_secret = Secret(name="the-key")
val = my_secret.get()
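
Reviewer note: test_secrets.py gets the same config-key swap; the Secret behavior under test is unchanged. As a minimal sketch of the local-secrets fallback these cloud tests deliberately disable (a secret supplied through context is read back directly):

import prefect
from prefect.client import Secret
from prefect.utilities.configuration import set_temporary_config

with set_temporary_config({"cloud.use_local_secrets": True}):
    with prefect.context(secrets={"the-key": "foo"}):
        # With local secrets enabled, no API call is made.
        assert Secret(name="the-key").get() == "foo"
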
diff --git a/tests/conftest.py b/tests/conftest.py
index de415a8ded9b..42ec645ba63a 100644
--- a/tests/conftest.py
+++ b/tests/conftest.py
@@ -175,12 +175,6 @@ def patch(responses):
return patch
-@pytest.fixture()
-def runner_token(monkeypatch):
- monkeypatch.setattr("prefect.agent.agent.Agent._verify_token", MagicMock())
- monkeypatch.setattr("prefect.agent.agent.Agent._register_agent", MagicMock())
-
-
@pytest.fixture()
def cloud_api():
with prefect.utilities.configuration.set_temporary_config(
@@ -214,6 +208,17 @@ def running_with_backend():
yield
+@pytest.fixture
+def config_with_api_key(cloud_api):
+ with prefect.utilities.configuration.set_temporary_config(
+ {
+ "cloud.api_key": "TEST_KEY",
+ "cloud.tenant_id": "fa68f96e-0c80-4e0d-9c2a-e11452f1d786",
+ }
+ ) as config:
+ yield config
+
+
# ----------------
# set up platform fixtures
# for every test that performs OS dependent logic
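
Reviewer note: the new `config_with_api_key` fixture composes with `cloud_api`, so a test opts into key-based auth with a single argument. A hypothetical consumer — the test name and assertion are illustrative only, and it assumes the client exposes the configured key as `Client.api_key`:

def test_client_reads_key_from_config(config_with_api_key):
    from prefect import Client

    client = Client()
    # The fixture sets cloud.api_key, so the client picks it up from config.
    assert client.api_key == "TEST_KEY"
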
diff --git a/tests/core/test_flow.py b/tests/core/test_flow.py
index 3e44a2d65b04..2368b582079f 100644
--- a/tests/core/test_flow.py
+++ b/tests/core/test_flow.py
@@ -46,7 +46,6 @@
TriggerFailed,
TimedOut,
)
-from prefect.environments.execution import LocalEnvironment
from prefect.run_configs import LocalRun, UniversalRun
from prefect.schedules.clocks import ClockEvent
from prefect.tasks.core.function import FunctionTask
@@ -164,11 +163,6 @@ def test_create_flow_with_storage_and_result(self):
assert f2.result != f2.storage.result
assert f2.result == result
- def test_create_flow_with_environment(self):
- env = prefect.environments.LocalEnvironment()
- f2 = Flow(name="test", environment=env)
- assert f2.environment is env
-
def test_create_flow_auto_generates_tasks(self):
with Flow("auto") as f:
res = AddTask()(x=1, y=2)
@@ -2771,11 +2765,9 @@ def test_flow_register_sets_universal_run_if_empty(self, monkeypatch):
monkeypatch.setattr("prefect.Client", MagicMock())
f = Flow(name="test")
- f.environment = None
f.register("My-project", build=False)
assert isinstance(f.run_config, UniversalRun)
- @pytest.mark.parametrize("kind", ["environment", "run_config"])
@pytest.mark.parametrize(
"storage",
[
@@ -2786,18 +2778,17 @@ def test_flow_register_sets_universal_run_if_empty(self, monkeypatch):
],
)
def test_flow_register_auto_labels_if_labeled_storage_used(
- self, monkeypatch, storage, kind
+ self,
+ monkeypatch,
+ storage,
):
monkeypatch.setattr("prefect.Client", MagicMock())
f = Flow(name="Test me!! I should get labeled", storage=storage)
- if kind == "run_config":
- obj = f.run_config = LocalRun(labels=["test-label"])
- else:
- obj = f.environment = LocalEnvironment(labels=["test-label"])
+ run_config = f.run_config = LocalRun(labels=["test-label"])
f.register("My-project", build=False)
- assert obj.labels == {"test-label", *storage.labels}
+ assert run_config.labels == {"test-label", *storage.labels}
@pytest.mark.parametrize(
"storage",
@@ -2837,7 +2828,7 @@ def test_flow_register_doesnt_overwrite_labels_if_local_storage_is_used(
monkeypatch.setattr("prefect.Client", MagicMock())
f = Flow(
name="test",
- environment=prefect.environments.LocalEnvironment(labels=["foo"]),
+ run_config=prefect.run_configs.LocalRun(labels=["foo"]),
)
assert f.storage is None
@@ -2847,8 +2838,8 @@ def test_flow_register_doesnt_overwrite_labels_if_local_storage_is_used(
f.register("My-project")
assert isinstance(f.storage, prefect.storage.Local)
- assert "foo" in f.environment.labels
- assert len(f.environment.labels) == 2
+ assert "foo" in f.run_config.labels
+ assert len(f.run_config.labels) == 2
def test_flow_register_errors_if_in_flow_context(self):
with pytest.raises(ValueError) as exc:
@@ -2858,15 +2849,6 @@ def test_flow_register_errors_if_in_flow_context(self):
exc.value
)
- def test_flow_register_warns_if_mixing_environment_and_executor(self, monkeypatch):
- monkeypatch.setattr("prefect.Client", MagicMock())
- flow = Flow(
- name="test", environment=LocalEnvironment(), executor=LocalExecutor()
- )
-
- with pytest.warns(UserWarning, match="This flow is using the deprecated"):
- flow.register("testing", build=False)
-
def test_bad_flow_runner_code_still_returns_state_obj():
class BadFlowRunner(prefect.engine.flow_runner.FlowRunner):
@@ -3245,17 +3227,13 @@ def return_x(x, param):
}
-@pytest.mark.parametrize("kind", ["environment", "run_config"])
-def test_run_agent_passes_flow_labels(monkeypatch, kind):
+def test_run_agent_passes_flow_labels(monkeypatch):
agent = MagicMock()
monkeypatch.setattr("prefect.agent.local.LocalAgent", agent)
labels = ["test", "test", "test2"]
f = Flow("test")
- if kind == "run_config":
- f.run_config = LocalRun(labels=labels)
- else:
- f.environment = LocalEnvironment(labels=labels)
+ f.run_config = LocalRun(labels=labels)
f.run_agent()
assert type(agent.call_args[1]["labels"]) is list
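
Reviewer note: with `LocalEnvironment` removed, labels that these tests previously parametrized across both mechanisms now live only on run configs. A minimal sketch of the surviving pattern; note that run configs normalize labels into a set, which is what the register/auto-label assertions above rely on:

from prefect import Flow
from prefect.run_configs import LocalRun

flow = Flow("example", run_config=LocalRun(labels=["test", "test", "test2"]))
assert flow.run_config.labels == {"test", "test2"}  # deduplicated into a set
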
diff --git a/tests/core/test_parameter.py b/tests/core/test_parameter.py
index 111de229a596..1ddec221f2c8 100644
--- a/tests/core/test_parameter.py
+++ b/tests/core/test_parameter.py
@@ -101,19 +101,6 @@ def test_copy_requires_name():
x.copy()
-def test_deprecated_parameter_in_task_module():
- """
- Deprecated test that asserts that backwards compatible access works after 0.12
- Can be removed once the backwards compatibility is no longer maintained.
- """
- from prefect.core.task import Parameter as OldParameter
-
- with pytest.warns(UserWarning, match="please import as"):
- p = OldParameter("hello")
-
- assert isinstance(p, Parameter)
-
-
class TestDateTimeParameter:
@prefect.task
def return_value(x):
diff --git a/tests/engine/cloud/test_cloud_flow_runner.py b/tests/engine/cloud/test_cloud_flow_runner.py
index 39cf879e5d21..3542e9609ac7 100644
--- a/tests/engine/cloud/test_cloud_flow_runner.py
+++ b/tests/engine/cloud/test_cloud_flow_runner.py
@@ -457,7 +457,7 @@ def test_cloud_task_runners_submitted_to_remote_machines_respect_original_config
def my_run_task(*args, **kwargs):
with prefect.utilities.configuration.set_temporary_config(
- {"cloud.send_flow_run_logs": False, "cloud.auth_token": ""}
+ {"cloud.send_flow_run_logs": False, "cloud.api_key": ""}
):
return run_task(*args, **kwargs)
@@ -491,14 +491,14 @@ def log_stuff():
logger.critical("important log right here")
return (
prefect.context.config.special_key,
- prefect.context.config.cloud.auth_token,
+ prefect.context.config.cloud.api_key,
)
with prefect.utilities.configuration.set_temporary_config(
{
"cloud.send_flow_run_logs": True,
"special_key": 42,
- "cloud.auth_token": "original",
+ "cloud.api_key": "original",
}
):
# captures config at init
diff --git a/tests/engine/cloud/test_cloud_flows.py b/tests/engine/cloud/test_cloud_flows.py
index 1b9669c62510..46796c4a9d64 100644
--- a/tests/engine/cloud/test_cloud_flows.py
+++ b/tests/engine/cloud/test_cloud_flows.py
@@ -83,7 +83,7 @@ def cloud_settings():
with set_temporary_config(
{
"cloud.graphql": "http://my-cloud.foo",
- "cloud.auth_token": "token",
+ "cloud.api_key": "api-key",
"cloud.queue_interval": 0.1,
"engine.flow_runner.default_class": "prefect.engine.cloud.CloudFlowRunner",
"engine.task_runner.default_class": "prefect.engine.cloud.CloudTaskRunner",
diff --git a/tests/engine/cloud/test_cloud_task_runner.py b/tests/engine/cloud/test_cloud_task_runner.py
index f9288f3dbafb..723c66f00ba9 100644
--- a/tests/engine/cloud/test_cloud_task_runner.py
+++ b/tests/engine/cloud/test_cloud_task_runner.py
@@ -43,7 +43,7 @@ def cloud_settings():
{
"engine.flow_runner.default_class": "prefect.engine.cloud.CloudFlowRunner",
"engine.task_runner.default_class": "prefect.engine.cloud.CloudTaskRunner",
- "cloud.auth_token": "token",
+ "cloud.api_key": "api-key",
}
):
yield
diff --git a/tests/engine/test_serializers.py b/tests/engine/test_serializers.py
index 10aa3abc2f9f..c19095d031aa 100644
--- a/tests/engine/test_serializers.py
+++ b/tests/engine/test_serializers.py
@@ -137,8 +137,8 @@ def test_constructor_accepts_standard_formats(self, format) -> None:
)
def test_constructor_rejects_missing_format_libs(self) -> None:
- with pytest.raises(ImportError, match="'foo' is not installed"):
- CompressedSerializer(PickleSerializer(), format="foo")
+ with pytest.raises(ImportError, match="'foobar' is not installed"):
+ CompressedSerializer(PickleSerializer(), format="foobar")
def test_constructor_rejects_format_libs_without_compression(self) -> None:
with pytest.raises(
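
Reviewer note: renaming the fake format from `foo` to `foobar` makes this negative test less likely to collide with a real importable module on a developer's machine; the assertion itself is unchanged. A minimal sketch of the behavior under test, assuming `CompressedSerializer` resolves its `format` argument by importing a module of that name:

import pytest

from prefect.engine.serializers import CompressedSerializer, PickleSerializer

# No module named "foobar" is installed, so construction should fail loudly.
with pytest.raises(ImportError, match="'foobar' is not installed"):
    CompressedSerializer(PickleSerializer(), format="foobar")
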
diff --git a/tests/engine/test_task_runner.py b/tests/engine/test_task_runner.py
index c9a922d53b67..647fc9817cd5 100644
--- a/tests/engine/test_task_runner.py
+++ b/tests/engine/test_task_runner.py
@@ -137,13 +137,10 @@ def test_task_with_error_has_helpful_messages(caplog):
task_runner = TaskRunner(task=ErrorTask())
state = task_runner.run()
assert state.is_failed()
- exc_repr = (
- # Support py3.6 exception reprs
- "ValueError('custom-error-message',)"
- if sys.version_info < (3, 7)
- else "ValueError('custom-error-message')"
+ assert (
+ state.message
+ == f"Error during execution of task: ValueError('custom-error-message')"
)
- assert state.message == f"Error during execution of task: {exc_repr}"
assert "ValueError: custom-error-message" in caplog.text
assert "Traceback" in caplog.text # Traceback should be included
assert (
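
Reviewer note: the py3.6 branch can be dropped because Python 3.7 changed the repr of single-argument exceptions, removing the trailing comma, and 3.6 support is removed elsewhere in this diff. A quick illustration (runs on 3.7+):

err = ValueError("custom-error-message")
# Python 3.6:  ValueError('custom-error-message',)
# Python 3.7+: ValueError('custom-error-message')
assert repr(err) == "ValueError('custom-error-message')"
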
diff --git a/tests/environments/__init__.py b/tests/environments/__init__.py
deleted file mode 100644
index 5871ed8eef2f..000000000000
--- a/tests/environments/__init__.py
+++ /dev/null
@@ -1 +0,0 @@
-import pytest
diff --git a/tests/environments/execution/__init__.py b/tests/environments/execution/__init__.py
deleted file mode 100644
index bfcf540f0eb5..000000000000
--- a/tests/environments/execution/__init__.py
+++ /dev/null
@@ -1,7 +0,0 @@
-import pytest
-
-pytest.importorskip("boto3")
-pytest.importorskip("botocore")
-pytest.importorskip("dask_kubernetes")
-pytest.importorskip("kubernetes")
-pytest.importorskip("yaml")
diff --git a/tests/environments/execution/test_base_environment.py b/tests/environments/execution/test_base_environment.py
deleted file mode 100644
index 34fa7dac3741..000000000000
--- a/tests/environments/execution/test_base_environment.py
+++ /dev/null
@@ -1,119 +0,0 @@
-import os
-from unittest.mock import MagicMock
-
-import pytest
-
-import prefect
-from prefect import Flow
-from prefect.environments import Environment
-from prefect.environments.execution import load_and_run_flow
-from prefect.storage import Docker, Local, Storage
-from prefect.utilities.configuration import set_temporary_config
-from prefect.utilities.graphql import GraphQLResult
-
-
-def test_create_environment():
- environment = Environment()
- assert environment
- assert environment.labels == set()
- assert environment.on_start is None
- assert environment.on_exit is None
- assert environment.metadata == {}
- assert environment.logger.name == "prefect.Environment"
-
-
-def test_create_environment_converts_labels_to_set():
- environment = Environment(labels=["a", "b", "a"])
- assert environment
- assert environment.labels == set(["a", "b"])
- assert environment.logger.name == "prefect.Environment"
-
-
-def test_create_environment_metadata():
- environment = Environment(metadata={"test": "here"})
- assert environment
- assert environment.metadata == {"test": "here"}
-
-
-def test_create_environment_callbacks():
- def f():
- pass
-
- environment = Environment(on_start=f, on_exit=f)
- assert environment.on_start is f
- assert environment.on_exit is f
-
-
-def test_environment_dependencies():
- environment = Environment()
- assert environment.dependencies == []
-
-
-def test_setup_environment_passes():
- environment = Environment()
- environment.setup(flow=Flow("test", storage=Docker()))
- assert environment
-
-
-def test_execute_environment_passes():
- environment = Environment()
- environment.execute(flow=Flow("test", storage=Docker()))
- assert environment
-
-
-def test_serialize_environment():
- environment = Environment()
- env = environment.serialize()
- assert env["type"] == "Environment"
-
-
-def test_load_and_run_flow(monkeypatch, tmpdir):
- myflow = Flow("test-flow")
-
- # This is gross. Since the flow is pickled/unpickled, there's no easy way
- # to access the same object to set a flag. Resort to setting an environment
- # variable as a global flag that won't get copied eagerly through
- # cloudpickle.
- monkeypatch.setenv("TEST_RUN_CALLED", "FALSE")
-
- class MyEnvironment(Environment):
- def run(self, flow):
- assert flow is myflow
- os.environ["TEST_RUN_CALLED"] = "TRUE"
-
- myflow.environment = MyEnvironment()
-
- storage = Local(str(tmpdir))
- myflow.storage = storage
- storage.add_flow(myflow)
-
- gql_return = MagicMock(
- return_value=MagicMock(
- data=MagicMock(
- flow_run=[
- GraphQLResult(
- {
- "flow": GraphQLResult(
- {"name": myflow.name, "storage": storage.serialize()}
- )
- }
- )
- ],
- )
- )
- )
- client = MagicMock()
- client.return_value.graphql = gql_return
- monkeypatch.setattr("prefect.environments.execution.base.Client", client)
-
- with set_temporary_config({"cloud.auth_token": "test"}), prefect.context(
- {"flow_run_id": "id"}
- ):
- load_and_run_flow()
- assert os.environ["TEST_RUN_CALLED"] == "TRUE"
-
-
-def test_load_and_run_flow_no_flow_run_id_in_context(monkeypatch, tmpdir):
- with set_temporary_config({"cloud.auth_token": "test"}):
- with pytest.raises(ValueError):
- load_and_run_flow()
diff --git a/tests/environments/execution/test_dask_cloud_provider.py b/tests/environments/execution/test_dask_cloud_provider.py
deleted file mode 100644
index 599418889d89..000000000000
--- a/tests/environments/execution/test_dask_cloud_provider.py
+++ /dev/null
@@ -1,109 +0,0 @@
-import os
-import tempfile
-
-import cloudpickle
-import pytest
-
-pytest.importorskip("dask_cloudprovider")
-pytest.importorskip("dask_cloudprovider.aws")
-
-from distributed.deploy import Cluster
-
-from prefect.environments.execution import DaskCloudProviderEnvironment
-
-from dask_cloudprovider.aws import FargateCluster
-
-
-def test_create_environment():
- environment = DaskCloudProviderEnvironment(Cluster)
- assert environment
-
-
-def test_create_dask_cloud_provider_environment():
- environment = DaskCloudProviderEnvironment(provider_class=FargateCluster)
- assert environment
- assert environment.executor_kwargs == {}
- assert environment.labels == set()
- assert environment._on_execute is None
- assert environment.on_start is None
- assert environment.on_exit is None
- assert environment.metadata == {}
- assert environment.logger.name == "prefect.DaskCloudProviderEnvironment"
-
-
-def test_create_dask_cloud_provider_environment_with_executor_kwargs():
- environment = DaskCloudProviderEnvironment(
- provider_class=FargateCluster, executor_kwargs={"test": "here"}
- )
- assert environment
- assert environment.executor_kwargs == {"test": "here"}
-
-
-def test_create_dask_cloud_provider_environment_labels():
- environment = DaskCloudProviderEnvironment(
- provider_class=FargateCluster, labels=["foo"]
- )
- assert environment
- assert environment.labels == set(["foo"])
-
-
-def test_create_dask_cloud_provider_environment_callbacks():
- def f():
- pass
-
- environment = DaskCloudProviderEnvironment(
- provider_class=FargateCluster,
- labels=["foo"],
- on_execute=f,
- on_start=f,
- on_exit=f,
- )
- assert environment
- assert environment.labels == set(["foo"])
- assert environment._on_execute is f
- assert environment.on_start is f
- assert environment.on_exit is f
-
-
-def test_dask_cloud_provider_environment_dependencies():
- environment = DaskCloudProviderEnvironment(provider_class=FargateCluster)
- assert environment.dependencies == ["dask_cloudprovider"]
-
-
-def test_create_dask_cloud_provider_environment_aws_creds_provided():
- environment = DaskCloudProviderEnvironment(
- provider_class=FargateCluster,
- labels=["foo"],
- aws_access_key_id="id",
- aws_secret_access_key="secret",
- aws_session_token="session",
- region_name="region",
- )
- assert environment
- assert environment.labels == set(["foo"])
- assert environment._provider_kwargs["aws_access_key_id"] == "id"
- assert environment._provider_kwargs["aws_secret_access_key"] == "secret"
- assert environment._provider_kwargs["aws_session_token"] == "session"
- assert environment._provider_kwargs["region_name"] == "region"
-
-
-def test_roundtrip_cloudpickle():
- with tempfile.TemporaryDirectory() as directory:
-
- with open(os.path.join(directory, "job.yaml"), "w+") as file:
- file.write("job")
-
- environment = DaskCloudProviderEnvironment(
- provider_class=FargateCluster,
- cluster="test",
- skip_cleanup=False,
- )
-
- assert environment._provider_kwargs == {
- "cluster": "test",
- "skip_cleanup": False,
- }
-
- new = cloudpickle.loads(cloudpickle.dumps(environment))
- assert isinstance(new, DaskCloudProviderEnvironment)
- assert new._provider_kwargs == {"cluster": "test", "skip_cleanup": False}
diff --git a/tests/environments/execution/test_dask_k8s_environment.py b/tests/environments/execution/test_dask_k8s_environment.py
deleted file mode 100644
index 6242ccf4745f..000000000000
--- a/tests/environments/execution/test_dask_k8s_environment.py
+++ /dev/null
@@ -1,648 +0,0 @@
-import os
-import tempfile
-from os import path
-from unittest.mock import MagicMock
-
-import cloudpickle
-import pytest
-import yaml
-
-import prefect
-from prefect.environments import DaskKubernetesEnvironment
-from prefect.storage import Docker, Local
-from prefect.utilities.configuration import set_temporary_config
-from prefect.utilities.graphql import GraphQLResult
-
-base_flow = prefect.Flow("test", storage=Docker())
-
-
-def test_create_dask_environment():
- environment = DaskKubernetesEnvironment()
- assert environment
- assert environment.min_workers == 1
- assert environment.max_workers == 2
- assert environment.work_stealing is True
- assert environment.scheduler_logs is False
- assert environment.private_registry is False
- assert environment.docker_secret is None
- assert environment.labels == set()
- assert environment.on_start is None
- assert environment.on_exit is None
- assert environment.metadata == {}
- assert environment.logger.name == "prefect.DaskKubernetesEnvironment"
- assert environment.image_pull_secret is None
-
-
-def test_create_dask_environment_args():
- environment = DaskKubernetesEnvironment(
- min_workers=5,
- max_workers=6,
- work_stealing=False,
- scheduler_logs=True,
- private_registry=True,
- docker_secret="docker",
- metadata={"test": "here"},
- image_pull_secret="secret",
- )
- assert environment
- assert environment.min_workers == 5
- assert environment.max_workers == 6
- assert environment.work_stealing is False
- assert environment.scheduler_logs is True
- assert environment.private_registry is True
- assert environment.docker_secret == "docker"
- assert environment.metadata == {"test": "here"}
- assert environment.image_pull_secret == "secret"
-
-
-def test_create_dask_environment_multiple_image_secrets_in_args():
- environment = DaskKubernetesEnvironment(
- min_workers=5,
- max_workers=6,
- work_stealing=False,
- scheduler_logs=True,
- private_registry=True,
- docker_secret="docker",
- metadata={"test": "here"},
- image_pull_secret="some-cred,different-cred",
- )
- assert environment.image_pull_secret == "some-cred,different-cred"
-
-
-def test_create_dask_environment_labels():
- environment = DaskKubernetesEnvironment(labels=["foo"])
- assert environment.labels == set(["foo"])
-
-
-def test_create_dask_environment_callbacks():
- def f():
- pass
-
- environment = DaskKubernetesEnvironment(labels=["foo"], on_start=f, on_exit=f)
- assert environment.labels == set(["foo"])
- assert environment.on_start is f
- assert environment.on_exit is f
-
-
-def test_dask_environment_dependencies():
- environment = DaskKubernetesEnvironment()
- assert environment.dependencies == ["kubernetes"]
-
-
-def test_create_dask_environment_identifier_label():
- environment = DaskKubernetesEnvironment()
- assert environment.identifier_label
-
-
-def test_create_dask_environment_identifier_label_none():
- environment = DaskKubernetesEnvironment()
- environment._identifier_label = None
- assert environment.identifier_label
-
-
-def test_setup_dask_environment_passes():
- environment = DaskKubernetesEnvironment()
- environment.setup(flow=base_flow)
- assert environment
-
-
-def test_setup_doesnt_pass_if_private_registry(monkeypatch):
- environment = DaskKubernetesEnvironment(private_registry=True)
- assert environment.docker_secret == "DOCKER_REGISTRY_CREDENTIALS"
-
- config = MagicMock()
- monkeypatch.setattr("kubernetes.config", config)
-
- v1 = MagicMock()
- v1.list_namespaced_secret.return_value = MagicMock(items=[])
- monkeypatch.setattr(
- "kubernetes.client", MagicMock(CoreV1Api=MagicMock(return_value=v1))
- )
-
- create_secret = MagicMock()
- monkeypatch.setattr(
- "prefect.environments.DaskKubernetesEnvironment._create_namespaced_secret",
- create_secret,
- )
- with set_temporary_config({"cloud.auth_token": "test"}):
- environment.setup(flow=base_flow)
-
- assert create_secret.called
-
-
-def test_create_secret_isnt_called_if_exists(monkeypatch):
- environment = DaskKubernetesEnvironment(private_registry=True)
-
- config = MagicMock()
- monkeypatch.setattr("kubernetes.config", config)
-
- secret = MagicMock()
- secret.metadata.name = "foo-docker"
- v1 = MagicMock()
- v1.list_namespaced_secret.return_value = MagicMock(items=[secret])
- monkeypatch.setattr(
- "kubernetes.client", MagicMock(CoreV1Api=MagicMock(return_value=v1))
- )
-
- create_secret = MagicMock()
- monkeypatch.setattr(
- "prefect.environments.DaskKubernetesEnvironment._create_namespaced_secret",
- create_secret,
- )
- with set_temporary_config({"cloud.auth_token": "test"}):
- with prefect.context(namespace="foo"):
- environment.setup(flow=base_flow)
-
- assert not create_secret.called
-
-
-def test_execute(monkeypatch):
- environment = DaskKubernetesEnvironment()
-
- config = MagicMock()
- monkeypatch.setattr("kubernetes.config", config)
-
- batchv1 = MagicMock()
- monkeypatch.setattr(
- "kubernetes.client", MagicMock(BatchV1Api=MagicMock(return_value=batchv1))
- )
-
- environment = DaskKubernetesEnvironment()
- storage = Docker(registry_url="test1", image_name="test2", image_tag="test3")
-
- flow = base_flow
- flow.storage = storage
- with set_temporary_config({"cloud.auth_token": "test"}):
- environment.execute(flow=flow)
-
- assert (
- batchv1.create_namespaced_job.call_args[1]["body"]["apiVersion"] == "batch/v1"
- )
-
-
-def test_create_namespaced_job_fails_outside_cluster():
- environment = DaskKubernetesEnvironment()
- storage = Docker(registry_url="test1", image_name="test2", image_tag="test3")
-
- with pytest.raises(EnvironmentError):
- with set_temporary_config({"cloud.auth_token": "test"}):
- flow = base_flow
- flow.storage = storage
- with set_temporary_config({"cloud.auth_token": "test"}):
- environment.execute(flow=flow)
-
-
-def test_environment_run(monkeypatch):
- from prefect.executors import DaskExecutor
-
- start_func = MagicMock()
- exit_func = MagicMock()
-
- flow = prefect.Flow("my-flow")
- flow.environment = DaskKubernetesEnvironment(
- on_start=start_func,
- on_exit=exit_func,
- min_workers=3,
- max_workers=5,
- )
-
- flow_runner = MagicMock()
- flow_runner_class = MagicMock(return_value=flow_runner)
- monkeypatch.setattr(
- "prefect.engine.get_default_flow_runner_class",
- MagicMock(return_value=flow_runner_class),
- )
-
- kube_cluster = MagicMock()
- kube_cluster.scheduler_address = "tcp://fake-address:8786"
- kube_cluster_class = MagicMock()
- kube_cluster_class.from_dict.return_value = kube_cluster
- monkeypatch.setattr("dask_kubernetes.KubeCluster", kube_cluster_class)
-
- with set_temporary_config({"cloud.auth_token": "test"}), prefect.context(
- {"flow_run_id": "id", "namespace": "mynamespace"}
- ):
- flow.environment.run(flow)
-
- # Flow runner creation
- assert flow_runner_class.call_args[1]["flow"] is flow
-
- # Kube cluster is created with proper config
- assert kube_cluster_class.from_dict.called
- assert kube_cluster_class.from_dict.call_args[1]["namespace"] == "mynamespace"
-
- # Kube cluster adapt is called with config
- assert kube_cluster.adapt.called
- assert kube_cluster.adapt.call_args[1]["minimum"] == 3
- assert kube_cluster.adapt.call_args[1]["maximum"] == 5
-
- # Flow runner run is called with proper executor
- assert flow_runner.run.called
- executor = flow_runner.run.call_args[1]["executor"]
- assert isinstance(executor, DaskExecutor)
- assert executor.address == kube_cluster.scheduler_address
-
- # start/exit callbacks are called
- assert start_func.called
- assert exit_func.called
-
-
-def test_populate_job_yaml():
- environment = DaskKubernetesEnvironment(
- work_stealing=True, scheduler_logs=True, log_k8s_errors=True
- )
-
- file_path = os.path.dirname(prefect.environments.execution.dask.k8s.__file__)
-
- with open(path.join(file_path, "job.yaml")) as job_file:
- job = yaml.safe_load(job_file)
-
- with set_temporary_config(
- {
- "cloud.graphql": "gql_test",
- "cloud.auth_token": "auth_test",
- "logging.extra_loggers": ["test_logger"],
- }
- ):
- with prefect.context(flow_run_id="id_test", namespace="namespace_test"):
- yaml_obj = environment._populate_job_yaml(
- yaml_obj=job, docker_name="test1/test2:test3"
- )
-
- assert yaml_obj["metadata"]["name"] == "prefect-dask-job-{}".format(
- environment.identifier_label
- )
- assert (
- yaml_obj["metadata"]["labels"]["prefect.io/identifier"]
- == environment.identifier_label
- )
- assert yaml_obj["metadata"]["labels"]["prefect.io/flow_run_id"] == "id_test"
- assert (
- yaml_obj["spec"]["template"]["metadata"]["labels"]["prefect.io/identifier"]
- == environment.identifier_label
- )
-
- env = yaml_obj["spec"]["template"]["spec"]["containers"][0]["env"]
-
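- # _populate_job_yaml appends env vars in a fixed order, so indexing into
- # the list positionally is stable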
- assert env[0]["value"] == "gql_test"
- assert env[1]["value"] == "auth_test"
- assert env[2]["value"] == "id_test"
- assert env[3]["value"] == "namespace_test"
- assert env[4]["value"] == "test1/test2:test3"
- assert env[12]["value"] == "True"
- assert (
- env[13]["value"]
- == "['test_logger', 'dask_kubernetes.core', 'distributed.deploy.adaptive', 'kubernetes', 'distributed.scheduler']"
- )
-
- assert (
- yaml_obj["spec"]["template"]["spec"]["containers"][0]["image"]
- == "test1/test2:test3"
- )
-
-
-def test_populate_job_yaml_multiple_image_secrets():
- environment = DaskKubernetesEnvironment(
- image_pull_secret="good-secret,dangerous-secret"
- )
-
- file_path = os.path.dirname(prefect.environments.execution.dask.k8s.__file__)
-
- with open(path.join(file_path, "job.yaml")) as job_file:
- job = yaml.safe_load(job_file)
-
- with set_temporary_config(
- {
- "cloud.graphql": "gql_test",
- "cloud.auth_token": "auth_test",
- "logging.extra_loggers": ["test_logger"],
- }
- ):
- with prefect.context(flow_run_id="id_test", namespace="namespace_test"):
- yaml_obj = environment._populate_job_yaml(
- yaml_obj=job, docker_name="test1/test2:test3"
- )
-
- expected_secrets = [dict(name="good-secret"), dict(name="dangerous-secret")]
- assert yaml_obj["spec"]["template"]["spec"]["imagePullSecrets"] == expected_secrets
-
-
-def test_populate_worker_pod_yaml():
- environment = DaskKubernetesEnvironment()
-
- file_path = os.path.dirname(prefect.environments.execution.dask.k8s.__file__)
-
- with open(path.join(file_path, "worker_pod.yaml")) as pod_file:
- pod = yaml.safe_load(pod_file)
-
- with set_temporary_config(
- {
- "cloud.graphql": "gql_test",
- "cloud.auth_token": "auth_test",
- "logging.extra_loggers": ["test_logger"],
- }
- ):
- with prefect.context(flow_run_id="id_test", image="my_image"):
- yaml_obj = environment._populate_worker_pod_yaml(yaml_obj=pod)
-
- assert (
- yaml_obj["metadata"]["labels"]["prefect.io/identifier"]
- == environment.identifier_label
- )
- assert yaml_obj["metadata"]["labels"]["prefect.io/flow_run_id"] == "id_test"
-
- env = yaml_obj["spec"]["containers"][0]["env"]
-
- assert env[0]["value"] == "gql_test"
- assert env[1]["value"] == "auth_test"
- assert env[2]["value"] == "id_test"
- assert (
- env[10]["value"]
- == "['test_logger', 'dask_kubernetes.core', 'distributed.deploy.adaptive']"
- )
-
- assert yaml_obj["spec"]["containers"][0]["image"] == "my_image"
-
-
-def test_populate_worker_pod_yaml_with_private_registry():
- environment = DaskKubernetesEnvironment(private_registry=True)
-
- file_path = os.path.dirname(prefect.environments.execution.dask.k8s.__file__)
-
- with open(path.join(file_path, "worker_pod.yaml")) as pod_file:
- pod = yaml.safe_load(pod_file)
-
- with set_temporary_config(
- {"cloud.graphql": "gql_test", "cloud.auth_token": "auth_test"}
- ):
- with prefect.context(
- flow_run_id="id_test", image="my_image", namespace="foo-man"
- ):
- yaml_obj = environment._populate_worker_pod_yaml(yaml_obj=pod)
-
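- # private_registry=True derives the pull-secret name from the namespace
- # ("<namespace>-docker")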
- assert yaml_obj["spec"]["imagePullSecrets"][0] == dict(name="foo-man-docker")
-
-
-def test_populate_worker_pod_yaml_with_image_pull_secret():
- environment = DaskKubernetesEnvironment(image_pull_secret="mysecret")
-
- file_path = os.path.dirname(prefect.environments.execution.dask.k8s.__file__)
-
- with open(path.join(file_path, "worker_pod.yaml")) as pod_file:
- pod = yaml.safe_load(pod_file)
-
- with set_temporary_config(
- {"cloud.graphql": "gql_test", "cloud.auth_token": "auth_test"}
- ):
- with prefect.context(
- flow_run_id="id_test", image="my_image", namespace="foo-man"
- ):
- yaml_obj = environment._populate_worker_pod_yaml(yaml_obj=pod)
-
- assert yaml_obj["spec"]["imagePullSecrets"][0] == dict(name="mysecret")
-
-
-def test_populate_worker_pod_yaml_with_multiple_image_pull_secrets():
- environment = DaskKubernetesEnvironment(image_pull_secret="some-secret,another-one")
-
- file_path = os.path.dirname(prefect.environments.execution.dask.k8s.__file__)
-
- with open(path.join(file_path, "worker_pod.yaml")) as pod_file:
- pod = yaml.safe_load(pod_file)
-
- with set_temporary_config(
- {"cloud.graphql": "gql_test", "cloud.auth_token": "auth_test"}
- ):
- with prefect.context(
- flow_run_id="id_test", image="my_image", namespace="foo-man"
- ):
- yaml_obj = environment._populate_worker_pod_yaml(yaml_obj=pod)
-
- assert yaml_obj["spec"]["imagePullSecrets"] == [
- dict(name="some-secret"),
- dict(name="another-one"),
- ]
-
-
-def test_initialize_environment_with_spec_populates():
-
- with tempfile.TemporaryDirectory() as directory:
-
- with open(os.path.join(directory, "scheduler.yaml"), "w+") as file:
- file.write("scheduler")
- with open(os.path.join(directory, "worker.yaml"), "w+") as file:
- file.write("worker")
-
- environment = DaskKubernetesEnvironment(
- scheduler_spec_file=os.path.join(directory, "scheduler.yaml"),
- worker_spec_file=os.path.join(directory, "worker.yaml"),
- )
-
- assert environment._scheduler_spec == "scheduler"
- assert environment._worker_spec == "worker"
-
-
-@pytest.mark.parametrize("log_flag", [True, False])
-def test_populate_custom_worker_spec_yaml(log_flag):
- environment = DaskKubernetesEnvironment()
-
- file_path = os.path.dirname(prefect.environments.execution.dask.k8s.__file__)
-
- with open(path.join(file_path, "worker_pod.yaml")) as pod_file:
- pod = yaml.safe_load(pod_file)
- pod["spec"]["containers"][0]["env"] = []
-
- with set_temporary_config(
- {
- "cloud.graphql": "gql_test",
- "cloud.auth_token": "auth_test",
- "cloud.send_flow_run_logs": log_flag,
- "logging.extra_loggers": ["test_logger"],
- }
- ):
- with prefect.context(flow_run_id="id_test", image="my_image"):
- yaml_obj = environment._populate_worker_spec_yaml(yaml_obj=pod)
-
- assert (
- yaml_obj["metadata"]["labels"]["prefect.io/identifier"]
- == environment.identifier_label
- )
- assert yaml_obj["metadata"]["labels"]["prefect.io/flow_run_id"] == "id_test"
-
- env = yaml_obj["spec"]["containers"][0]["env"]
-
- assert env[0]["value"] == "gql_test"
- assert env[1]["value"] == "auth_test"
- assert env[2]["value"] == "id_test"
- assert env[3]["value"] == "false"
- assert env[4]["value"] == "prefect.engine.cloud.CloudFlowRunner"
- assert env[5]["value"] == "prefect.engine.cloud.CloudTaskRunner"
- assert env[6]["value"] == "prefect.executors.DaskExecutor"
- assert env[7]["value"] == str(log_flag).lower()
- assert env[8]["value"] == "INFO"
- assert (
- env[9]["value"]
- == "['test_logger', 'dask_kubernetes.core', 'distributed.deploy.adaptive']"
- )
-
- assert yaml_obj["spec"]["containers"][0]["image"] == "my_image"
-
-
-@pytest.mark.parametrize("log_flag", [True, False])
-def test_populate_custom_scheduler_spec_yaml(log_flag):
- environment = DaskKubernetesEnvironment()
-
- file_path = os.path.dirname(prefect.environments.execution.dask.k8s.__file__)
-
- with open(path.join(file_path, "job.yaml")) as job_file:
- job = yaml.safe_load(job_file)
- job["spec"]["template"]["spec"]["containers"][0]["env"] = []
-
- with set_temporary_config(
- {
- "cloud.graphql": "gql_test",
- "cloud.auth_token": "auth_test",
- "cloud.send_flow_run_logs": log_flag,
- "logging.extra_loggers": ["test_logger"],
- }
- ):
- with prefect.context(flow_run_id="id_test", namespace="namespace_test"):
- yaml_obj = environment._populate_scheduler_spec_yaml(
- yaml_obj=job, docker_name="test1/test2:test3"
- )
-
- assert yaml_obj["metadata"]["name"] == "prefect-dask-job-{}".format(
- environment.identifier_label
- )
-
- env = yaml_obj["spec"]["template"]["spec"]["containers"][0]["env"]
-
- assert env[0]["value"] == "gql_test"
- assert env[1]["value"] == "auth_test"
- assert env[2]["value"] == "id_test"
- assert env[3]["value"] == "namespace_test"
- assert env[4]["value"] == "test1/test2:test3"
- assert env[5]["value"] == "false"
- assert env[6]["value"] == "prefect.engine.cloud.CloudFlowRunner"
- assert env[7]["value"] == "prefect.engine.cloud.CloudTaskRunner"
- assert env[8]["value"] == "prefect.executors.DaskExecutor"
- assert env[9]["value"] == str(log_flag).lower()
- assert env[10]["value"] == "INFO"
- assert (
- env[11]["value"]
- == "['test_logger', 'dask_kubernetes.core', 'distributed.deploy.adaptive']"
- )
-
- assert (
- yaml_obj["spec"]["template"]["spec"]["containers"][0]["image"]
- == "test1/test2:test3"
- )
-
-
-@pytest.mark.parametrize("log_flag", [True, False])
-def test_populate_custom_yaml_specs_with_logging_vars(log_flag):
- environment = DaskKubernetesEnvironment()
-
- file_path = os.path.dirname(prefect.environments.execution.dask.k8s.__file__)
-
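- # These logging variables are pre-set in the spec; the populate helpers
- # must keep their values instead of overwriting them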
- log_vars = [
- {
- "name": "PREFECT__CLOUD__SEND_FLOW_RUN_LOGS",
- "value": "YES",
- },
- {
- "name": "PREFECT__LOGGING__LEVEL",
- "value": "NO",
- },
- {
- "name": "PREFECT__LOGGING__EXTRA_LOGGERS",
- "value": "MAYBE",
- },
- ]
-
- with open(path.join(file_path, "job.yaml")) as job_file:
- job = yaml.safe_load(job_file)
- job["spec"]["template"]["spec"]["containers"][0]["env"] = []
- job["spec"]["template"]["spec"]["containers"][0]["env"].extend(log_vars)
-
- with set_temporary_config(
- {
- "cloud.graphql": "gql_test",
- "cloud.auth_token": "auth_test",
- "cloud.send_flow_run_logs": log_flag,
- "logging.extra_loggers": ["test_logger"],
- }
- ):
- with prefect.context(flow_run_id="id_test", namespace="namespace_test"):
- yaml_obj = environment._populate_scheduler_spec_yaml(
- yaml_obj=job, docker_name="test1/test2:test3"
- )
-
- assert yaml_obj["metadata"]["name"] == "prefect-dask-job-{}".format(
- environment.identifier_label
- )
-
- env = yaml_obj["spec"]["template"]["spec"]["containers"][0]["env"]
-
- assert env[0]["value"] == "YES"
- assert env[1]["value"] == "NO"
- assert env[2]["value"] == "MAYBE"
- assert len(env) == 12
-
- # worker
- with open(path.join(file_path, "worker_pod.yaml")) as pod_file:
- pod = yaml.safe_load(pod_file)
- pod["spec"]["containers"][0]["env"] = []
- pod["spec"]["containers"][0]["env"].extend(log_vars)
-
- with set_temporary_config(
- {
- "cloud.graphql": "gql_test",
- "cloud.auth_token": "auth_test",
- "cloud.send_flow_run_logs": log_flag,
- "logging.extra_loggers": ["test_logger"],
- }
- ):
- with prefect.context(flow_run_id="id_test", image="my_image"):
- yaml_obj = environment._populate_worker_spec_yaml(yaml_obj=pod)
-
- assert (
- yaml_obj["metadata"]["labels"]["prefect.io/identifier"]
- == environment.identifier_label
- )
- assert yaml_obj["metadata"]["labels"]["prefect.io/flow_run_id"] == "id_test"
-
- env = yaml_obj["spec"]["containers"][0]["env"]
-
- assert env[0]["value"] == "YES"
- assert env[1]["value"] == "NO"
- assert env[2]["value"] == "MAYBE"
- assert len(env) == 10
-
-
-def test_roundtrip_cloudpickle():
- with tempfile.TemporaryDirectory() as directory:
-
- with open(os.path.join(directory, "scheduler.yaml"), "w+") as file:
- file.write("scheduler")
- with open(os.path.join(directory, "worker.yaml"), "w+") as file:
- file.write("worker")
-
- environment = DaskKubernetesEnvironment(
- scheduler_spec_file=os.path.join(directory, "scheduler.yaml"),
- worker_spec_file=os.path.join(directory, "worker.yaml"),
- )
-
- assert environment._scheduler_spec == "scheduler"
- assert environment._worker_spec == "worker"
-
- new = cloudpickle.loads(cloudpickle.dumps(environment))
- assert isinstance(new, DaskKubernetesEnvironment)
- assert new._scheduler_spec == "scheduler"
- assert new._worker_spec == "worker"
-
- # Identifier labels do not persist
- assert environment.identifier_label
- assert new.identifier_label
-
- assert environment.identifier_label != new.identifier_label
diff --git a/tests/environments/execution/test_fargate_task_environment.py b/tests/environments/execution/test_fargate_task_environment.py
deleted file mode 100644
index 74d5c7c541f1..000000000000
--- a/tests/environments/execution/test_fargate_task_environment.py
+++ /dev/null
@@ -1,832 +0,0 @@
-from unittest.mock import MagicMock
-
-import cloudpickle
-import prefect
-import pytest
-from botocore.exceptions import ClientError
-from prefect import Flow, config
-from prefect.executors import LocalDaskExecutor
-from prefect.environments import FargateTaskEnvironment
-from prefect.storage import Docker
-from prefect.utilities.configuration import set_temporary_config
-
-
-def test_create_fargate_task_environment():
- environment = FargateTaskEnvironment()
- assert environment.executor is not None
- assert environment.labels == set()
- assert environment.on_start is None
- assert environment.on_exit is None
- assert environment.metadata == {}
- assert environment.logger.name == "prefect.FargateTaskEnvironment"
-
-
-def test_create_fargate_task_environment_with_executor():
- executor = LocalDaskExecutor()
- environment = FargateTaskEnvironment(executor=executor)
- assert environment.executor is executor
-
-
-def test_create_fargate_task_environment_labels():
- environment = FargateTaskEnvironment(labels=["foo"])
- assert environment.labels == set(["foo"])
-
-
-def test_create_fargate_task_environment_callbacks():
- def f():
- pass
-
- environment = FargateTaskEnvironment(labels=["foo"], on_start=f, on_exit=f)
- assert environment.labels == set(["foo"])
- assert environment.on_start is f
- assert environment.on_exit is f
-
-
-def test_fargate_task_environment_dependencies():
- environment = FargateTaskEnvironment()
- assert environment.dependencies == ["boto3", "botocore"]
-
-
-def test_create_fargate_task_environment_aws_creds_provided():
- environment = FargateTaskEnvironment(
- labels=["foo"],
- aws_access_key_id="id",
- aws_secret_access_key="secret",
- aws_session_token="session",
- region_name="region",
- )
- assert environment.labels == set(["foo"])
- assert environment.aws_access_key_id == "id"
- assert environment.aws_secret_access_key == "secret"
- assert environment.aws_session_token == "session"
- assert environment.region_name == "region"
-
-
-def test_create_fargate_task_environment_aws_creds_environment(monkeypatch):
- monkeypatch.setenv("AWS_ACCESS_KEY_ID", "id")
- monkeypatch.setenv("AWS_SECRET_ACCESS_KEY", "secret")
- monkeypatch.setenv("AWS_SESSION_TOKEN", "session")
- monkeypatch.setenv("REGION_NAME", "region")
-
- environment = FargateTaskEnvironment(labels=["foo"])
- assert environment.labels == set(["foo"])
- assert environment.aws_access_key_id == "id"
- assert environment.aws_secret_access_key == "secret"
- assert environment.aws_session_token == "session"
- assert environment.region_name == "region"
-
-
-def test_parse_task_definition_kwargs():
- environment = FargateTaskEnvironment()
-
- kwarg_dict = {
- "family": "test",
- "taskRoleArn": "test",
- "executionRoleArn": "test",
- "networkMode": "test",
- "containerDefinitions": "test",
- "volumes": "test",
- "placementConstraints": "test",
- "requiresCompatibilities": "test",
- "cpu": "test",
- "memory": "test",
- "tags": "test",
- "pidMode": "test",
- "ipcMode": "test",
- "proxyConfiguration": "test",
- "inferenceAccelerators": "test",
- }
-
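- # placementConstraints and tags are accepted by both register_task_definition
- # and run_task, so _parse_kwargs routes them to both outputs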
- task_definition_kwargs, task_run_kwargs = environment._parse_kwargs(kwarg_dict)
-
- assert task_definition_kwargs == kwarg_dict
- assert task_run_kwargs == {"placementConstraints": "test", "tags": "test"}
-
-
-def test_parse_task_run_kwargs():
- environment = FargateTaskEnvironment()
-
- kwarg_dict = {
- "cluster": "test",
- "taskDefinition": "test",
- "count": "test",
- "startedBy": "test",
- "group": "test",
- "placementConstraints": "test",
- "placementStrategy": "test",
- "platformVersion": "test",
- "networkConfiguration": "test",
- "tags": "test",
- "enableECSManagedTags": "test",
- "propagateTags": "test",
- }
-
- task_definition_kwargs, task_run_kwargs = environment._parse_kwargs(kwarg_dict)
-
- assert task_run_kwargs == kwarg_dict
- assert task_definition_kwargs == {"placementConstraints": "test", "tags": "test"}
-
-
-def test_parse_task_definition_and_run_kwargs():
- environment = FargateTaskEnvironment()
-
- def_kwarg_dict = {
- "family": "test",
- "taskRoleArn": "test",
- "executionRoleArn": "test",
- "networkMode": "test",
- "containerDefinitions": "test",
- "volumes": "test",
- "placementConstraints": "test",
- "requiresCompatibilities": "test",
- "cpu": "test",
- "memory": "test",
- "tags": "test",
- "pidMode": "test",
- "ipcMode": "test",
- "proxyConfiguration": "test",
- "inferenceAccelerators": "test",
- }
-
- run_kwarg_dict = {
- "cluster": "test",
- "taskDefinition": "test",
- "count": "test",
- "startedBy": "test",
- "group": "test",
- "placementConstraints": "test",
- "placementStrategy": "test",
- "platformVersion": "test",
- "networkConfiguration": "test",
- "tags": "test",
- "enableECSManagedTags": "test",
- "propagateTags": "test",
- }
-
- kwarg_dict = {
- "family": "test",
- "taskRoleArn": "test",
- "executionRoleArn": "test",
- "networkMode": "test",
- "containerDefinitions": "test",
- "volumes": "test",
- "placementConstraints": "test",
- "requiresCompatibilities": "test",
- "cpu": "test",
- "memory": "test",
- "tags": "test",
- "pidMode": "test",
- "ipcMode": "test",
- "proxyConfiguration": "test",
- "inferenceAccelerators": "test",
- "cluster": "test",
- "taskDefinition": "test",
- "count": "test",
- "startedBy": "test",
- "group": "test",
- "placementStrategy": "test",
- "platformVersion": "test",
- "networkConfiguration": "test",
- "enableECSManagedTags": "test",
- "propagateTags": "test",
- }
-
- task_definition_kwargs, task_run_kwargs = environment._parse_kwargs(kwarg_dict)
-
- assert task_definition_kwargs == def_kwarg_dict
- assert task_run_kwargs == run_kwarg_dict
-
-
-def test_parse_task_kwargs_invalid_value_removed():
- environment = FargateTaskEnvironment()
-
- kwarg_dict = {"test": "not_real"}
-
- task_definition_kwargs, task_run_kwargs = environment._parse_kwargs(kwarg_dict)
-
- assert task_definition_kwargs == {}
- assert task_run_kwargs == {}
-
-
-def test_setup_definition_exists(monkeypatch):
- existing_task_definition = {
- "containerDefinitions": [
- {
- "environment": [
- {"name": "PREFECT__CLOUD__GRAPHQL", "value": config.cloud.graphql},
- {"name": "PREFECT__CLOUD__USE_LOCAL_SECRETS", "value": "false"},
- {
- "name": "PREFECT__ENGINE__FLOW_RUNNER__DEFAULT_CLASS",
- "value": "prefect.engine.cloud.CloudFlowRunner",
- },
- {
- "name": "PREFECT__ENGINE__TASK_RUNNER__DEFAULT_CLASS",
- "value": "prefect.engine.cloud.CloudTaskRunner",
- },
- {"name": "PREFECT__CLOUD__SEND_FLOW_RUN_LOGS", "value": "true"},
- {
- "name": "PREFECT__LOGGING__EXTRA_LOGGERS",
- "value": str(config.logging.extra_loggers),
- },
- ],
- "name": "flow-container",
- "image": "test/image:tag",
- "command": [
- "/bin/sh",
- "-c",
- "python -c 'import prefect; prefect.environments.execution.load_and_run_flow()'",
- ],
- }
- ],
- }
-
- boto3_client = MagicMock()
- boto3_client.describe_task_definition.return_value = {
- "taskDefinition": existing_task_definition
- }
- monkeypatch.setattr("boto3.client", MagicMock(return_value=boto3_client))
-
- environment = FargateTaskEnvironment()
-
- environment.setup(
- Flow(
- "test",
- storage=Docker(registry_url="test", image_name="image", image_tag="tag"),
- )
- )
-
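- # The rendered definition matches the registered one, so no new revision
- # is registered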
- assert boto3_client.describe_task_definition.called
- assert not boto3_client.register_task_definition.called
-
-
-def test_setup_definition_changed(monkeypatch):
- existing_task_definition = {
- "containerDefinitions": [
- {
- "environment": [
- {"name": "PREFECT__CLOUD__GRAPHQL", "value": config.cloud.graphql},
- {"name": "PREFECT__CLOUD__USE_LOCAL_SECRETS", "value": "false"},
- {
- "name": "PREFECT__ENGINE__FLOW_RUNNER__DEFAULT_CLASS",
- "value": "prefect.engine.cloud.CloudFlowRunner",
- },
- {
- "name": "PREFECT__ENGINE__TASK_RUNNER__DEFAULT_CLASS",
- "value": "prefect.engine.cloud.CloudTaskRunner",
- },
- {"name": "PREFECT__CLOUD__SEND_FLOW_RUN_LOGS", "value": "true"},
- {
- "name": "PREFECT__LOGGING__EXTRA_LOGGERS",
- "value": str(config.logging.extra_loggers),
- },
- ],
- "name": "flow-container",
- "image": "test/image:tag",
- "command": [
- "/bin/sh",
- "-c",
- "python -c 'import prefect; prefect.environments.execution.load_and_run_flow()'",
- ],
- }
- ],
- "memory": 256,
- "cpu": 512,
- }
-
- boto3_client = MagicMock()
- boto3_client.describe_task_definition.return_value = {
- "taskDefinition": existing_task_definition
- }
- monkeypatch.setattr("boto3.client", MagicMock(return_value=boto3_client))
-
- environment = FargateTaskEnvironment(memory=256, cpu=1024)
-
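- # cpu (1024) conflicts with the registered definition (512), so setup()
- # raises instead of silently registering a new revision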
- with pytest.raises(ValueError):
- environment.setup(
- Flow(
- "test",
- storage=Docker(
- registry_url="test", image_name="image", image_tag="newtag"
- ),
- )
- )
-
- assert boto3_client.describe_task_definition.called
- assert not boto3_client.register_task_definition.called
-
-
-def test_validate_definition_not_changed_when_env_out_of_order(monkeypatch):
- existing_task_definition = {
- "containerDefinitions": [
- {
- "environment": [
- {
- "name": "PREFECT__ENGINE__FLOW_RUNNER__DEFAULT_CLASS",
- "value": "prefect.engine.cloud.CloudFlowRunner",
- },
- {"name": "PREFECT__CLOUD__USE_LOCAL_SECRETS", "value": "false"},
- {
- "name": "PREFECT__ENGINE__TASK_RUNNER__DEFAULT_CLASS",
- "value": "prefect.engine.cloud.CloudTaskRunner",
- },
- {"name": "PREFECT__CLOUD__SEND_FLOW_RUN_LOGS", "value": "true"},
- {
- "name": "PREFECT__LOGGING__EXTRA_LOGGERS",
- "value": str(config.logging.extra_loggers),
- },
- # This is added first in _render_task_definition_kwargs, so it's at the end now
- {"name": "PREFECT__CLOUD__GRAPHQL", "value": config.cloud.graphql},
- ],
- "name": "flow-container",
- "image": "test/image:tag",
- "command": [
- "/bin/sh",
- "-c",
- "python -c 'import prefect; prefect.environments.execution.load_and_run_flow()'",
- ],
- }
- ],
- "memory": 256,
- "cpu": 512,
- }
-
- boto3_client = MagicMock()
- boto3_client.describe_task_definition.return_value = {
- "taskDefinition": existing_task_definition
- }
- monkeypatch.setattr("boto3.client", MagicMock(return_value=boto3_client))
-
- environment = FargateTaskEnvironment(memory=256, cpu=512)
-
- environment.setup(
- Flow(
- "test",
- storage=Docker(registry_url="test", image_name="image", image_tag="tag"),
- )
- )
-
- assert boto3_client.describe_task_definition.called
- assert not boto3_client.register_task_definition.called
-
-
-def test_validate_definition_not_changed_when_out_of_order_in_second_container(
- monkeypatch,
-):
- existing_task_definition = {
- "containerDefinitions": [
- {
- "environment": [
- {
- "name": "PREFECT__ENGINE__FLOW_RUNNER__DEFAULT_CLASS",
- "value": "prefect.engine.cloud.CloudFlowRunner",
- },
- {"name": "PREFECT__CLOUD__USE_LOCAL_SECRETS", "value": "false"},
- {
- "name": "PREFECT__ENGINE__TASK_RUNNER__DEFAULT_CLASS",
- "value": "prefect.engine.cloud.CloudTaskRunner",
- },
- {"name": "PREFECT__CLOUD__SEND_FLOW_RUN_LOGS", "value": "true"},
- {
- "name": "PREFECT__LOGGING__EXTRA_LOGGERS",
- "value": str(config.logging.extra_loggers),
- },
- # This is added first in _render_task_definition_kwargs, so it's at the end now
- {"name": "PREFECT__CLOUD__GRAPHQL", "value": config.cloud.graphql},
- ],
- "name": "flow-container",
- "image": "test/image:tag",
- "command": [
- "/bin/sh",
- "-c",
- "python -c 'import prefect; prefect.environments.execution.load_and_run_flow()'",
- ],
- },
- {
- "environment": [
- {
- "name": "foo",
- "value": "bar",
- },
- {
- "name": "foo2",
- "value": "bar2",
- },
- {"name": "PREFECT__CLOUD__GRAPHQL", "value": config.cloud.graphql},
- {"name": "PREFECT__CLOUD__USE_LOCAL_SECRETS", "value": "false"},
- {
- "name": "PREFECT__ENGINE__FLOW_RUNNER__DEFAULT_CLASS",
- "value": "prefect.engine.cloud.CloudFlowRunner",
- },
- {
- "name": "PREFECT__ENGINE__TASK_RUNNER__DEFAULT_CLASS",
- "value": "prefect.engine.cloud.CloudTaskRunner",
- },
- {"name": "PREFECT__CLOUD__SEND_FLOW_RUN_LOGS", "value": "true"},
- {
- "name": "PREFECT__LOGGING__EXTRA_LOGGERS",
- "value": str(config.logging.extra_loggers),
- },
- ],
- "secrets": [
- {"name": "1", "valueFrom": "1"},
- {"name": "2", "valueFrom": "2"},
- ],
- "mountPoints": [
- {"sourceVolume": "1", "containerPath": "1", "readOnly": False},
- {"sourceVolume": "2", "containerPath": "2", "readOnly": False},
- ],
- "extraHosts": [
- {"hostname": "1", "ipAddress": "1"},
- {"hostname": "2", "ipAddress": "2"},
- ],
- "volumesFrom": [
- {"sourceContainer": "1", "readOnly": False},
- {"sourceContainer": "2", "readOnly": False},
- ],
- "ulimits": [
- {"name": "cpu", "softLimit": 1, "hardLimit": 1},
- {"name": "memlock", "softLimit": 2, "hardLimit": 2},
- ],
- "portMappings": [
- {"containerPort": 80, "hostPort": 80, "protocol": "tcp"},
- {"containerPort": 81, "hostPort": 81, "protocol": "tcp"},
- ],
- "logConfiguration": {
- "logDriver": "awslogs",
- "options": {},
- "secretOptions": [
- {"name": "1", "valueFrom": "1"},
- {"name": "2", "valueFrom": "2"},
- ],
- },
- "name": "some-other-container",
- "image": "test/image:tag",
- "command": [
- "/bin/sh",
- ],
- },
- ],
- "memory": 256,
- "cpu": 512,
- }
-
- boto3_client = MagicMock()
- boto3_client.describe_task_definition.return_value = {
- "taskDefinition": existing_task_definition
- }
- monkeypatch.setattr("boto3.client", MagicMock(return_value=boto3_client))
-
- environment = FargateTaskEnvironment(
- memory=256,
- cpu=512,
- containerDefinitions=[
- {},
- {
- "environment": [
- {
- "name": "foo2",
- "value": "bar2",
- },
- {
- "name": "foo",
- "value": "bar",
- },
- ],
- "secrets": [
- {"name": "2", "valueFrom": "2"},
- {"name": "1", "valueFrom": "1"},
- ],
- "mountPoints": [
- {"sourceVolume": "2", "containerPath": "2", "readOnly": False},
- {"sourceVolume": "1", "containerPath": "1", "readOnly": False},
- ],
- "extraHosts": [
- {"hostname": "2", "ipAddress": "2"},
- {"hostname": "1", "ipAddress": "1"},
- ],
- "volumesFrom": [
- {"sourceContainer": "2", "readOnly": False},
- {"sourceContainer": "1", "readOnly": False},
- ],
- "ulimits": [
- {"name": "memlock", "softLimit": 2, "hardLimit": 2},
- {"name": "cpu", "softLimit": 1, "hardLimit": 1},
- ],
- "portMappings": [
- {"containerPort": 81, "hostPort": 81, "protocol": "tcp"},
- {"containerPort": 80, "hostPort": 80, "protocol": "tcp"},
- ],
- "logConfiguration": {
- "logDriver": "awslogs",
- "options": {},
- "secretOptions": [
- {"name": "2", "valueFrom": "2"},
- {"name": "1", "valueFrom": "1"},
- ],
- },
- "name": "some-other-container",
- "image": "test/image:tag",
- "command": [
- "/bin/sh",
- ],
- },
- ],
- )
-
- environment.setup(
- Flow(
- "test",
- storage=Docker(registry_url="test", image_name="image", image_tag="tag"),
- )
- )
-
- assert boto3_client.describe_task_definition.called
- assert not boto3_client.register_task_definition.called
-
-
-def test_validate_definition_not_changed_when_names_are_in_arn(monkeypatch):
- existing_task_definition = {
- "containerDefinitions": [
- {
- "environment": [
- {"name": "PREFECT__CLOUD__GRAPHQL", "value": config.cloud.graphql},
- {"name": "PREFECT__CLOUD__USE_LOCAL_SECRETS", "value": "false"},
- {
- "name": "PREFECT__ENGINE__FLOW_RUNNER__DEFAULT_CLASS",
- "value": "prefect.engine.cloud.CloudFlowRunner",
- },
- {
- "name": "PREFECT__ENGINE__TASK_RUNNER__DEFAULT_CLASS",
- "value": "prefect.engine.cloud.CloudTaskRunner",
- },
- {"name": "PREFECT__CLOUD__SEND_FLOW_RUN_LOGS", "value": "true"},
- {
- "name": "PREFECT__LOGGING__EXTRA_LOGGERS",
- "value": str(config.logging.extra_loggers),
- },
- ],
- "name": "flow-container",
- "image": "test/image:tag",
- "command": [
- "/bin/sh",
- "-c",
- "python -c 'import prefect; prefect.environments.execution.load_and_run_flow()'",
- ],
- }
- ],
- "taskRoleArn": "arn:aws:iam::000000000000:role/my-role-name",
- "memory": 256,
- "cpu": 512,
- }
-
- boto3_client = MagicMock()
- boto3_client.describe_task_definition.return_value = {
- "taskDefinition": existing_task_definition
- }
- monkeypatch.setattr("boto3.client", MagicMock(return_value=boto3_client))
-
- environment = FargateTaskEnvironment(
- memory=256, cpu=512, taskRoleArn="my-role-name"
- )
-
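- # "my-role-name" matches the trailing segment of the registered taskRoleArn,
- # so the definitions compare as unchanged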
- environment.setup(
- Flow(
- "test",
- storage=Docker(registry_url="test", image_name="image", image_tag="tag"),
- )
- )
-
- assert boto3_client.describe_task_definition.called
- assert not boto3_client.register_task_definition.called
-
-
-def test_setup_definition_register(monkeypatch):
- boto3_client = MagicMock()
- boto3_client.describe_task_definition.side_effect = ClientError({}, None)
- boto3_client.register_task_definition.return_value = {}
- monkeypatch.setattr("boto3.client", MagicMock(return_value=boto3_client))
-
- environment = FargateTaskEnvironment(
- family="test",
- containerDefinitions=[
- {
- "name": "flow-container",
- "image": "image",
- "command": [],
- "environment": [],
- "essential": True,
- }
- ],
- )
-
- environment.setup(
- Flow(
- "test",
- storage=Docker(registry_url="test", image_name="image", image_tag="tag"),
- )
- )
-
- assert boto3_client.describe_task_definition.called
- assert boto3_client.register_task_definition.called
- assert boto3_client.register_task_definition.call_args[1]["family"] == "test"
- assert boto3_client.register_task_definition.call_args[1][
- "containerDefinitions"
- ] == [
- {
- "name": "flow-container",
- "image": "test/image:tag",
- "command": [
- "/bin/sh",
- "-c",
- "python -c 'import prefect; prefect.environments.execution.load_and_run_flow()'",
- ],
- "environment": [
- {
- "name": "PREFECT__CLOUD__GRAPHQL",
- "value": prefect.config.cloud.graphql,
- },
- {"name": "PREFECT__CLOUD__USE_LOCAL_SECRETS", "value": "false"},
- {
- "name": "PREFECT__ENGINE__FLOW_RUNNER__DEFAULT_CLASS",
- "value": "prefect.engine.cloud.CloudFlowRunner",
- },
- {
- "name": "PREFECT__ENGINE__TASK_RUNNER__DEFAULT_CLASS",
- "value": "prefect.engine.cloud.CloudTaskRunner",
- },
- {"name": "PREFECT__CLOUD__SEND_FLOW_RUN_LOGS", "value": "true"},
- {
- "name": "PREFECT__LOGGING__EXTRA_LOGGERS",
- "value": "[]",
- },
- ],
- "essential": True,
- }
- ]
-
-
-def test_setup_definition_register_no_definitions(monkeypatch):
- boto3_client = MagicMock()
- boto3_client.describe_task_definition.side_effect = ClientError({}, None)
- boto3_client.register_task_definition.return_value = {}
- monkeypatch.setattr("boto3.client", MagicMock(return_value=boto3_client))
-
- environment = FargateTaskEnvironment(family="test")
-
- environment.setup(
- Flow(
- "test",
- storage=Docker(registry_url="test", image_name="image", image_tag="tag"),
- )
- )
-
- assert boto3_client.describe_task_definition.called
- assert boto3_client.register_task_definition.called
- assert boto3_client.register_task_definition.call_args[1]["family"] == "test"
- assert boto3_client.register_task_definition.call_args[1][
- "containerDefinitions"
- ] == [
- {
- "environment": [
- {
- "name": "PREFECT__CLOUD__GRAPHQL",
- "value": prefect.config.cloud.graphql,
- },
- {"name": "PREFECT__CLOUD__USE_LOCAL_SECRETS", "value": "false"},
- {
- "name": "PREFECT__ENGINE__FLOW_RUNNER__DEFAULT_CLASS",
- "value": "prefect.engine.cloud.CloudFlowRunner",
- },
- {
- "name": "PREFECT__ENGINE__TASK_RUNNER__DEFAULT_CLASS",
- "value": "prefect.engine.cloud.CloudTaskRunner",
- },
- {"name": "PREFECT__CLOUD__SEND_FLOW_RUN_LOGS", "value": "true"},
- {
- "name": "PREFECT__LOGGING__EXTRA_LOGGERS",
- "value": "[]",
- },
- ],
- "name": "flow-container",
- "image": "test/image:tag",
- "command": [
- "/bin/sh",
- "-c",
- "python -c 'import prefect; prefect.environments.execution.load_and_run_flow()'",
- ],
- }
- ]
-
-
-def test_execute_run_task(monkeypatch):
- boto3_client = MagicMock()
- boto3_client.run_task.return_value = {}
- monkeypatch.setattr("boto3.client", MagicMock(return_value=boto3_client))
-
- with set_temporary_config({"cloud.auth_token": "test"}):
- environment = FargateTaskEnvironment(
- cluster="test", family="test", taskDefinition="test"
- )
-
- environment.execute(
- Flow(
- "test",
- storage=Docker(
- registry_url="test", image_name="image", image_tag="tag"
- ),
- ),
- )
-
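- # Run-time values (auth token, flow run id, image) are passed as container
- # overrides rather than baked into the task definition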
- assert boto3_client.run_task.called
- assert boto3_client.run_task.call_args[1]["taskDefinition"] == "test"
- assert boto3_client.run_task.call_args[1]["overrides"] == {
- "containerOverrides": [
- {
- "name": "flow-container",
- "environment": [
- {
- "name": "PREFECT__CLOUD__AUTH_TOKEN",
- "value": prefect.config.cloud.get("auth_token"),
- },
- {"name": "PREFECT__CONTEXT__FLOW_RUN_ID", "value": "unknown"},
- {"name": "PREFECT__CONTEXT__IMAGE", "value": "test/image:tag"},
- ],
- }
- ]
- }
- assert boto3_client.run_task.call_args[1]["launchType"] == "FARGATE"
- assert boto3_client.run_task.call_args[1]["cluster"] == "test"
-
-
-def test_execute_run_task_agent_token(monkeypatch):
- boto3_client = MagicMock()
- boto3_client.run_task.return_value = {}
- monkeypatch.setattr("boto3.client", MagicMock(return_value=boto3_client))
-
- with set_temporary_config({"cloud.agent.auth_token": "test"}):
- environment = FargateTaskEnvironment(
- cluster="test", family="test", taskDefinition="test"
- )
-
- environment.execute(
- Flow(
- "test",
- storage=Docker(
- registry_url="test", image_name="image", image_tag="tag"
- ),
- ),
- )
-
- assert boto3_client.run_task.called
- assert boto3_client.run_task.call_args[1]["taskDefinition"] == "test"
- assert boto3_client.run_task.call_args[1]["overrides"] == {
- "containerOverrides": [
- {
- "name": "flow-container",
- "environment": [
- {
- "name": "PREFECT__CLOUD__AUTH_TOKEN",
- "value": prefect.config.cloud.agent.get("auth_token"),
- },
- {"name": "PREFECT__CONTEXT__FLOW_RUN_ID", "value": "unknown"},
- {"name": "PREFECT__CONTEXT__IMAGE", "value": "test/image:tag"},
- ],
- }
- ]
- }
- assert boto3_client.run_task.call_args[1]["launchType"] == "FARGATE"
- assert boto3_client.run_task.call_args[1]["cluster"] == "test"
-
-
-def test_environment_run():
- class MyExecutor(LocalDaskExecutor):
- submit_called = False
-
- def submit(self, *args, **kwargs):
- self.submit_called = True
- return super().submit(*args, **kwargs)
-
- global_dict = {}
-
- @prefect.task
- def add_to_dict():
- global_dict["run"] = True
-
- executor = MyExecutor()
- environment = FargateTaskEnvironment(executor=executor)
- flow = prefect.Flow("test", tasks=[add_to_dict], environment=environment)
-
- environment.run(flow=flow)
-
- assert global_dict.get("run") is True
- assert executor.submit_called
-
-
-def test_roundtrip_cloudpickle():
- environment = FargateTaskEnvironment(cluster="test")
-
- assert environment.task_run_kwargs == {"cluster": "test"}
-
- new = cloudpickle.loads(cloudpickle.dumps(environment))
- assert isinstance(new, FargateTaskEnvironment)
- assert new.task_run_kwargs == {"cluster": "test"}
diff --git a/tests/environments/execution/test_k8s_job_environment.py b/tests/environments/execution/test_k8s_job_environment.py
deleted file mode 100644
index 38c612085dc6..000000000000
--- a/tests/environments/execution/test_k8s_job_environment.py
+++ /dev/null
@@ -1,408 +0,0 @@
-import copy
-import os
-from typing import List
-from unittest.mock import MagicMock
-
-import cloudpickle
-import pytest
-import yaml
-
-import prefect
-from prefect import Flow
-from prefect.executors import LocalDaskExecutor
-from prefect.environments import KubernetesJobEnvironment
-from prefect.storage import Docker
-from prefect.utilities.configuration import set_temporary_config
-
-
-@pytest.fixture
-def default_command_args() -> List[str]:
- return [
- 'python -c "import prefect; prefect.environments.execution.load_and_run_flow()"'
- ]
-
-
-@pytest.fixture
-def initial_job_spec(default_command_args):
- return {
- "apiVersion": "batch/v1",
- "kind": "Job",
- "metadata": {"labels": {}},
- "spec": {
- "template": {
- "spec": {
- "containers": [
- {"command": ["/bin/sh", "-c"], "args": default_command_args}
- ]
- },
- "metadata": {"labels": {}},
- }
- },
- }
-
-
-@pytest.fixture
-def job_spec_file(tmpdir):
- job_spec_file = str(tmpdir.join("job.yaml"))
- with open(job_spec_file, "w") as f:
- f.write("apiVersion: batch/v1\nkind: Job\n")
- return job_spec_file
-
-
-@pytest.fixture
-def job():
- file_path = os.path.dirname(prefect.environments.execution.dask.k8s.__file__)
-
- with open(os.path.join(file_path, "job.yaml")) as job_file:
- return yaml.safe_load(job_file)
-
-
-def test_create_k8s_job_environment(job_spec_file):
- environment = KubernetesJobEnvironment(job_spec_file=job_spec_file)
- assert environment.job_spec_file == job_spec_file
- assert environment.unique_job_name is False
- assert environment.executor is not None
- assert environment.labels == set()
- assert environment.on_start is None
- assert environment.on_exit is None
- assert environment.metadata == {}
- assert environment.logger.name == "prefect.KubernetesJobEnvironment"
-
-
-def test_create_k8s_job_environment_labels(job_spec_file):
- environment = KubernetesJobEnvironment(job_spec_file=job_spec_file, labels=["foo"])
- assert environment.labels == set(["foo"])
-
-
-def test_create_k8s_job_callbacks(job_spec_file):
- def f():
- pass
-
- environment = KubernetesJobEnvironment(
- job_spec_file=job_spec_file, labels=["foo"], on_start=f, on_exit=f
- )
- assert environment.labels == set(["foo"])
- assert environment.on_start is f
- assert environment.on_exit is f
-
-
-def test_k8s_job_environment_dependencies():
- environment = KubernetesJobEnvironment()
- assert environment.dependencies == ["kubernetes"]
-
-
-def test_create_k8s_job_environment_identifier_label(job_spec_file):
- environment = KubernetesJobEnvironment(job_spec_file=job_spec_file)
- assert environment.identifier_label
-
-
-def test_create_k8s_job_environment_identifier_label_none(job_spec_file):
- environment = KubernetesJobEnvironment(job_spec_file=job_spec_file)
- environment._identifier_label = None
- assert environment.identifier_label
-
-
-def test_setup_k8s_job_environment_passes(job_spec_file):
- environment = KubernetesJobEnvironment(job_spec_file=job_spec_file)
- environment.setup(Flow("test", storage=Docker()))
-
-
-def test_execute_storage_missing_fields(job_spec_file):
- environment = KubernetesJobEnvironment(job_spec_file=job_spec_file)
- with pytest.raises(ValueError):
- environment.execute(Flow("test", storage=Docker()))
-
-
-def test_execute(monkeypatch):
- file_path = os.path.dirname(prefect.environments.execution.dask.k8s.__file__)
- environment = KubernetesJobEnvironment(os.path.join(file_path, "job.yaml"))
-
- config = MagicMock()
- monkeypatch.setattr("kubernetes.config", config)
-
- batchv1 = MagicMock()
- monkeypatch.setattr(
- "kubernetes.client", MagicMock(BatchV1Api=MagicMock(return_value=batchv1))
- )
-
- storage = Docker(registry_url="test1", image_name="test2", image_tag="test3")
-
- with set_temporary_config({"cloud.auth_token": "test"}):
- environment.execute(Flow("test", storage=storage))
-
- assert (
- batchv1.create_namespaced_job.call_args[1]["body"]["apiVersion"] == "batch/v1"
- )
-
-
-def test_environment_run():
- class MyExecutor(LocalDaskExecutor):
- submit_called = False
-
- def submit(self, *args, **kwargs):
- self.submit_called = True
- return super().submit(*args, **kwargs)
-
- global_dict = {}
-
- @prefect.task
- def add_to_dict():
- global_dict["run"] = True
-
- executor = MyExecutor()
- environment = KubernetesJobEnvironment(executor=executor)
- flow = prefect.Flow("test", tasks=[add_to_dict], environment=environment)
-
- environment.run(flow=flow)
-
- assert global_dict.get("run") is True
- assert executor.submit_called
-
-
-def test_create_namespaced_job_fails_outside_cluster(job_spec_file):
- environment = KubernetesJobEnvironment(job_spec_file=job_spec_file)
- storage = Docker(registry_url="test1", image_name="test2", image_tag="test3")
-
- with pytest.raises(EnvironmentError):
- with set_temporary_config({"cloud.auth_token": "test"}):
- environment.execute(Flow("test", storage=storage))
-
-
-def test_populate_job_yaml(job_spec_file, job, default_command_args):
- environment = KubernetesJobEnvironment(
- job_spec_file=job_spec_file, unique_job_name=True
- )
-
- job["spec"]["template"]["spec"]["containers"][0]["env"] = []
- environment._job_spec = job
-
- with set_temporary_config(
- {
- "cloud.graphql": "gql_test",
- "cloud.auth_token": "auth_test",
- "logging.extra_loggers": "['test_logger']",
- }
- ):
- with prefect.context(flow_run_id="id_test", namespace="namespace_test"):
- yaml_obj = environment._populate_run_time_job_spec_details(
- docker_name="test1/test2:test3"
- )
-
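- # unique_job_name=True appends an 8-character suffix to the job name:
- # len("prefect-dask-job-") + 8 == 25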
- assert "prefect-dask-job-" in yaml_obj["metadata"]["name"]
- assert len(yaml_obj["metadata"]["name"]) == 25
-
- assert (
- yaml_obj["metadata"]["labels"]["prefect.io/identifier"]
- == environment.identifier_label
- )
- assert yaml_obj["metadata"]["labels"]["prefect.io/flow_run_id"] == "id_test"
- assert (
- yaml_obj["spec"]["template"]["metadata"]["labels"]["prefect.io/identifier"]
- == environment.identifier_label
- )
-
- env = yaml_obj["spec"]["template"]["spec"]["containers"][0]["env"]
-
- assert env[0]["value"] == "gql_test"
- assert env[1]["value"] == "auth_test"
- assert env[2]["value"] == "id_test"
- assert env[3]["value"] == "namespace_test"
- assert env[4]["value"] == "test1/test2:test3"
- assert env[9]["value"] == "['test_logger']"
-
- assert (
- yaml_obj["spec"]["template"]["spec"]["containers"][0]["image"]
- == "test1/test2:test3"
- )
-
- assert yaml_obj["spec"]["template"]["spec"]["containers"][0]["command"] == [
- "/bin/sh",
- "-c",
- ]
- assert (
- yaml_obj["spec"]["template"]["spec"]["containers"][0]["args"]
- == default_command_args
- )
-
-
-def test_populate_job_yaml_no_defaults(job_spec_file, job, default_command_args):
- environment = KubernetesJobEnvironment(job_spec_file=job_spec_file)
-
- # only command and args are set on the container when the instance
- # is initialized
- job["spec"]["template"]["spec"]["containers"][0] = {
- "command": ["/bin/sh", "-c"],
- "args": default_command_args,
- }
- del job["metadata"]
- del job["spec"]["template"]["metadata"]
- environment._job_spec = job
-
- with set_temporary_config(
- {"cloud.graphql": "gql_test", "cloud.auth_token": "auth_test"}
- ):
- with prefect.context(flow_run_id="id_test", namespace="namespace_test"):
- yaml_obj = environment._populate_run_time_job_spec_details(
- docker_name="test1/test2:test3"
- )
-
- assert (
- yaml_obj["metadata"]["labels"]["prefect.io/identifier"]
- == environment.identifier_label
- )
- assert yaml_obj["metadata"]["labels"]["prefect.io/flow_run_id"] == "id_test"
- assert (
- yaml_obj["spec"]["template"]["metadata"]["labels"]["prefect.io/identifier"]
- == environment.identifier_label
- )
-
- env = yaml_obj["spec"]["template"]["spec"]["containers"][0]["env"]
-
- assert env[0]["value"] == "gql_test"
- assert env[1]["value"] == "auth_test"
- assert env[2]["value"] == "id_test"
- assert env[3]["value"] == "namespace_test"
- assert env[4]["value"] == "test1/test2:test3"
- assert env[9]["value"] == "[]"
-
- assert (
- yaml_obj["spec"]["template"]["spec"]["containers"][0]["image"]
- == "test1/test2:test3"
- )
-
-
-def test_populate_job_yaml_command_and_args_not_overridden_at_run_time(job_spec_file):
- environment = KubernetesJobEnvironment(job_spec_file=job_spec_file)
-
- test_command = ["/bin/bash", "-acdefg"]
- test_args = "echo 'hello'; python -c 'import prefect; prefect.environments.execution.load_and_run_flow()'"
- environment._job_spec["spec"]["template"]["spec"]["containers"][0][
- "command"
- ] = test_command
- environment._job_spec["spec"]["template"]["spec"]["containers"][0][
- "args"
- ] = test_args
-
- with set_temporary_config(
- {"cloud.graphql": "gql_test", "cloud.auth_token": "auth_test"}
- ):
- with prefect.context(flow_run_id="id_test", namespace="namespace_test"):
- yaml_obj = environment._populate_run_time_job_spec_details(
- docker_name="test1/test2:test3"
- )
-
- assert (
- yaml_obj["spec"]["template"]["spec"]["containers"][0]["command"] == test_command
- )
- assert yaml_obj["spec"]["template"]["spec"]["containers"][0]["args"] == test_args
-
-
-def test_populate_job_yaml_multiple_containers(
- job_spec_file, job, default_command_args
-):
- environment = KubernetesJobEnvironment(job_spec_file=job_spec_file)
-
- # Generate yaml object with multiple containers
- job["spec"]["template"]["spec"]["containers"][0]["env"] = []
- job["spec"]["template"]["spec"]["containers"].append(
- copy.deepcopy(job["spec"]["template"]["spec"]["containers"][0])
- )
- job["spec"]["template"]["spec"]["containers"][1]["env"] = []
- job["spec"]["template"]["spec"]["containers"][1]["args"] = "echo 'other command'"
- environment._job_spec = job
-
- with set_temporary_config(
- {
- "cloud.graphql": "gql_test",
- "cloud.auth_token": "auth_test",
- "logging.extra_loggers": "['test_logger']",
- }
- ):
- with prefect.context(flow_run_id="id_test", namespace="namespace_test"):
- yaml_obj = environment._populate_run_time_job_spec_details(
- docker_name="test1/test2:test3"
- )
-
- assert (
- yaml_obj["metadata"]["labels"]["prefect.io/identifier"]
- == environment.identifier_label
- )
- assert yaml_obj["metadata"]["labels"]["prefect.io/flow_run_id"] == "id_test"
- assert (
- yaml_obj["spec"]["template"]["metadata"]["labels"]["prefect.io/identifier"]
- == environment.identifier_label
- )
-
- # Assert First Container
- env = yaml_obj["spec"]["template"]["spec"]["containers"][0]["env"]
-
- assert env[0]["value"] == "gql_test"
- assert env[1]["value"] == "auth_test"
- assert env[2]["value"] == "id_test"
- assert env[3]["value"] == "namespace_test"
- assert env[4]["value"] == "test1/test2:test3"
- assert env[9]["value"] == "['test_logger']"
-
- assert (
- yaml_obj["spec"]["template"]["spec"]["containers"][0]["image"]
- == "test1/test2:test3"
- )
-
- assert yaml_obj["spec"]["template"]["spec"]["containers"][0]["command"] == [
- "/bin/sh",
- "-c",
- ]
- assert (
- yaml_obj["spec"]["template"]["spec"]["containers"][0]["args"]
- == default_command_args
- )
-
- # Assert Second Container
- env = yaml_obj["spec"]["template"]["spec"]["containers"][1]["env"]
-
- assert env[0]["value"] == "gql_test"
- assert env[1]["value"] == "auth_test"
- assert env[2]["value"] == "id_test"
- assert env[3]["value"] == "namespace_test"
- assert env[4]["value"] == "test1/test2:test3"
- assert env[9]["value"] == "['test_logger']"
-
- assert (
- yaml_obj["spec"]["template"]["spec"]["containers"][1]["image"]
- != "test1/test2:test3"
- )
-
- assert (
- yaml_obj["spec"]["template"]["spec"]["containers"][1]["args"]
- != default_command_args
- )
-
-
-def test_initialize_environment_with_spec_populates(
- job_spec_file, initial_job_spec, default_command_args
-):
- environment = KubernetesJobEnvironment(job_spec_file=job_spec_file)
- assert environment._job_spec == initial_job_spec
- assert environment._job_spec["spec"]["template"]["spec"]["containers"][0][
- "command"
- ] == ["/bin/sh", "-c"]
- assert (
- environment._job_spec["spec"]["template"]["spec"]["containers"][0]["args"]
- == default_command_args
- )
-
-
-def test_roundtrip_cloudpickle(job_spec_file, initial_job_spec):
- environment = KubernetesJobEnvironment(job_spec_file=job_spec_file)
-
- assert environment._job_spec == initial_job_spec
-
- new = cloudpickle.loads(cloudpickle.dumps(environment))
- assert isinstance(new, KubernetesJobEnvironment)
- assert new._job_spec == initial_job_spec
-
- # Identifier labels do not persist
- assert environment.identifier_label
- assert new.identifier_label
-
- assert environment.identifier_label != new.identifier_label
diff --git a/tests/environments/execution/test_local_environment.py b/tests/environments/execution/test_local_environment.py
deleted file mode 100644
index cf09bdc5dd09..000000000000
--- a/tests/environments/execution/test_local_environment.py
+++ /dev/null
@@ -1,118 +0,0 @@
-from unittest.mock import MagicMock
-
-import prefect
-from prefect import Flow
-from prefect.executors import LocalDaskExecutor
-from prefect.environments.execution import LocalEnvironment
-from prefect.storage import Docker, Local
-from prefect.utilities.configuration import set_temporary_config
-
-
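-# Minimal in-memory storage so flows can be added and retrieved without
-# touching disk or a Docker registry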
-class DummyStorage(Local):
- def add_flow(self, flow):
- self.flows[flow.name] = flow
- return flow.name
-
- def get_flow(self, flow_name):
- return self.flows[flow_name]
-
-
-def test_create_environment():
- with set_temporary_config(
- {"engine.executor.default_class": "prefect.executors.LocalDaskExecutor"}
- ):
- environment = LocalEnvironment()
-
- assert isinstance(environment.executor, LocalDaskExecutor)
- assert environment.labels == set()
- assert environment.on_start is None
- assert environment.on_exit is None
- assert environment.metadata == {}
- assert environment.logger.name == "prefect.LocalEnvironment"
-
-
-def test_create_environment_populated():
- def f():
- pass
-
- executor = LocalDaskExecutor()
- environment = LocalEnvironment(
- executor=executor,
- labels=["test"],
- on_start=f,
- on_exit=f,
- metadata={"test": "here"},
- )
- assert environment.executor is executor
- assert environment.labels == set(["test"])
- assert environment.on_start is f
- assert environment.on_exit is f
- assert environment.metadata == {"test": "here"}
- assert environment.logger.name == "prefect.LocalEnvironment"
-
-
-def test_environment_dependencies():
- environment = LocalEnvironment()
- assert environment.dependencies == []
-
-
-def test_setup_environment_passes():
- environment = LocalEnvironment()
- environment.setup(flow=Flow("test", storage=Docker()))
-
-
-def test_serialize_environment():
- environment = LocalEnvironment()
- env = environment.serialize()
- assert env["type"] == "LocalEnvironment"
-
-
-def test_environment_execute():
- class MyExecutor(LocalDaskExecutor):
- submit_called = False
-
- def submit(self, *args, **kwargs):
- self.submit_called = True
- return super().submit(*args, **kwargs)
-
- global_dict = {}
-
- @prefect.task
- def add_to_dict():
- global_dict["run"] = True
-
- executor = MyExecutor()
- environment = LocalEnvironment(executor=executor)
- storage = DummyStorage()
- flow = prefect.Flow(
- "test", tasks=[add_to_dict], environment=environment, storage=storage
- )
-
- storage.add_flow(flow)
- environment.execute(flow=flow)
-
- assert global_dict.get("run") is True
- assert executor.submit_called
-
-
-def test_environment_execute_calls_callbacks():
- start_func = MagicMock()
- exit_func = MagicMock()
-
- global_dict = {}
-
- @prefect.task
- def add_to_dict():
- global_dict["run"] = True
-
- environment = LocalEnvironment(on_start=start_func, on_exit=exit_func)
- storage = DummyStorage()
- flow = prefect.Flow("test", tasks=[add_to_dict])
- storage.add_flow(flow)
- flow.storage = storage
-
- environment.execute(flow)
- assert global_dict.get("run") is True
-
- assert start_func.called
- assert exit_func.called
diff --git a/tests/executors/test_executors.py b/tests/executors/test_executors.py
index cdafe88473e1..e6e0578d80fd 100644
--- a/tests/executors/test_executors.py
+++ b/tests/executors/test_executors.py
@@ -22,17 +22,6 @@
from prefect.engine.signals import SUCCESS
-@pytest.mark.parametrize(
- "cls_name", ["LocalExecutor", "LocalDaskExecutor", "DaskExecutor"]
-)
-def test_deprecated_executors(cls_name):
- old_cls = getattr(prefect.engine.executors, cls_name)
- new_cls = getattr(prefect.executors, cls_name)
- with pytest.warns(UserWarning, match="has been moved to"):
- obj = old_cls()
- assert isinstance(obj, new_cls)
-
-
class TestBaseExecutor:
def test_submit_raises_notimplemented(self):
with pytest.raises(NotImplementedError):
@@ -158,7 +147,7 @@ def test_temporary_pool_created_of_proper_size_and_kind(
self, scheduler, num_workers
):
from dask.system import CPU_COUNT
- from multiprocessing.pool import Pool, ThreadPool
+ from concurrent.futures import ThreadPoolExecutor, ProcessPoolExecutor
e = LocalDaskExecutor(scheduler, num_workers=num_workers)
with e.start():
@@ -166,9 +155,13 @@ def test_temporary_pool_created_of_proper_size_and_kind(
assert e._pool is None
else:
sol = num_workers or CPU_COUNT
- kind = ThreadPool if scheduler == "threads" else Pool
+ kind = (
+ ThreadPoolExecutor
+ if scheduler == "threads"
+ else ProcessPoolExecutor
+ )
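+ # concurrent.futures executors expose their size via _max_workers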
+ assert e._pool._max_workers == sol
assert isinstance(e._pool, kind)
- assert e._pool._processes == sol
assert e._pool is None
@pytest.mark.parametrize("scheduler", ["threads", "processes", "synchronous"])
diff --git a/tests/serialization/test_environments.py b/tests/serialization/test_environments.py
deleted file mode 100644
index f3591bc4fea6..000000000000
--- a/tests/serialization/test_environments.py
+++ /dev/null
@@ -1,286 +0,0 @@
-import os
-import tempfile
-
-import pytest
-
-import prefect
-from prefect import environments
-from prefect.serialization.environment import (
- BaseEnvironmentSchema,
- DaskKubernetesEnvironmentSchema,
- EnvironmentSchema,
- FargateTaskEnvironmentSchema,
- KubernetesJobEnvironmentSchema,
- LocalEnvironmentSchema,
-)
-
-
-@pytest.fixture
-def k8s_job_spec_content() -> str:
- return "apiVersion: batch/v1\nkind: Job\n"
-
-
-def test_serialize_base_environment():
- env = environments.Environment()
-
- serialized = BaseEnvironmentSchema().dump(env)
- assert serialized
- assert serialized["__version__"] == prefect.__version__
- assert serialized["labels"] == []
- assert serialized["metadata"] == {}
-
-
-def test_serialize_base_environment_with_labels():
- env = environments.Environment(labels=["b", "c", "a"])
-
- serialized = BaseEnvironmentSchema().dump(env)
- assert serialized
- assert serialized["__version__"] == prefect.__version__
- assert serialized["labels"] == ["a", "b", "c"]
-
-
-def test_serialize_dask_environment():
- env = environments.DaskKubernetesEnvironment()
-
- schema = DaskKubernetesEnvironmentSchema()
- serialized = schema.dump(env)
- assert serialized
- assert serialized["__version__"] == prefect.__version__
- assert serialized["docker_secret"] is None
- assert serialized["min_workers"] == 1
- assert serialized["max_workers"] == 2
- assert serialized["labels"] == []
- assert serialized["metadata"] == {}
-
- new = schema.load(serialized)
- assert new.private_registry is False
- assert new.docker_secret is None
- assert new.min_workers == 1
- assert new.max_workers == 2
- assert new.labels == set()
- assert new.scheduler_spec_file is None
- assert new.worker_spec_file is None
-
-
-def test_serialize_dask_env_with_custom_specs():
- with tempfile.TemporaryDirectory() as directory:
- with open(os.path.join(directory, "scheduler.yaml"), "w+") as f:
- f.write("scheduler")
- with open(os.path.join(directory, "worker.yaml"), "w+") as f:
- f.write("worker")
-
- env = environments.DaskKubernetesEnvironment(
- scheduler_spec_file=os.path.join(directory, "scheduler.yaml"),
- worker_spec_file=os.path.join(directory, "worker.yaml"),
- )
-
- schema = DaskKubernetesEnvironmentSchema()
- serialized = schema.dump(env)
-
- deserialized = schema.load(serialized)
- assert isinstance(deserialized, environments.DaskKubernetesEnvironment)
-
-
-def test_serialize_dask_environment_with_labels():
- env = environments.DaskKubernetesEnvironment(labels=["b", "c", "a"])
-
- schema = DaskKubernetesEnvironmentSchema()
- serialized = schema.dump(env)
- assert serialized
- assert serialized["__version__"] == prefect.__version__
- assert serialized["docker_secret"] is None
- assert serialized["min_workers"] == 1
- assert serialized["max_workers"] == 2
- # labels should be sorted in the serialized obj
- assert serialized["labels"] == ["a", "b", "c"]
-
- new = schema.load(serialized)
- assert new.private_registry is False
- assert new.docker_secret is None
- assert new.min_workers == 1
- assert new.max_workers == 2
- assert new.labels == {"a", "b", "c"}
-
-
-def test_serialize_dask_environment_with_customized_workers():
- env = environments.DaskKubernetesEnvironment(min_workers=10, max_workers=60)
-
- schema = DaskKubernetesEnvironmentSchema()
- serialized = schema.dump(env)
- assert serialized
- assert serialized["__version__"] == prefect.__version__
- assert serialized["min_workers"] == 10
- assert serialized["max_workers"] == 60
-
- new = schema.load(serialized)
- assert new.min_workers == 10
- assert new.max_workers == 60
-
-
-def test_serialize_dask_environment_with_private_registry():
- env = environments.DaskKubernetesEnvironment(
- private_registry=True, docker_secret="FOO"
- )
-
- schema = DaskKubernetesEnvironmentSchema()
- serialized = schema.dump(env)
- assert serialized
- assert serialized["__version__"] == prefect.__version__
- assert serialized["private_registry"] is True
- assert serialized["docker_secret"] == "FOO"
-
- new = schema.load(serialized)
- assert new.private_registry is True
- assert new.docker_secret == "FOO"
-
-
-def test_serialize_fargate_task_environment():
- env = environments.FargateTaskEnvironment()
-
- schema = FargateTaskEnvironmentSchema()
- serialized = schema.dump(env)
- assert serialized
- assert serialized["__version__"] == prefect.__version__
- assert serialized["labels"] == []
- assert serialized["metadata"] == {}
-
- new = schema.load(serialized)
- assert new.labels == set()
-
-
-def test_serialize_fargate_task_env_with_kwargs():
- env = environments.FargateTaskEnvironment(cluster="test")
-
- schema = FargateTaskEnvironmentSchema()
- serialized = schema.dump(env)
-
- deserialized = schema.load(serialized)
- assert isinstance(deserialized, environments.FargateTaskEnvironment)
- assert deserialized.task_run_kwargs == {}
-
-
-def test_serialize_fargate_task_environment_with_labels():
- env = environments.FargateTaskEnvironment(labels=["b", "c", "a"])
-
- schema = FargateTaskEnvironmentSchema()
- serialized = schema.dump(env)
- assert serialized
- assert serialized["__version__"] == prefect.__version__
- # labels should be sorted in the serialized obj
- assert serialized["labels"] == ["a", "b", "c"]
-
- new = schema.load(serialized)
- assert new.labels == {"a", "b", "c"}
-
-
-def test_serialize_k8s_job_environment(k8s_job_spec_content):
- with tempfile.TemporaryDirectory() as directory:
-
- with open(os.path.join(directory, "job.yaml"), "w+") as file:
- file.write(k8s_job_spec_content)
-
- env = environments.KubernetesJobEnvironment(
- job_spec_file=os.path.join(directory, "job.yaml")
- )
-
- schema = KubernetesJobEnvironmentSchema()
- serialized = schema.dump(env)
- assert serialized
- assert serialized["__version__"] == prefect.__version__
- assert serialized["labels"] == []
- assert serialized["metadata"] == {}
-
- new = schema.load(serialized)
- assert new.labels == set()
- assert new.job_spec_file is None
-
-
-def test_serialize_k8s_job_env_with_job_spec(k8s_job_spec_content):
- with tempfile.TemporaryDirectory() as directory:
- with open(os.path.join(directory, "job.yaml"), "w+") as f:
- f.write(k8s_job_spec_content)
-
- env = environments.KubernetesJobEnvironment(
- job_spec_file=os.path.join(directory, "job.yaml")
- )
-
- schema = KubernetesJobEnvironmentSchema()
- serialized = schema.dump(env)
-
- deserialized = schema.load(serialized)
- assert isinstance(deserialized, environments.KubernetesJobEnvironment)
-
-
-def test_serialize_k8s_job_environment_with_labels(k8s_job_spec_content):
- with tempfile.TemporaryDirectory() as directory:
-
- with open(os.path.join(directory, "job.yaml"), "w+") as file:
- file.write(k8s_job_spec_content)
-
- env = environments.KubernetesJobEnvironment(
- job_spec_file=os.path.join(directory, "job.yaml"), labels=["b", "c", "a"]
- )
-
- schema = KubernetesJobEnvironmentSchema()
- serialized = schema.dump(env)
- assert serialized
- assert serialized["__version__"] == prefect.__version__
- # labels should be sorted in the serialized obj
- assert serialized["labels"] == ["a", "b", "c"]
-
- new = schema.load(serialized)
- assert new.labels == {"a", "b", "c"}
-
-
-def test_serialize_local_environment_with_labels():
- env = environments.LocalEnvironment(labels=["b", "c", "a"])
-
- schema = LocalEnvironmentSchema()
- serialized = schema.dump(env)
- assert serialized
- assert serialized["__version__"] == prefect.__version__
- # labels should be sorted in the serialized obj
- assert serialized["labels"] == ["a", "b", "c"]
-
- new = schema.load(serialized)
- assert new.labels == {"b", "c", "a"}
-
-
-def test_serialize_custom_environment():
- class MyEnv(environments.Environment):
- def __init__(self, x=5):
- self.x = 5
- super().__init__(labels=["b", "c", "a"], metadata={"test": "here"})
-
- def custom_method(self):
- pass
-
- env = MyEnv()
- schema = EnvironmentSchema()
- serialized = schema.dump(env)
- assert serialized["type"] == "CustomEnvironment"
- assert serialized["labels"] == ["a", "b", "c"]
- assert serialized["metadata"] == {"test": "here"}
-
- obj = schema.load(serialized)
- assert isinstance(obj, environments.Environment)
- assert obj.labels == {"a", "b", "c"}
- assert obj.metadata == {"test": "here"}
-
-
-@pytest.mark.parametrize("cls_name", ["RemoteEnvironment", "RemoteDaskEnvironment"])
-def test_deserialize_old_environments_still_work(cls_name):
- """Check that old removed environments can still be deserialzed in the agent"""
- env = {
- "type": cls_name,
- "labels": ["prod"],
- "executor": "prefect.engine.executors.LocalExecutor",
- "__version__": "0.9.0",
- "executor_kwargs": {},
- }
- schema = EnvironmentSchema()
- obj = schema.load(env)
-
- assert isinstance(obj, environments.Environment)
- assert obj.labels == {"prod"}
- assert obj.metadata == {}
diff --git a/tests/storage/test_docker_healthcheck.py b/tests/storage/test_docker_healthcheck.py
index b6f9e4be4557..3aac0f97b280 100644
--- a/tests/storage/test_docker_healthcheck.py
+++ b/tests/storage/test_docker_healthcheck.py
@@ -7,7 +7,6 @@
from prefect import Flow, Task, task
from prefect.engine.results import LocalResult
-from prefect.environments import Environment
from prefect.storage import _healthcheck as healthchecks
from prefect.utilities.storage import flow_to_bytes_pickle
@@ -205,39 +204,3 @@ def down():
result = down(upstream_tasks=[up])
assert healthchecks.result_check([f]) is None
-
-
-class TestEnvironmentDependencyCheck:
- def test_no_raise_on_normal_flow(self):
- flow = Flow("THIS IS A TEST")
-
- assert healthchecks.environment_dependency_check([flow]) is None
-
- def test_no_raise_on_proper_imports(self):
- class NewEnvironment(Environment):
- @property
- def dependencies(self) -> list:
- return ["prefect"]
-
- flow = Flow("THIS IS A TEST", environment=NewEnvironment())
-
- assert healthchecks.environment_dependency_check([flow]) is None
-
- def test_no_raise_on_missing_dependencies_property(self):
- class NewEnvironment(Environment):
- pass
-
- flow = Flow("THIS IS A TEST", environment=NewEnvironment())
-
- assert healthchecks.environment_dependency_check([flow]) is None
-
- def test_raise_on_missing_imports(self, monkeypatch):
- class NewEnvironment(Environment):
- @property
- def dependencies(self) -> list:
- return ["TEST"]
-
- flow = Flow("THIS IS A TEST", environment=NewEnvironment())
-
- with pytest.raises(ModuleNotFoundError):
- healthchecks.environment_dependency_check([flow])
diff --git a/tests/storage/test_docker_storage.py b/tests/storage/test_docker_storage.py
index aec878adfd16..69b419d3b8c6 100644
--- a/tests/storage/test_docker_storage.py
+++ b/tests/storage/test_docker_storage.py
@@ -34,7 +34,7 @@ def test_create_docker_storage():
def test_cant_create_docker_with_both_base_image_and_dockerfile():
with pytest.raises(ValueError):
- Docker(dockerfile="path/to/file", base_image="python:3.6")
+ Docker(dockerfile="path/to/file", base_image="python:3.7")
def test_serialize_docker_storage():
@@ -74,13 +74,13 @@ def test_add_flow_to_docker_custom_prefect_dir():
)
def test_empty_docker_storage(monkeypatch, platform, url, no_docker_host_var):
monkeypatch.setattr("prefect.storage.docker.sys.platform", platform)
- monkeypatch.setattr(sys, "version_info", MagicMock(major=3, minor=6))
+ monkeypatch.setattr(sys, "version_info", MagicMock(major=3, minor=7))
monkeypatch.setattr(prefect, "__version__", "0.9.2+c2394823")
storage = Docker()
assert not storage.registry_url
- assert storage.base_image == "python:3.6-slim"
+ assert storage.base_image == "python:3.7-slim"
assert not storage.image_name
assert not storage.image_tag
assert storage.python_dependencies == ["wheel"]
@@ -105,13 +105,13 @@ def test_empty_docker_storage_on_tagged_commit(
monkeypatch, platform, url, no_docker_host_var
):
monkeypatch.setattr("prefect.storage.docker.sys.platform", platform)
- monkeypatch.setattr(sys, "version_info", MagicMock(major=3, minor=6))
+ monkeypatch.setattr(sys, "version_info", MagicMock(major=3, minor=7))
monkeypatch.setattr(prefect, "__version__", "0.9.2")
storage = Docker()
assert not storage.registry_url
- assert storage.base_image == "prefecthq/prefect:0.9.2-python3.6"
+ assert storage.base_image == "prefecthq/prefect:0.9.2-python3.7"
assert not storage.image_name
assert not storage.image_tag
assert storage.python_dependencies == ["wheel"]
@@ -122,6 +122,23 @@ def test_empty_docker_storage_on_tagged_commit(
assert not storage.local_image
+@pytest.mark.parametrize("dev_version", ["1.0rc0", "1.0rc0+c2394823"])
+def test_base_image_release_candidate_dev_image(monkeypatch, dev_version):
+ monkeypatch.setattr(sys, "version_info", MagicMock(major=3, minor=7))
+ monkeypatch.setattr(prefect, "__version__", dev_version)
+
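+    # rc0 builds, with or without local commit metadata, should use the bare prerelease tag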
+ storage = Docker()
+ assert storage.base_image == "prefecthq/prefect:1.0rc0"
+
+
+def test_base_image_release_candidate(monkeypatch):
+ monkeypatch.setattr(sys, "version_info", MagicMock(major=3, minor=7))
+ monkeypatch.setattr(prefect, "__version__", "1.0rc1")
+
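+    # a published rc release should resolve to the python-versioned rc image tag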
+ storage = Docker()
+ assert storage.base_image == "prefecthq/prefect:1.0rc1-python3.7"
+
+
@pytest.mark.parametrize("version_info", [(3, 5), (3, 6), (3, 7)])
def test_docker_init_responds_to_python_version(monkeypatch, version_info):
version_mock = MagicMock(major=version_info[0], minor=version_info[1])
@@ -302,7 +319,7 @@ def test_build_image_fails_with_value_error(monkeypatch):
flow = Flow("test")
storage = Docker(
registry_url="reg",
- base_image="python:3.6",
+ base_image="python:3.7",
image_name="test",
image_tag="latest",
)
@@ -315,7 +332,7 @@ def test_build_image_fails_with_value_error(monkeypatch):
def test_build_image_fails_no_registry(monkeypatch):
- storage = Docker(base_image="python:3.6", image_name="test", image_tag="latest")
+ storage = Docker(base_image="python:3.7", image_name="test", image_tag="latest")
client = MagicMock()
monkeypatch.setattr("docker.APIClient", client)
@@ -329,7 +346,7 @@ def test_build_image_passes(monkeypatch):
flow = Flow("test")
storage = Docker(
registry_url="reg",
- base_image="python:3.6",
+ base_image="python:3.7",
image_name="test",
image_tag="latest",
)
@@ -352,7 +369,7 @@ def test_build_image_passes(monkeypatch):
@pytest.mark.skip(reason="Needs to be mocked so it can work on CircleCI")
def test_build_image_passes_and_pushes(monkeypatch):
flow = Flow("test")
- storage = Docker(registry_url="reg", base_image="python:3.6")
+ storage = Docker(registry_url="reg", base_image="python:3.7")
pull_image = MagicMock()
monkeypatch.setattr("prefect.storage.Docker.pull_image", pull_image)
@@ -379,7 +396,7 @@ def test_build_image_passes_and_pushes(monkeypatch):
def test_create_dockerfile_from_base_image():
- storage = Docker(base_image="python:3.6")
+ storage = Docker(base_image="python:3.7")
with tempfile.TemporaryDirectory() as tempdir:
dpath = storage.create_dockerfile_object(directory=tempdir)
@@ -387,7 +404,7 @@ def test_create_dockerfile_from_base_image():
with open(dpath, "r") as dockerfile:
output = dockerfile.read()
- assert "FROM python:3.6" in output
+ assert "FROM python:3.7" in output
def test_create_dockerfile_from_dockerfile():
@@ -579,18 +596,18 @@ def test_create_dockerfile_from_dockerfile_uses_tempdir_path():
@pytest.mark.parametrize(
"prefect_version",
[
- ("0.5.3", ("FROM prefecthq/prefect:0.5.3-python3.6",)),
+ ("0.5.3", ("FROM prefecthq/prefect:0.5.3-python3.7",)),
(
"master",
(
- "FROM python:3.6-slim",
+ "FROM python:3.7-slim",
"pip show prefect || pip install git+https://github.com/PrefectHQ/prefect.git@master",
),
),
(
"424be6b5ed8d3be85064de4b95b5c3d7cb665510",
(
- "FROM python:3.6-slim",
+ "FROM python:3.7-slim",
"apt update && apt install -y gcc git make && rm -rf /var/lib/apt/lists/*",
"pip show prefect || pip install git+https://github.com/PrefectHQ/prefect.git@424be6b5ed8d3be85064de4b95b5c3d7cb665510#egg=prefect[all_orchestration_extras]",
),
@@ -598,7 +615,7 @@ def test_create_dockerfile_from_dockerfile_uses_tempdir_path():
],
)
def test_create_dockerfile_from_prefect_version(monkeypatch, prefect_version):
- monkeypatch.setattr(sys, "version_info", MagicMock(major=3, minor=6))
+ monkeypatch.setattr(sys, "version_info", MagicMock(major=3, minor=7))
storage = Docker(prefect_version=prefect_version[0])
@@ -773,7 +790,7 @@ def test_run_healthchecks_arg_custom_prefect_dir(ignore_healthchecks, tmpdir):
def test_pull_image(capsys, monkeypatch):
- storage = Docker(base_image="python:3.6")
+ storage = Docker(base_image="python:3.7")
client = MagicMock()
client.pull.return_value = [{"progress": "test", "status": "100"}]
@@ -787,7 +804,7 @@ def test_pull_image(capsys, monkeypatch):
def test_pull_image_raises_if_error_encountered(monkeypatch):
- storage = Docker(base_image="python:3.6")
+ storage = Docker(base_image="python:3.7")
client = MagicMock()
client.pull.return_value = [
@@ -801,7 +818,7 @@ def test_pull_image_raises_if_error_encountered(monkeypatch):
def test_push_image(capsys, monkeypatch):
- storage = Docker(base_image="python:3.6")
+ storage = Docker(base_image="python:3.7")
client = MagicMock()
client.push.return_value = [{"progress": "test", "status": "100"}]
@@ -816,7 +833,7 @@ def test_push_image(capsys, monkeypatch):
def test_push_image_raises_if_error_encountered(monkeypatch):
- storage = Docker(base_image="python:3.6")
+ storage = Docker(base_image="python:3.7")
client = MagicMock()
client.push.return_value = [
@@ -830,7 +847,7 @@ def test_push_image_raises_if_error_encountered(monkeypatch):
def test_docker_storage_name():
- storage = Docker(base_image="python:3.6")
+ storage = Docker(base_image="python:3.7")
with pytest.raises(ValueError):
storage.name
@@ -876,7 +893,7 @@ def test_docker_storage_output_stream(contents, expected, capsys):
def test_docker_storage_name_registry_url_none():
- storage = Docker(base_image="python:3.6")
+ storage = Docker(base_image="python:3.7")
with pytest.raises(ValueError):
storage.name
@@ -893,7 +910,7 @@ def test_docker_storage_get_flow_method(tmpdir):
with open(flow_path, "wb") as f:
cloudpickle.dump(flow, f)
- storage = Docker(base_image="python:3.6", prefect_directory=str(tmpdir))
+ storage = Docker(base_image="python:3.7", prefect_directory=str(tmpdir))
storage.add_flow(flow)
f = storage.get_flow(flow.name)
diff --git a/tests/tasks/aws/test_lambda.py b/tests/tasks/aws/test_lambda.py
index 1bdba569ebb3..253960162cde 100644
--- a/tests/tasks/aws/test_lambda.py
+++ b/tests/tasks/aws/test_lambda.py
@@ -13,7 +13,7 @@ class TestLambdaCreate:
def test_initialization(self):
task = LambdaCreate(
function_name="test",
- runtime="python3.6",
+ runtime="python3.7",
role="aws_role",
handler="file.handler",
bucket="s3_bucket",
@@ -24,7 +24,7 @@ def test_initialization(self):
def test_lambda_create_exposes_boto3_create_api(self, monkeypatch):
task = LambdaCreate(
function_name="test",
- runtime="python3.6",
+ runtime="python3.7",
role="aws_role",
handler="file.handler",
)
diff --git a/tests/tasks/prefect/test_flow_run.py b/tests/tasks/prefect/test_flow_run.py
index d02767dec102..0ce18af2ef5b 100644
--- a/tests/tasks/prefect/test_flow_run.py
+++ b/tests/tasks/prefect/test_flow_run.py
@@ -161,9 +161,7 @@ def test_displays_flow_run_url(self, MockFlowView, MockClient, caplog):
MockClient().create_flow_run.return_value = "flow-run-id"
MockClient().get_cloud_url.return_value = "fake-url"
create_flow_run.run(flow_id="flow-id")
- MockClient().get_cloud_url.assert_called_once_with(
- "flow-run", "flow-run-id", as_user=False
- )
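+    # get_cloud_url no longer accepts the removed as_user kwarg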
+ MockClient().get_cloud_url.assert_called_once_with("flow-run", "flow-run-id")
assert "Created flow run '': fake-url" in caplog.text
diff --git a/tests/tasks/secrets/test_base.py b/tests/tasks/secrets/test_base.py
index 0626e1036ed1..041e7093f456 100644
--- a/tests/tasks/secrets/test_base.py
+++ b/tests/tasks/secrets/test_base.py
@@ -74,7 +74,7 @@ def test_secret_value_depends_on_use_local_secrets(self, monkeypatch):
secret = PrefectSecret(name="test")
with set_temporary_config(
- {"cloud.use_local_secrets": False, "cloud.auth_token": None}
+ {"cloud.use_local_secrets": False, "cloud.api-key": None}
):
with prefect.context(secrets=dict()):
with pytest.raises(ClientError):
@@ -87,7 +87,7 @@ def test_secrets_use_client(self, monkeypatch):
session.return_value.post = post
monkeypatch.setattr("requests.Session", session)
with set_temporary_config(
- {"cloud.auth_token": "secret_token", "cloud.use_local_secrets": False}
+ {"cloud.api_key": "api-key", "cloud.use_local_secrets": False}
):
my_secret = PrefectSecret(name="the-key")
val = my_secret.run()
@@ -100,7 +100,7 @@ def test_cloud_secrets_use_context_first(self, monkeypatch):
session.return_value.post = post
monkeypatch.setattr("requests.Session", session)
with set_temporary_config(
- {"cloud.auth_token": "secret_token", "cloud.use_local_secrets": False}
+ {"cloud.api_key": "api-key", "cloud.use_local_secrets": False}
):
with prefect.context(secrets={"the-key": "foo"}):
my_secret = PrefectSecret(name="the-key")
@@ -114,7 +114,7 @@ def test_cloud_secrets_use_context_first_but_fallback_to_client(self, monkeypatc
session.return_value.post = post
monkeypatch.setattr("requests.Session", session)
with set_temporary_config(
- {"cloud.auth_token": "secret_token", "cloud.use_local_secrets": False}
+ {"cloud.api_key": "api-key", "cloud.use_local_secrets": False}
):
with prefect.context(secrets={}):
my_secret = PrefectSecret(name="the-key")
@@ -128,7 +128,7 @@ def test_cloud_secrets_remain_plain_dictionaries(self, monkeypatch):
session.return_value.post = post
monkeypatch.setattr("requests.Session", session)
with set_temporary_config(
- {"cloud.auth_token": "secret_token", "cloud.use_local_secrets": False}
+ {"cloud.api_key": "api-key", "cloud.use_local_secrets": False}
):
my_secret = PrefectSecret(name="the-key")
val = my_secret.run()
@@ -146,7 +146,7 @@ def test_cloud_secrets_auto_load_json_strings(self, monkeypatch):
session.return_value.post = post
monkeypatch.setattr("requests.Session", session)
with set_temporary_config(
- {"cloud.auth_token": "secret_token", "cloud.use_local_secrets": False}
+ {"cloud.api_key": "api-key", "cloud.use_local_secrets": False}
):
my_secret = PrefectSecret(name="the-key")
val = my_secret.run()
diff --git a/tests/test_configuration.py b/tests/test_configuration.py
index 57ce93e12ab9..d1f4a0bd512f 100644
--- a/tests/test_configuration.py
+++ b/tests/test_configuration.py
@@ -138,9 +138,6 @@ def test_to_environment_variables_respects_prefix():
assert env == {"FOO__KEY": "value"}
-@pytest.mark.skipif(
- sys.version_info < (3, 7), reason="One of the nested keys is a Box in Python 3.6"
-)
def test_to_environment_variables_roundtrip(config, monkeypatch, test_config_file_path):
keys = [".".join(k) for k in dict_to_flatdict(config)]
diff --git a/tests/test_exceptions.py b/tests/test_exceptions.py
deleted file mode 100644
index 9f4f189c3b6c..000000000000
--- a/tests/test_exceptions.py
+++ /dev/null
@@ -1,79 +0,0 @@
-import pytest
-
-from prefect.utilities.exceptions import (
- PrefectError,
- ClientError as OldClientError,
- AuthorizationError as OldAuthorizationError,
- StorageError,
- SerializationError,
- TaskTimeoutError,
- ContextError,
- VersionLockError,
-)
-from prefect.exceptions import (
- PrefectException,
- ClientError,
- AuthorizationError,
- VersionLockMismatchSignal,
- TaskTimeoutSignal,
- FlowStorageError,
-)
-
-
-@pytest.mark.parametrize(
- "old_err,new_err",
- [
- (PrefectError, PrefectException),
- (OldClientError, ClientError),
- (OldAuthorizationError, AuthorizationError),
- (StorageError, FlowStorageError),
- ],
-)
-def test_new_exceptions_can_be_caught_by_old(old_err, new_err):
- raises = False
- try:
- raise new_err("message")
- except old_err as exc:
- assert str(exc) == "message"
- raises = True
-
- assert raises
-
-
-@pytest.mark.parametrize(
- "err_cls",
- [
- PrefectError,
- OldClientError,
- OldAuthorizationError,
- SerializationError,
- TaskTimeoutError,
- StorageError,
- VersionLockError,
- ContextError,
- VersionLockError,
- ],
-)
-def test_old_exceptions_warn_on_creation(err_cls):
- with pytest.warns(UserWarning, match=f"prefect.utilities.exceptions"):
- err_cls()
-
-
-@pytest.mark.parametrize(
- "err_cls",
- [
- PrefectException,
- ClientError,
- AuthorizationError,
- FlowStorageError,
- VersionLockMismatchSignal,
- TaskTimeoutSignal,
- ],
-)
-def test_new_exceptions_do_not_warn_on_creation(err_cls):
- with pytest.warns(None) as warnings:
- err_cls()
- if warnings:
- raise AssertionError(
- "Warnings raised:\n" + "\n".join([str(w) for w in warnings])
- )
diff --git a/tests/utilities/test_agent.py b/tests/utilities/test_agent.py
index c998a77cbdb1..d6ada122456f 100644
--- a/tests/utilities/test_agent.py
+++ b/tests/utilities/test_agent.py
@@ -1,67 +1,11 @@
import pytest
-from prefect.environments import LocalEnvironment
from prefect.storage import Docker, Local
from prefect.run_configs import KubernetesRun, LocalRun
from prefect.utilities.agent import get_flow_image, get_flow_run_command
from prefect.utilities.graphql import GraphQLResult
-def test_get_flow_image_docker_storage():
- flow_run = GraphQLResult(
- {
- "flow": GraphQLResult(
- {
- "storage": Docker(
- registry_url="test", image_name="name", image_tag="tag"
- ).serialize(),
- "environment": LocalEnvironment().serialize(),
- "id": "id",
- }
- ),
- "id": "id",
- }
- )
- image = get_flow_image(flow_run=flow_run)
- assert image == "test/name:tag"
-
-
-def test_get_flow_image_env_metadata():
- flow_run = GraphQLResult(
- {
- "flow": GraphQLResult(
- {
- "storage": Local().serialize(),
- "environment": LocalEnvironment(
- metadata={"image": "repo/name:tag"}
- ).serialize(),
- "id": "id",
- }
- ),
- "id": "id",
- }
- )
- image = get_flow_image(flow_run=flow_run)
- assert image == "repo/name:tag"
-
-
-def test_get_flow_image_raises_on_missing_info():
- flow_run = GraphQLResult(
- {
- "flow": GraphQLResult(
- {
- "storage": Local().serialize(),
- "environment": LocalEnvironment().serialize(),
- "id": "id",
- }
- ),
- "id": "id",
- }
- )
- with pytest.raises(ValueError):
- get_flow_image(flow_run=flow_run)
-
-
@pytest.mark.parametrize("run_config", [KubernetesRun(), LocalRun(), None])
def test_get_flow_image_run_config_docker_storage(run_config):
flow_run = GraphQLResult(
@@ -141,7 +85,6 @@ def test_get_flow_run_command(core_version, command):
"flow": GraphQLResult(
{
"storage": Local().serialize(),
- "environment": LocalEnvironment().serialize(),
"id": "id",
"core_version": core_version,
}
@@ -160,7 +103,6 @@ def test_get_flow_run_command_works_if_core_version_not_on_response():
"flow": GraphQLResult(
{
"storage": Local().serialize(),
- "environment": LocalEnvironment().serialize(),
"id": "id",
}
),
diff --git a/tests/utilities/test_logging.py b/tests/utilities/test_logging.py
index 0f2d06a80bc4..1628636b7492 100644
--- a/tests/utilities/test_logging.py
+++ b/tests/utilities/test_logging.py
@@ -88,16 +88,6 @@ def test_cloud_handler_emit_noop_if_cloud_logging_disabled(logger, log_manager):
assert log_manager.thread is None
-def test_cloud_handler_emit_noop_if_cloud_logging_disabled_deprecated(
- logger, log_manager
-):
- with utilities.configuration.set_temporary_config({"logging.log_to_cloud": False}):
- logger.info("testing")
- assert not log_manager.enqueue.called
- assert log_manager.client is None
- assert log_manager.thread is None
-
-
def test_cloud_handler_emit_noop_if_below_log_level(logger, log_manager):
logger.debug("testing")
assert not log_manager.enqueue.called
@@ -105,6 +95,12 @@ def test_cloud_handler_emit_noop_if_below_log_level(logger, log_manager):
assert log_manager.thread is None
+def test_cloud_handler_emit_ignores_removed_log_to_cloud_setting(logger, log_manager):
+ with utilities.configuration.set_temporary_config({"logging.log_to_cloud": False}):
+ logger.info("testing")
+ assert log_manager.enqueue.called
+
+
def test_cloud_handler_emit_noop_if_below_log_level_in_context(logger, log_manager):
# Log level in context is higher than log level of logger
assert logger.level == logging.INFO
diff --git a/tests/utilities/test_storage.py b/tests/utilities/test_storage.py
index 4b641e4eb11e..f7390dc9891b 100644
--- a/tests/utilities/test_storage.py
+++ b/tests/utilities/test_storage.py
@@ -8,9 +8,9 @@
import prefect
from prefect import Flow, Task
-from prefect.environments import LocalEnvironment
from prefect.storage import Docker, Local
from prefect.exceptions import FlowStorageError
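+# run configs replace the removed LocalEnvironment in these tests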
+from prefect.run_configs import DockerRun, UniversalRun
from prefect.utilities.storage import (
get_flow_image,
extract_flow_from_file,
@@ -23,17 +23,17 @@
def test_get_flow_image_docker_storage():
flow = Flow(
"test",
- environment=LocalEnvironment(),
+ run_config=UniversalRun(),
storage=Docker(registry_url="test", image_name="name", image_tag="tag"),
)
image = get_flow_image(flow=flow)
assert image == "test/name:tag"
-def test_get_flow_image_env_metadata():
+def test_get_flow_image_run_config():
flow = Flow(
"test",
- environment=LocalEnvironment(metadata={"image": "repo/name:tag"}),
+ run_config=DockerRun(image="repo/name:tag"),
storage=Local(),
)
image = get_flow_image(flow=flow)
@@ -43,7 +43,7 @@ def test_get_flow_image_env_metadata():
def test_get_flow_image_raises_on_missing_info():
flow = Flow(
"test",
- environment=LocalEnvironment(),
+ run_config=UniversalRun(),
storage=Local(),
)
with pytest.raises(ValueError):