From acbd1fcdfb95ebc62822adc24602ae4cc28ec59d Mon Sep 17 00:00:00 2001 From: Gal Topper Date: Sun, 4 Feb 2024 22:33:50 +0800 Subject: [PATCH 001/119] [Datastore] Fix ValueError when parsing a timestamp string column with pandas 1 [1.6.x] (#5055) --- mlrun/datastore/utils.py | 12 +++++++++++- tests/datastore/test_sources.py | 2 +- 2 files changed, 12 insertions(+), 2 deletions(-) diff --git a/mlrun/datastore/utils.py b/mlrun/datastore/utils.py index 63dc35e1da3..2f960ed5b88 100644 --- a/mlrun/datastore/utils.py +++ b/mlrun/datastore/utils.py @@ -18,6 +18,7 @@ from urllib.parse import parse_qs, urlparse, urlunparse import pandas as pd +import semver import mlrun.datastore @@ -137,7 +138,16 @@ def filter_df_generator( def _execute_time_filter( df: pd.DataFrame, time_column: str, start_time: pd.Timestamp, end_time: pd.Timestamp ): - df[time_column] = pd.to_datetime(df[time_column], format="mixed", yearfirst=True) + if semver.parse(pd.__version__)["major"] >= 2: + # pandas 2 is too strict by default (ML-5629) + kwargs = { + "format": "mixed", + "yearfirst": True, + } + else: + # pandas 1 may fail on format "mixed" (ML-5661) + kwargs = {} + df[time_column] = pd.to_datetime(df[time_column], **kwargs) if start_time: df = df[df[time_column] > start_time] if end_time: diff --git a/tests/datastore/test_sources.py b/tests/datastore/test_sources.py index 6733230cf3d..f941970961a 100644 --- a/tests/datastore/test_sources.py +++ b/tests/datastore/test_sources.py @@ -66,7 +66,7 @@ def test_kafka_source_with_new_nuclio(): assert function.spec.max_replicas == 2 -# ML-5629 +# ML-5629 (pandas 2), ML-5661 (pandas 1) def test_timestamp_format_inference(rundb_mock): source = CSVSource( path=str(pathlib.Path(__file__).parent / "assets" / "mixed_timestamps.csv") From ed94c4fd31bfdc583f757923322f424c53cebba2 Mon Sep 17 00:00:00 2001 From: TomerShor <90552140+TomerShor@users.noreply.github.com> Date: Sun, 4 Feb 2024 17:31:48 +0200 Subject: [PATCH 002/119] [Project] Fix load project with artifacts test [1.6.x] (#5057) --- tests/system/projects/test_project.py | 3 ++- 1 file changed, 2 insertions(+), 1 deletion(-) diff --git a/tests/system/projects/test_project.py b/tests/system/projects/test_project.py index 81accb82bef..f755255f58d 100644 --- a/tests/system/projects/test_project.py +++ b/tests/system/projects/test_project.py @@ -1277,7 +1277,8 @@ def test_load_project_with_artifact_db_key(self): # create a new project from the same spec, and validate the artifact was loaded properly project3 = mlrun.load_project(context=context, name=project_3_name) - artifacts = project3.list_artifacts(name=artifact_db_key_2) + # since it is imported from yaml, the artifact is saved with the set key + artifacts = project3.list_artifacts(name=another_artifact_key) assert len(artifacts) == 1 assert artifacts[0]["metadata"]["key"] == another_artifact_key From 7f1497dfa85867b201388e640db9564a9df09dbf Mon Sep 17 00:00:00 2001 From: alxtkr77 <3098237+alxtkr77@users.noreply.github.com> Date: Mon, 5 Feb 2024 10:04:14 +0200 Subject: [PATCH 003/119] [Datastore] Clarify error messages when using spark to ingest unsupported sources (#5023) - 1.6.x (#5061) --- mlrun/datastore/sources.py | 11 ++++++++++- 1 file changed, 10 insertions(+), 1 deletion(-) diff --git a/mlrun/datastore/sources.py b/mlrun/datastore/sources.py index 79ae3e787a7..df9e817f13e 100644 --- a/mlrun/datastore/sources.py +++ b/mlrun/datastore/sources.py @@ -118,7 +118,10 @@ def to_spark_df(self, session, named_view=False, time_field=None, columns=None): if named_view: 
df.createOrReplaceTempView(self.name) return self._filter_spark_df(df, time_field, columns) - raise NotImplementedError() + raise NotImplementedError( + f"Conversion of a source of type '{type(self).__name__}' " + "to a Spark dataframe is not possible, as this operation is not supported" + ) def _filter_spark_df(self, df, time_field=None, columns=None): if not (columns or time_field): @@ -1016,6 +1019,12 @@ def add_nuclio_trigger(self, function): return function + def to_spark_df(self, session, named_view=False, time_field=None, columns=None): + raise NotImplementedError( + "Conversion of a source of type 'KafkaSource' " + "to a Spark dataframe is not possible, as this operation is not supported by Spark" + ) + class SQLSource(BaseSourceDriver): kind = "sqldb" From e071ba5d92cb8a554e2f2d6a72fc957e4ec60ba5 Mon Sep 17 00:00:00 2001 From: davesh0812 <85231462+davesh0812@users.noreply.github.com> Date: Mon, 5 Feb 2024 13:34:26 +0200 Subject: [PATCH 004/119] [Model Monitoring] Fix path filtering according to nuclio changes [1.6.x] (#5063) --- mlrun/model_monitoring/stream_processing.py | 4 ++-- 1 file changed, 2 insertions(+), 2 deletions(-) diff --git a/mlrun/model_monitoring/stream_processing.py b/mlrun/model_monitoring/stream_processing.py index 30c8171c8c7..46b48d0ed2f 100644 --- a/mlrun/model_monitoring/stream_processing.py +++ b/mlrun/model_monitoring/stream_processing.py @@ -180,14 +180,14 @@ def apply_event_routing(): apply_event_routing() - # Step 2 - Filter out events with no '-' in path which indicates that the event is supposed to be processed + # Step 2 - Filter out events with '-' in the path basename from going forward # through the next steps of the stream graph def apply_storey_filter_stream_events(): # Remove none values from each event graph.add_step( "storey.Filter", "filter_stream_event", - _fn="('-' not in event.path)", + _fn="('-' not in event.path.split('/')[-1])", full_event=True, ) From 9c490906a9d0cd13dcb787a00fed803cbccfe4f2 Mon Sep 17 00:00:00 2001 From: Eyal Danieli Date: Tue, 6 Feb 2024 20:28:17 +0200 Subject: [PATCH 005/119] [Grafana] Set MLRun API datasource for `Feature Analysis` chart [1.6.x] (#5077) --- .../dashboards/model-monitoring-details.json | 359 ++---------------- server/api/api/endpoints/grafana_proxy.py | 8 - 2 files changed, 37 insertions(+), 330 deletions(-) diff --git a/docs/monitoring/dashboards/model-monitoring-details.json b/docs/monitoring/dashboards/model-monitoring-details.json index 0a1afbe1e04..56c4b5bf4f5 100644 --- a/docs/monitoring/dashboards/model-monitoring-details.json +++ b/docs/monitoring/dashboards/model-monitoring-details.json @@ -24,7 +24,7 @@ "editable": true, "fiscalYearStartMonth": 0, "graphTooltip": 0, - "id": 33, + "id": 13, "links": [ { "icon": "external link", @@ -74,7 +74,8 @@ "mode": "absolute", "steps": [ { - "color": "green" + "color": "green", + "value": null }, { "color": "red", @@ -217,7 +218,7 @@ } ] }, - "pluginVersion": "9.2.2", + "pluginVersion": "9.2.15", "targets": [ { "datasource": "iguazio", @@ -291,7 +292,8 @@ "mode": "absolute", "steps": [ { - "color": "green" + "color": "green", + "value": null }, { "color": "red", @@ -438,7 +440,7 @@ } ] }, - "pluginVersion": "9.2.2", + "pluginVersion": "9.2.15", "targets": [ { "datasource": "iguazio", @@ -479,7 +481,7 @@ "type": "table" }, { - "datasource": "iguazio", + "datasource": "model-monitoring", "description": "Feature analysis of the latest batch", "fieldConfig": { "defaults": { @@ -494,7 +496,8 @@ "mode": "absolute", "steps": [ { - "color": "green" + 
"color": "green", + "value": null }, { "color": "red", @@ -596,337 +599,49 @@ "showHeader": true, "sortBy": [ { - "desc": true, - "displayName": "current_stats" + "desc": false, + "displayName": "Feature" } ] }, - "pluginVersion": "9.2.2", + "pluginVersion": "9.2.15", "targets": [ { - "datasource": "iguazio", + "datasource": "model-monitoring", "rawQuery": true, "refId": "A", - "target": "backend=kv;\ncontainer=users;\ntable=pipelines/$PROJECT/model-endpoints/endpoints;\nfilter=uid==\"$MODELENDPOINT\";\nfields= current_stats;", - "type": "table" - }, - { - "datasource": "iguazio", - "hide": false, - "refId": "B", - "target": "backend=kv; container=users; table=pipelines/$PROJECT/model-endpoints/endpoints; filter=uid==\"$MODELENDPOINT\"; fields= feature_stats;", - "type": "table" - }, - { - "datasource": "iguazio", - "hide": false, - "refId": "C", - "target": "backend=kv; container=users; table=pipelines/$PROJECT/model-endpoints/endpoints; filter=uid==\"$MODELENDPOINT\"; fields= drift_measures;", + "target": "target_endpoint=individual_feature_analysis;endpoint_id=$MODELENDPOINT;project=$PROJECT", "type": "table" } ], "title": "Features Analysis", "transformations": [ - { - "id": "extractFields", - "options": { - "format": "auto", - "replace": false, - "source": "current_stats" - } - }, - { - "id": "extractFields", - "options": { - "format": "auto", - "source": "feature_stats" - } - }, - { - "id": "extractFields", - "options": { - "replace": false, - "source": "drift_measures" - } - }, - { - "id": "merge", - "options": {} - }, - { - "id": "reduce", - "options": { - "includeTimeField": false, - "labelsToFields": false, - "mode": "seriesToRows", - "reducers": [ - "allValues" - ] - } - }, - { - "id": "filterByValue", - "options": { - "filters": [ - { - "config": { - "id": "equal", - "options": { - "value": "feature_stats" - } - }, - "fieldName": "Field" - }, - { - "config": { - "id": "equal", - "options": { - "value": "current_stats" - } - }, - "fieldName": "Field" - }, - { - "config": { - "id": "equal", - "options": { - "value": "timestamp" - } - }, - "fieldName": "Field" - }, - { - "config": { - "id": "equal", - "options": { - "value": "drift_measures" - } - }, - "fieldName": "Field" - }, - { - "config": { - "id": "equal", - "options": { - "value": "kld_sum" - } - }, - "fieldName": "Field" - }, - { - "config": { - "id": "equal", - "options": { - "value": "kld_mean" - } - }, - "fieldName": "Field" - }, - { - "config": { - "id": "equal", - "options": { - "value": "tvd_mean" - } - }, - "fieldName": "Field" - }, - { - "config": { - "id": "equal", - "options": { - "value": "tvd_sum" - } - }, - "fieldName": "Field" - }, - { - "config": { - "id": "equal", - "options": { - "value": "hellinger_sum" - } - }, - "fieldName": "Field" - }, - { - "config": { - "id": "equal", - "options": { - "value": "hellinger_mean" - } - }, - "fieldName": "Field" - } - ], - "match": "any", - "type": "exclude" - } - }, - { - "id": "extractFields", - "options": { - "replace": false, - "source": "All values" - } - }, - { - "id": "filterFieldsByName", - "options": { - "include": { - "names": [ - "Field", - "0", - "1", - "2" - ] - } - } - }, - { - "id": "extractFields", - "options": { - "replace": false, - "source": "0" - } - }, - { - "id": "filterByValue", - "options": { - "filters": [ - { - "config": { - "id": "isNull", - "options": {} - }, - "fieldName": "1" - }, - { - "config": { - "id": "greater", - "options": { - "value": 0 - } - }, - "fieldName": "2" - } - ], - "match": "any", - "type": "exclude" - } - }, - { - "id": 
"extractFields", - "options": { - "format": "json", - "source": "1" - } - }, - { - "id": "extractFields", - "options": { - "source": "2" - } - }, - { - "id": "filterFieldsByName", - "options": { - "include": { - "names": [ - "Field", - "mean 1", - "min 1", - "max 1", - "mean 2", - "min 2", - "max 2", - "tvd", - "hellinger", - "kld" - ] - } - } - }, - { - "id": "renameByRegex", - "options": { - "regex": "mean 1", - "renamePattern": "Actual Mean" - } - }, - { - "id": "renameByRegex", - "options": { - "regex": "min 1", - "renamePattern": "Actual Min" - } - }, - { - "id": "renameByRegex", - "options": { - "regex": "max 1", - "renamePattern": "Actual Max" - } - }, - { - "id": "renameByRegex", - "options": { - "regex": "mean 2", - "renamePattern": "Expected Mean" - } - }, - { - "id": "renameByRegex", - "options": { - "regex": "min 2", - "renamePattern": "Expected Min" - } - }, - { - "id": "renameByRegex", - "options": { - "regex": "max 2", - "renamePattern": "Expected Max" - } - }, - { - "id": "renameByRegex", - "options": { - "regex": "tvd", - "renamePattern": "TVD" - } - }, - { - "id": "renameByRegex", - "options": { - "regex": "hellinger", - "renamePattern": "Hellinger" - } - }, - { - "id": "renameByRegex", - "options": { - "regex": "kld", - "renamePattern": "KLD" - } - }, { "id": "organize", "options": { - "excludeByName": {}, + "excludeByName": { + "count": true, + "idx": true, + "model": true + }, "indexByName": { - "Actual Max": 6, - "Actual Mean": 2, - "Actual Min": 4, - "Expected Max": 5, - "Expected Mean": 1, - "Expected Min": 3, - "Field": 0, - "Hellinger": 8, - "KLD": 9, - "TVD": 7 + "actual_max": 3, + "actual_mean": 2, + "actual_min": 1, + "expected_max": 4, + "expected_mean": 5, + "expected_min": 6, + "feature_name": 0 }, - "renameByName": {} + "renameByName": { + "actual_max": "Actual Max", + "actual_mean": "Actual Mean", + "actual_min": "Actual Min", + "expected_max": "Expected Min", + "expected_mean": "Expected Mean", + "expected_min": "Expected Max", + "feature_name": "Feature" + } } } ], @@ -968,7 +683,7 @@ "alertThreshold": true }, "percentage": false, - "pluginVersion": "9.2.2", + "pluginVersion": "9.2.15", "pointradius": 2, "points": false, "renderer": "flot", diff --git a/server/api/api/endpoints/grafana_proxy.py b/server/api/api/endpoints/grafana_proxy.py index 0f72fd9114f..1ceee7b0330 100644 --- a/server/api/api/endpoints/grafana_proxy.py +++ b/server/api/api/endpoints/grafana_proxy.py @@ -13,7 +13,6 @@ # limitations under the License. # import asyncio -import warnings from http import HTTPStatus from typing import List, Union @@ -124,13 +123,6 @@ async def grafana_proxy_model_endpoints_query( model-endpoint monitoring functions. """ - warnings.warn( - "This api is deprecated in 1.3.1 and will be removed in 1.5.0. 
" - "Please update grafana model monitoring dashboards that use a different data source", - # TODO: remove in 1.5.0 - FutureWarning, - ) - body = await request.json() query_parameters = server.api.crud.model_monitoring.grafana.parse_query_parameters( body From 6cbc2dd174c0d286873b13f17c193eecf1d66803 Mon Sep 17 00:00:00 2001 From: tomer-mamia <125267619+tomerm-iguazio@users.noreply.github.com> Date: Tue, 6 Feb 2024 21:35:11 +0200 Subject: [PATCH 006/119] [Artifacts] Fix log dataset artifact with df [1.6.x] (#5074) --- mlrun/artifacts/manager.py | 3 +++ tests/artifacts/test_artifacts.py | 21 +++++++++++++++++++++ 2 files changed, 24 insertions(+) diff --git a/mlrun/artifacts/manager.py b/mlrun/artifacts/manager.py index 3e762b4ba44..7b6f4dca0f5 100644 --- a/mlrun/artifacts/manager.py +++ b/mlrun/artifacts/manager.py @@ -132,6 +132,9 @@ def ensure_artifact_source_file_exists(item, path, body): # ModelArtifact is a directory. if isinstance(item, ModelArtifact): return + # in DatasetArtifact + if hasattr(item, "df") and item.df is not None: + return parsed_url = urlparse(path) schema = parsed_url.scheme # we are not checking remote paths yet. diff --git a/tests/artifacts/test_artifacts.py b/tests/artifacts/test_artifacts.py index 4b9203121b2..39e996ec9dd 100644 --- a/tests/artifacts/test_artifacts.py +++ b/tests/artifacts/test_artifacts.py @@ -12,6 +12,7 @@ # See the License for the specific language governing permissions and # limitations under the License. # +import os.path import pathlib import tempfile import typing @@ -378,6 +379,26 @@ def test_ensure_artifact_source_file_exists(local_path, fail): context.log_artifact(item=artifact, local_path=local_path) +@pytest.mark.parametrize( + "df, fail", + [ + (pd.DataFrame({"num": [0, 1, 2], "color": ["green", "blue", "red"]}), False), + (None, True), + ], +) +def test_ensure_artifact_source_file_exists_by_df(df, fail): + context = mlrun.get_or_create_ctx("test") + + with tempfile.TemporaryDirectory() as temp_dir: + full_path = os.path.join(temp_dir, "df.parquet") + if fail: + with pytest.raises(mlrun.errors.MLRunInvalidArgumentError) as error: + context.log_dataset(key=str(uuid.uuid4()), df=df, local_path=full_path) + assert "Failed to log an artifact, file does not exists" in str(error.value) + else: + context.log_dataset(key=str(uuid.uuid4()), df=df, local_path=full_path) + + @pytest.mark.parametrize( "artifact,artifact_path,expected_hash,expected_target_path,expected_error", [ From d8086193c40a961bc4cf680fd3bd9d71b14ab3c5 Mon Sep 17 00:00:00 2001 From: alxtkr77 <3098237+alxtkr77@users.noreply.github.com> Date: Tue, 6 Feb 2024 21:42:55 +0200 Subject: [PATCH 007/119] [Datastore] Fix previewing artifacts associated with datastore profile [1.6.x] (#5078) --- mlrun/datastore/datastore.py | 10 +++++++--- mlrun/datastore/datastore_profile.py | 4 ++-- server/api/api/endpoints/files.py | 13 +++++++++++-- server/api/crud/datastore_profiles.py | 11 +++++++++++ server/api/rundb/sqldb.py | 7 ++++++- tests/api/api/test_files.py | 10 ++++++---- 6 files changed, 43 insertions(+), 12 deletions(-) diff --git a/mlrun/datastore/datastore.py b/mlrun/datastore/datastore.py index 2854656e752..3817a7c8500 100644 --- a/mlrun/datastore/datastore.py +++ b/mlrun/datastore/datastore.py @@ -182,16 +182,20 @@ def object( url, project, allow_empty_resources, secrets ) - store, subpath = self.get_or_create_store(url, secrets=secrets) + store, subpath = self.get_or_create_store( + url, secrets=secrets, project_name=project + ) return DataItem(key, store, subpath, url, 
meta=meta, artifact_url=artifact_url) - def get_or_create_store(self, url, secrets: dict = None) -> (DataStore, str): + def get_or_create_store( + self, url, secrets: dict = None, project_name="" + ) -> (DataStore, str): schema, endpoint, parsed_url = parse_url(url) subpath = parsed_url.path store_key = f"{schema}://{endpoint}" if schema == "ds": - datastore_profile = datastore_profile_read(url) + datastore_profile = datastore_profile_read(url, project_name) if secrets and datastore_profile.secrets(): secrets = merge(secrets, datastore_profile.secrets()) else: diff --git a/mlrun/datastore/datastore_profile.py b/mlrun/datastore/datastore_profile.py index bd88f53ed31..e7766d7171d 100644 --- a/mlrun/datastore/datastore_profile.py +++ b/mlrun/datastore/datastore_profile.py @@ -367,7 +367,7 @@ def safe_literal_eval(value): ) -def datastore_profile_read(url): +def datastore_profile_read(url, project_name=""): parsed_url = urlparse(url) if parsed_url.scheme.lower() != "ds": raise mlrun.errors.MLRunInvalidArgumentError( @@ -375,7 +375,7 @@ def datastore_profile_read(url): ) profile_name = parsed_url.hostname - project_name = parsed_url.username or mlrun.mlconf.default_project + project_name = project_name or mlrun.mlconf.default_project datastore = TemporaryClientDatastoreProfiles().get(profile_name) if datastore: return datastore diff --git a/server/api/api/endpoints/files.py b/server/api/api/endpoints/files.py index c13c3b58ad9..e3cc5d463fd 100644 --- a/server/api/api/endpoints/files.py +++ b/server/api/api/endpoints/files.py @@ -75,7 +75,15 @@ async def get_files_with_project_secrets( secrets = await _verify_and_get_project_secrets(project, auth_info) return await run_in_threadpool( - _get_files, schema, objpath, user, size, offset, auth_info, secrets=secrets + _get_files, + schema, + objpath, + user, + size, + offset, + auth_info, + secrets=secrets, + project=project, ) @@ -136,6 +144,7 @@ def _get_files( offset: int, auth_info: mlrun.common.schemas.AuthInfo, secrets: dict = None, + project: str = "", ): _, filename = objpath.split(objpath) @@ -154,7 +163,7 @@ def _get_files( body = None try: - obj = store_manager.object(url=objpath, secrets=secrets) + obj = store_manager.object(url=objpath, secrets=secrets, project=project) if objpath.endswith("/"): listdir = obj.listdir() return { diff --git a/server/api/crud/datastore_profiles.py b/server/api/crud/datastore_profiles.py index 8c5cb18ceaa..0fc9030ba12 100644 --- a/server/api/crud/datastore_profiles.py +++ b/server/api/crud/datastore_profiles.py @@ -109,3 +109,14 @@ def delete_datastore_profile( ) # Delete private part of the secret self._delete_secret(project, profile_name) + + def get_datastore_profile( + self, + session: sqlalchemy.orm.Session, + profile_name: str = None, + project: str = None, + ): + project = project or mlrun.mlconf.default_project + return server.api.utils.singletons.db.get_db().get_datastore_profile( + session, profile_name, project + ) diff --git a/server/api/rundb/sqldb.py b/server/api/rundb/sqldb.py index f288f796a7e..463099dbcef 100644 --- a/server/api/rundb/sqldb.py +++ b/server/api/rundb/sqldb.py @@ -919,7 +919,12 @@ def watch_log(self, uid, project="", watch=True, offset=0): def get_datastore_profile( self, name: str, project: str ) -> Optional[mlrun.common.schemas.DatastoreProfile]: - raise NotImplementedError() + return self._transform_db_error( + server.api.db.session.run_function_with_new_db_session, + server.api.crud.DatastoreProfiles().get_datastore_profile, + name, + project, + ) def 
delete_datastore_profile(self, name: str, project: str): raise NotImplementedError() diff --git a/tests/api/api/test_files.py b/tests/api/api/test_files.py index b176910a22c..af0dbbf3679 100644 --- a/tests/api/api/test_files.py +++ b/tests/api/api/test_files.py @@ -78,20 +78,22 @@ def test_files(db: Session, client: TestClient, files_mock, k8s_secrets_mock) -> resp = client.get(f"files?path={path}") assert resp - files_mock.assert_called_once_with(url=path, secrets=env_secrets) + files_mock.assert_called_once_with(url=path, secrets=env_secrets, project="") files_mock.reset_mock() resp = client.get(f"projects/{project}/files?path={path}") assert resp - files_mock.assert_called_once_with(url=path, secrets=full_secrets) + files_mock.assert_called_once_with(url=path, secrets=full_secrets, project="proj1") files_mock.reset_mock() resp = client.get(f"projects/wrong-project/files?path={path}") assert resp - files_mock.assert_called_once_with(url=path, secrets=env_secrets) + files_mock.assert_called_once_with( + url=path, secrets=env_secrets, project="wrong-project" + ) files_mock.reset_mock() resp = client.get(f"projects/{project}/files?path={path}&use-secrets=false") assert resp - files_mock.assert_called_once_with(url=path, secrets=env_secrets) + files_mock.assert_called_once_with(url=path, secrets=env_secrets, project="proj1") files_mock.reset_mock() From e4eb1a8a69fcdacbc8581c819074b83705793a3d Mon Sep 17 00:00:00 2001 From: Alon Maor <48641682+alonmr@users.noreply.github.com> Date: Tue, 6 Feb 2024 22:29:27 +0200 Subject: [PATCH 008/119] [API] Do not enrich workflow schedule when given spec [1.6.x] (#5075) --- server/api/api/endpoints/workflows.py | 15 +++++++++------ tests/api/api/test_workflows.py | 27 +++++++++++++++++++++++++-- 2 files changed, 34 insertions(+), 8 deletions(-) diff --git a/server/api/api/endpoints/workflows.py b/server/api/api/endpoints/workflows.py index fff09d195b3..d3e57bccf62 100644 --- a/server/api/api/endpoints/workflows.py +++ b/server/api/api/endpoints/workflows.py @@ -285,16 +285,19 @@ def _fill_workflow_missing_fields_from_project( Fill the workflow spec details from the project object, with favour to spec :param project: MLRun project that contains the workflow. - :param workflow_name: workflow name - :param spec: workflow spec input - :param arguments: arguments to workflow + :param workflow_name: Workflow name + :param spec: Workflow spec input + :param arguments: Arguments to workflow - :return: completed workflow spec + :return: Completed workflow spec """ - # while we expect workflow to be exists on project spec, we might get a case where the workflow is not exists. - # this is possible when workflow is not set prior to its execution. + # While we expect workflow to exist on project spec, we might get a case where the workflow does not exist. + # This is possible when workflow is not set prior to its execution. workflow = _get_workflow_by_name(project, workflow_name) + if spec and spec.schedule is None: + # Do not enrich with schedule from project's workflow when spec was provided + workflow.pop("schedule", None) if spec: # Merge between the workflow spec provided in the request with existing diff --git a/tests/api/api/test_workflows.py b/tests/api/api/test_workflows.py index b55d16bde2a..f0ccc90979e 100644 --- a/tests/api/api/test_workflows.py +++ b/tests/api/api/test_workflows.py @@ -13,6 +13,7 @@ # limitations under the License. 
# import random +import unittest.mock from http import HTTPStatus from fastapi.testclient import TestClient @@ -97,14 +98,36 @@ def test_get_workflow_bad_project(db: Session, client: TestClient): ) -def _create_proj_with_workflow(client: TestClient): +def test_schedule_not_enriched(db: Session, client: TestClient, k8s_secrets_mock): + _create_proj_with_workflow(client, schedule="* * * * 1") + + # Spec with bad schedule: + workflow_body = {"spec": {"name": WORKFLOW_NAME}} + + class UIDMock: + def uid(self): + return "some uid" + + with unittest.mock.patch.object( + server.api.crud.WorkflowRunners, "run", return_value=UIDMock() + ): + resp = client.post( + f"projects/{PROJECT_NAME}/workflows/{WORKFLOW_NAME}/submit", + json=workflow_body, + ) + assert resp.status_code == HTTPStatus.ACCEPTED + response_data = resp.json() + assert response_data["schedule"] is None + + +def _create_proj_with_workflow(client: TestClient, **extra_workflow_spec): project = mlrun.common.schemas.Project( metadata=mlrun.common.schemas.ProjectMetadata(name=PROJECT_NAME), spec=mlrun.common.schemas.ProjectSpec( description="banana", source="git://github.com/mlrun/project-demo", goals="some goals", - workflows=[{"name": WORKFLOW_NAME}], + workflows=[{"name": WORKFLOW_NAME, **extra_workflow_spec}], ), ) client.post("projects", json=project.dict()) From edbd1274acb46df197ff944297a18e1170c9c3fd Mon Sep 17 00:00:00 2001 From: Eyal Danieli Date: Tue, 6 Feb 2024 22:31:01 +0200 Subject: [PATCH 009/119] [Model Monitoring] Fix monitoring jobs API query permissions [1.6.x] (#5076) --- server/api/api/endpoints/jobs.py | 39 ++++++++++++++++++++++++++++++- server/api/utils/clients/chief.py | 13 +++++++++++ 2 files changed, 51 insertions(+), 1 deletion(-) diff --git a/server/api/api/endpoints/jobs.py b/server/api/api/endpoints/jobs.py index e9dde6beb31..6d44cad31b5 100644 --- a/server/api/api/endpoints/jobs.py +++ b/server/api/api/endpoints/jobs.py @@ -18,6 +18,7 @@ from sqlalchemy.orm import Session import mlrun.common.schemas +import server.api.utils.auth.verifier import server.api.utils.clients.chief from mlrun.model_monitoring import TrackingPolicy from mlrun.utils import logger @@ -29,8 +30,9 @@ @router.post("/batch-monitoring") -def deploy_monitoring_batch_job( +async def deploy_monitoring_batch_job( project: str, + request: fastapi.Request, auth_info: mlrun.common.schemas.AuthInfo = fastapi.Depends( deps.authenticate_request ), @@ -43,6 +45,7 @@ def deploy_monitoring_batch_job( To submit a scheduled job as well, please set with_schedule = True. :param project: Project name. + :param request: fastapi request for the HTTP connection. :param auth_info: The auth info of the request. :param db_session: a session that manages the current dialog with the database. :param default_batch_image: The default image of the model monitoring batch job. By default, the image @@ -52,6 +55,32 @@ def deploy_monitoring_batch_job( :return: model monitoring batch job as a dictionary. 
""" + await server.api.utils.auth.verifier.AuthVerifier().query_project_resource_permissions( + resource_type=mlrun.common.schemas.AuthorizationResourceTypes.function, + project_name=project, + resource_name=mlrun.common.schemas.model_monitoring.MonitoringFunctionNames.BATCH, + action=mlrun.common.schemas.AuthorizationAction.store, + auth_info=auth_info, + ) + + if with_schedule and ( + mlrun.mlconf.httpdb.clusterization.role + != mlrun.common.schemas.ClusterizationRole.chief + ): + logger.info( + "Requesting to deploy model monitoring batch job, re-routing to chief", + function_name="model-monitoring-batch", + project=project, + ) + chief_client = server.api.utils.clients.chief.Client() + params = { + "default_batch_image": default_batch_image, + "with_schedule": with_schedule, + } + return await chief_client.deploy_monitoring_batch_job( + project=project, request=request, json=params + ) + model_monitoring_access_key = None if not mlrun.mlconf.is_ce_mode(): # Generate V3IO Access Key @@ -107,6 +136,14 @@ async def create_model_monitoring_controller( is running. By default, the base period is 5 minutes. """ + await server.api.utils.auth.verifier.AuthVerifier().query_project_resource_permissions( + resource_type=mlrun.common.schemas.AuthorizationResourceTypes.function, + project_name=project, + resource_name=mlrun.common.schemas.model_monitoring.MonitoringFunctionNames.APPLICATION_CONTROLLER, + action=mlrun.common.schemas.AuthorizationAction.store, + auth_info=auth_info, + ) + if ( mlrun.mlconf.httpdb.clusterization.role != mlrun.common.schemas.ClusterizationRole.chief diff --git a/server/api/utils/clients/chief.py b/server/api/utils/clients/chief.py index a039249cc88..526d005e0b0 100644 --- a/server/api/utils/clients/chief.py +++ b/server/api/utils/clients/chief.py @@ -225,6 +225,19 @@ async def create_model_monitoring_controller( json, ) + async def deploy_monitoring_batch_job( + self, project: str, request: fastapi.Request, json: dict + ): + """ + Model monitoring batch includes a scheduled job which is handled by the chief + """ + return await self._proxy_request_to_chief( + "POST", + f"projects/{project}/jobs/batch-monitoring", + request, + json, + ) + async def _proxy_request_to_chief( self, method, From e9ebb41e73fa82a083dcfb260aa9f9713ab634be Mon Sep 17 00:00:00 2001 From: Gal Topper Date: Wed, 7 Feb 2024 14:20:11 +0800 Subject: [PATCH 010/119] [Model Monitoring] Enable explicit ack for V3IO streams [1.6.x] (#5079) --- server/api/crud/model_monitoring/deployment.py | 15 +++++++++------ 1 file changed, 9 insertions(+), 6 deletions(-) diff --git a/server/api/crud/model_monitoring/deployment.py b/server/api/crud/model_monitoring/deployment.py index 558327ecf7a..2d113014ee6 100644 --- a/server/api/crud/model_monitoring/deployment.py +++ b/server/api/crud/model_monitoring/deployment.py @@ -669,15 +669,18 @@ def _apply_stream_trigger( function_name=function_name, ) if stream_path.startswith("v3io://"): + kwargs = {} + if function_name != mm_constants.MonitoringFunctionNames.STREAM: + kwargs["access_key"] = model_monitoring_access_key + if mlrun.mlconf.is_explicit_ack(): + kwargs["explicit_ack_mode"] = "explicitOnly" + kwargs["workerAllocationMode"] = "static" + # Generate V3IO stream trigger function.add_v3io_stream_trigger( stream_path=stream_path, - name="monitoring_stream_trigger" - if function_name is None - else f"monitoring_{function_name}_trigger", - access_key=model_monitoring_access_key - if function_name != mm_constants.MonitoringFunctionNames.STREAM - else None, + 
name=f"monitoring_{function_name or 'stream'}_trigger", + **kwargs, ) # Add the default HTTP source http_source = mlrun.datastore.sources.HttpSource() From 46b2ccb3f8f11d5cd37d232a733779ccfeba4c5d Mon Sep 17 00:00:00 2001 From: Gal Topper Date: Wed, 7 Feb 2024 17:15:11 +0800 Subject: [PATCH 011/119] [Datastore] Fix `ParquetTarget.write_dataframe()` and improve error [1.6.x] (#5081) --- mlrun/datastore/targets.py | 4 +++- mlrun/utils/helpers.py | 17 ++++++++++++++++- tests/datastore/test_targets.py | 18 +++++++++++++++++- 3 files changed, 36 insertions(+), 3 deletions(-) diff --git a/mlrun/datastore/targets.py b/mlrun/datastore/targets.py index 051168e9fcf..9111040918d 100644 --- a/mlrun/datastore/targets.py +++ b/mlrun/datastore/targets.py @@ -456,7 +456,7 @@ def _get_store_and_path(self): self.get_target_path(), credentials_prefix_secrets, ) - if self.get_target_path().startswith("ds://"): + if self.get_target_path() and self.get_target_path().startswith("ds://"): return store, store.url + resolved_store_path else: return store, self.get_target_path() @@ -1984,6 +1984,8 @@ def _get_target_path(driver, resource, run_id_mode=False): def generate_path_with_chunk(target, chunk_id, path): + if path is None: + return "" prefix, suffix = os.path.splitext(path) if chunk_id and not target.partitioned and not target.time_partitioning_granularity: return f"{prefix}/{chunk_id:0>4}{suffix}" diff --git a/mlrun/utils/helpers.py b/mlrun/utils/helpers.py index 747c9b2a343..e2ac6bced74 100644 --- a/mlrun/utils/helpers.py +++ b/mlrun/utils/helpers.py @@ -1571,10 +1571,25 @@ def iterate_list_by_chunks( def to_parquet(df, *args, **kwargs): + import pyarrow.lib + # version set for pyspark compatibility, and is needed as of pyarrow 13 due to timestamp incompatibility if "version" not in kwargs: kwargs["version"] = "2.4" - df.to_parquet(*args, **kwargs) + try: + df.to_parquet(*args, **kwargs) + except pyarrow.lib.ArrowInvalid as ex: + if re.match( + "Fragment would be written into [0-9]+. partitions. This exceeds the maximum of [0-9]+", + str(ex), + ): + raise mlrun.errors.MLRunRuntimeError( + """Maximum number of partitions exceeded. To resolve this, change +partition granularity by setting time_partitioning_granularity or partition_cols, or disable partitioning altogether by +setting partitioned=False""" + ) from ex + else: + raise ex def is_ecr_url(registry: str) -> bool: diff --git a/tests/datastore/test_targets.py b/tests/datastore/test_targets.py index 8e543fe0a8d..301c90e7e38 100644 --- a/tests/datastore/test_targets.py +++ b/tests/datastore/test_targets.py @@ -13,11 +13,12 @@ # limitations under the License. import os +import pandas as pd import pytest import mlrun.errors from mlrun.datastore import StreamTarget -from mlrun.datastore.targets import BaseStoreTarget, KafkaTarget +from mlrun.datastore.targets import BaseStoreTarget, KafkaTarget, ParquetTarget from mlrun.feature_store import FeatureSet @@ -95,3 +96,18 @@ def test_kafka_target_without_path(): kafka_target.set_resource(fset) with pytest.raises(mlrun.errors.MLRunInvalidArgumentError): kafka_target.add_writer_step(mock_graph, None, None, key_columns={}) + + +# ML-5622, ML-5677 +def test_write_with_too_many_partitions(): + data = { + "my_int": range(2000), + } + df = pd.DataFrame(data) + + parquet_target = ParquetTarget(partition_cols=["my_int"]) + with pytest.raises( + mlrun.errors.MLRunRuntimeError, + match="Maximum number of partitions exceeded. 
To resolve this.*", + ): + parquet_target.write_dataframe(df) From abdb916d89f7e37577c4de80580ba0d8cc218359 Mon Sep 17 00:00:00 2001 From: davesh0812 <85231462+davesh0812@users.noreply.github.com> Date: Wed, 7 Feb 2024 11:17:44 +0200 Subject: [PATCH 012/119] [Model Monitoring] Saving feature_stats as empty dict and not as null in the model_endpoint [1.6.x] (#5080) --- server/api/crud/model_monitoring/model_endpoints.py | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/server/api/crud/model_monitoring/model_endpoints.py b/server/api/crud/model_monitoring/model_endpoints.py index 4e39d1d696c..fcd05a7493a 100644 --- a/server/api/crud/model_monitoring/model_endpoints.py +++ b/server/api/crud/model_monitoring/model_endpoints.py @@ -89,7 +89,7 @@ def create_model_endpoint( model_obj.spec.feature_stats ) ) - model_endpoint.status.feature_stats = model_obj.spec.feature_stats + model_endpoint.status.feature_stats = model_obj.spec.feature_stats # Get labels from model object if not found in model endpoint object if not model_endpoint.spec.label_names and model_obj.spec.outputs: model_label_names = [ From 8270e8ecafac16444b22b6f6c483b299e79f02c8 Mon Sep 17 00:00:00 2001 From: Jonathan Daniel <36337649+jond01@users.noreply.github.com> Date: Wed, 7 Feb 2024 11:21:07 +0200 Subject: [PATCH 013/119] [Tests] Use a unique name for the monitoring app sys test [1.6.x] (#5082) --- tests/system/model_monitoring/test_app.py | 7 +++++-- 1 file changed, 5 insertions(+), 2 deletions(-) diff --git a/tests/system/model_monitoring/test_app.py b/tests/system/model_monitoring/test_app.py index 0ba12569d98..eb70eb2bb75 100644 --- a/tests/system/model_monitoring/test_app.py +++ b/tests/system/model_monitoring/test_app.py @@ -19,7 +19,7 @@ import uuid from concurrent.futures import ThreadPoolExecutor from dataclasses import dataclass, field -from datetime import timedelta +from datetime import datetime, timedelta from pathlib import Path import numpy as np @@ -126,7 +126,10 @@ def _test_v3io_records(cls, ep_id: str) -> None: @TestMLRunSystem.skip_test_if_env_not_configured @pytest.mark.enterprise class TestMonitoringAppFlow(TestMLRunSystem, _V3IORecordsChecker): - project_name = "test-monitoring-app-flow" + project_name = "test-app-flow" + project_name += datetime.now().strftime( # remove when ML-5588 is fixed + "%y%m%d%H%M" + ) # Set image to "/mlrun:" for local testing image: typing.Optional[str] = None From 4b4c359fe56d71f36c3ab3c6de08439cfcee0971 Mon Sep 17 00:00:00 2001 From: alxtkr77 <3098237+alxtkr77@users.noreply.github.com> Date: Wed, 7 Feb 2024 15:19:35 +0200 Subject: [PATCH 014/119] [FeatureStore] Fix misleading deprecation messages [1.6.x] (#5083) --- mlrun/feature_store/api.py | 7 ++++--- 1 file changed, 4 insertions(+), 3 deletions(-) diff --git a/mlrun/feature_store/api.py b/mlrun/feature_store/api.py index af390a83760..88c268db94b 100644 --- a/mlrun/feature_store/api.py +++ b/mlrun/feature_store/api.py @@ -93,7 +93,8 @@ def _features_to_vector_and_check_permissions(features, update_stats): @deprecated( version="1.6.0", - reason="'get_offline_features' will be removed in 1.8.0, use 'FeatureVector.get_offline_features()' instead", + reason="get_offline_features() will be removed in 1.8.0, please instead use " + "get_feature_vector('store://feature_vector_name').get_offline_features()", category=FutureWarning, ) def get_offline_features( @@ -269,8 +270,8 @@ def _get_offline_features( @deprecated( version="1.6.0", - reason="'get_online_feature_service' will be removed in 1.8.0, " - "use 
'FeatureVector.get_online_feature_service()' instead", + reason="get_online_feature_service() will be removed in 1.8.0, please instead use " + "get_feature_vector('store://feature_vector_name').get_online_feature_service()", category=FutureWarning, ) def get_online_feature_service( From ba9cef2a05a944d167cb615ff120644ed5cb8a3b Mon Sep 17 00:00:00 2001 From: Liran BG Date: Thu, 8 Feb 2024 10:37:47 +0200 Subject: [PATCH 015/119] [Project] Fix DELETE Project not returning 412 when Nuclio functions exists [1.6.x] (#5087) --- server/api/crud/projects.py | 16 ++++++++++++++-- 1 file changed, 14 insertions(+), 2 deletions(-) diff --git a/server/api/crud/projects.py b/server/api/crud/projects.py index bf3854b0e29..38464b73261 100644 --- a/server/api/crud/projects.py +++ b/server/api/crud/projects.py @@ -26,6 +26,7 @@ import mlrun.utils.singleton import server.api.crud import server.api.db.session +import server.api.utils.clients.nuclio import server.api.utils.events.events_factory as events_factory import server.api.utils.projects.remotes.follower as project_follower import server.api.utils.singletons.db @@ -125,9 +126,11 @@ def verify_project_is_empty(self, session: sqlalchemy.orm.Session, name: str): server.api.utils.singletons.db.get_db().verify_project_has_no_related_resources( session, name ) - self._verify_project_has_no_external_resources(name) + self._verify_project_has_no_external_resources(session, name) - def _verify_project_has_no_external_resources(self, project: str): + def _verify_project_has_no_external_resources( + self, session: sqlalchemy.orm.Session, project: str + ): # Resources which are not tracked in the MLRun DB need to be verified here. Currently these are project # secrets and model endpoints. server.api.crud.ModelEndpoints().verify_project_has_no_model_endpoints(project) @@ -144,6 +147,15 @@ def _verify_project_has_no_external_resources(self, project: str): f"Project {project} can not be deleted since related resources found: project secrets" ) + # verify project can be deleted in nuclio + if mlrun.config.config.nuclio_dashboard_url: + nuclio_client = server.api.utils.clients.nuclio.Client() + nuclio_client.delete_project( + session, + project, + deletion_strategy=mlrun.common.schemas.DeletionStrategy.check, + ) + def delete_project_resources( self, session: sqlalchemy.orm.Session, From aa2061ab994bfc71e2fbc7372980fbde89a65bf3 Mon Sep 17 00:00:00 2001 From: Liran BG Date: Thu, 8 Feb 2024 16:42:39 +0200 Subject: [PATCH 016/119] [SystemTest] Ensure parquet file when creating dataframe [1.6.x] (#5098) --- tests/system/projects/test_project.py | 3 ++- 1 file changed, 2 insertions(+), 1 deletion(-) diff --git a/tests/system/projects/test_project.py b/tests/system/projects/test_project.py index f755255f58d..6574d874d46 100644 --- a/tests/system/projects/test_project.py +++ b/tests/system/projects/test_project.py @@ -1147,13 +1147,14 @@ def test_export_import_dataset_artifact(self): data = {"col1": [1, 2], "col2": [3, 4]} data_frame = pd.DataFrame(data=data) key = "my-df" + data_frame.to_parquet(local_path) dataset_artifact = mlrun.artifacts.dataset.DatasetArtifact( key, df=data_frame, format="parquet", target_path=local_path ) project_1.log_artifact(dataset_artifact) # export the artifact to a zip file - dataset_artifact = project_1.get_artifact("my-df") + dataset_artifact = project_1.get_artifact(key) export_path = f"{str(self.assets_path)}/exported_dataset.zip" dataset_artifact.export(export_path) From 791c22992c199eadf0d8e6ba36414593eff9b04c Mon Sep 17 00:00:00 2001 From: 
alxtkr77 <3098237+alxtkr77@users.noreply.github.com> Date: Thu, 8 Feb 2024 17:54:33 +0200 Subject: [PATCH 017/119] [Datastore] Fix GCS storage options handling [1.6.x] (#5099) --- mlrun/datastore/google_cloud_storage.py | 2 +- tests/datastore/test_google_cloud_storage.py | 33 ++++++++++++++++++++ 2 files changed, 34 insertions(+), 1 deletion(-) create mode 100644 tests/datastore/test_google_cloud_storage.py diff --git a/mlrun/datastore/google_cloud_storage.py b/mlrun/datastore/google_cloud_storage.py index 6c1900a6a21..04f7bc2a6ba 100644 --- a/mlrun/datastore/google_cloud_storage.py +++ b/mlrun/datastore/google_cloud_storage.py @@ -60,7 +60,7 @@ def get_storage_options(self): except json.JSONDecodeError: # If it's not json, handle it as a filename token = credentials - return self._sanitize_storage_options(dict(token=token)) + return self._sanitize_storage_options(dict(token=token)) else: logger.info( "No GCS credentials available - auth will rely on auto-discovery of credentials" diff --git a/tests/datastore/test_google_cloud_storage.py b/tests/datastore/test_google_cloud_storage.py new file mode 100644 index 00000000000..415c60956cf --- /dev/null +++ b/tests/datastore/test_google_cloud_storage.py @@ -0,0 +1,33 @@ +# Copyright 2024 Iguazio +# +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. +# You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. 
+# + +from unittest.mock import MagicMock + +from mlrun.datastore.google_cloud_storage import GoogleCloudStorageStore + + +def test_get_storage_options(): + st = GoogleCloudStorageStore(parent="parent", schema="schema", name="name") + + st._get_secret_or_env = MagicMock(return_value=None) + assert st.get_storage_options() == {} + + st._get_secret_or_env = MagicMock( + return_value='{"key1": "value1", "key2": "value2"}' + ) + assert st.get_storage_options() == {"token": {"key1": "value1", "key2": "value2"}} + + st._get_secret_or_env = MagicMock(return_value="/path/to/gcs_credentials_file") + assert st.get_storage_options() == {"token": "/path/to/gcs_credentials_file"} From 1c9fa9252d96734a95a32f6c3f75294523528816 Mon Sep 17 00:00:00 2001 From: Liran BG Date: Thu, 8 Feb 2024 19:36:24 +0200 Subject: [PATCH 018/119] [Artifacts] Add Index artifact key and query tags with new session [1.6.x] (#5100) --- server/api/db/sqldb/db.py | 77 +++++++++++++------ server/api/db/sqldb/models/models_mysql.py | 2 +- server/api/db/sqldb/models/models_sqlite.py | 2 +- .../c0e342d73bd0_indexing_artifact_v2_key.py | 40 ++++++++++ .../0b224a1b4e0d_indexing_artifact_v2_key.py | 40 ++++++++++ tests/api/db/conftest.py | 6 +- 6 files changed, 139 insertions(+), 28 deletions(-) create mode 100644 server/api/migrations_mysql/versions/c0e342d73bd0_indexing_artifact_v2_key.py create mode 100644 server/api/migrations_sqlite/versions/0b224a1b4e0d_indexing_artifact_v2_key.py diff --git a/server/api/db/sqldb/db.py b/server/api/db/sqldb/db.py index 4dea5e120c8..37de1b00297 100644 --- a/server/api/db/sqldb/db.py +++ b/server/api/db/sqldb/db.py @@ -532,6 +532,7 @@ def store_artifact( project=project, key=key, iteration=iter, + uid=uid, ) db_artifact = existing_artifact self._update_artifact_record_from_dict( @@ -899,15 +900,30 @@ def tag_artifacts( artifacts, project: str, ): + artifacts_keys = [artifact.key for artifact in artifacts] + logger.debug( + "Locking artifacts in db before tagging artifacts", + project=project, + tag=tag_name, + artifacts_keys=artifacts_keys, + ) + # to avoid multiple runs trying to tag the same artifacts simultaneously, - # lock all the artifacts' rows for the entire transaction + # lock the artifacts with the same keys for the entire transaction (using with_for_update). self._query( session, ArtifactV2, project=project, - ).filter(ArtifactV2.key.in_([artifact.key for artifact in artifacts])).order_by( - ArtifactV2.id.asc() - ).populate_existing().with_for_update().all() + ).filter( + ArtifactV2.key.in_(artifacts_keys), + ).order_by(ArtifactV2.id.asc()).populate_existing().with_for_update().all() + + logger.debug( + "Acquired artifacts db lock", + project=project, + tag=tag_name, + artifacts_keys=artifacts_keys, + ) objects = [] for artifact in artifacts: @@ -930,31 +946,37 @@ def tag_artifacts( ) # delete the tags - for old_tag in query.all(): + for old_tag in query: objects.append(old_tag) session.delete(old_tag) - # search for an existing tag with the same name, and points to artifacts with the same key, producer id, - # and iteration. 
this means that the same producer created this artifact, - # and we can update the existing tag - query = ( - self._query( - session, - artifact.Tag, - name=tag_name, - project=project, - obj_name=artifact.key, - ) - .join( - ArtifactV2, - ) - .filter( - ArtifactV2.producer_id == artifact.producer_id, - ArtifactV2.iteration == artifact.iteration, + def _get_tag(_session): + # search for an existing tag with the same name, and points to artifacts with the same key, producer id, + # and iteration. this means that the same producer created this artifact, + # and we can update the existing tag + tag_query = ( + self._query( + _session, + artifact.Tag, + name=tag_name, + project=project, + obj_name=artifact.key, + ) + .join( + ArtifactV2, + ) + .filter( + ArtifactV2.producer_id == artifact.producer_id, + ArtifactV2.iteration == artifact.iteration, + ) ) - ) - tag = query.one_or_none() + return tag_query.one_or_none() + + # to make sure we can list tags that were created during this session in parallel by different processes, + # we need to use a new session. if there is an existing tag, we'll definitely get it, so we can update it + # instead of creating a new tag. + tag = server.api.db.session.run_function_with_new_db_session(_get_tag) if not tag: # create the new tag tag = artifact.Tag( @@ -971,6 +993,13 @@ def tag_artifacts( # this will also release the locks on the artifacts' rows self._commit(session, objects) + logger.debug( + "Released artifacts db lock after tagging artifacts", + project=project, + tag=tag_name, + artifacts_keys=artifacts_keys, + ) + def _mark_best_iteration_artifact( self, session, diff --git a/server/api/db/sqldb/models/models_mysql.py b/server/api/db/sqldb/models/models_mysql.py index 95206ae0f1a..12d98ebf4f7 100644 --- a/server/api/db/sqldb/models/models_mysql.py +++ b/server/api/db/sqldb/models/models_mysql.py @@ -213,7 +213,7 @@ class ArtifactV2(Base, mlrun.utils.db.BaseModel): Tag = make_artifact_tag(__tablename__) id = Column(Integer, primary_key=True) - key = Column(String(255, collation=SQLCollationUtil.collation())) + key = Column(String(255, collation=SQLCollationUtil.collation()), index=True) project = Column(String(255, collation=SQLCollationUtil.collation())) kind = Column(String(255, collation=SQLCollationUtil.collation()), index=True) producer_id = Column(String(255, collation=SQLCollationUtil.collation())) diff --git a/server/api/db/sqldb/models/models_sqlite.py b/server/api/db/sqldb/models/models_sqlite.py index e466d622a81..4b2a4d16b0e 100644 --- a/server/api/db/sqldb/models/models_sqlite.py +++ b/server/api/db/sqldb/models/models_sqlite.py @@ -205,7 +205,7 @@ class ArtifactV2(Base, mlrun.utils.db.BaseModel): Tag = make_artifact_tag(__tablename__) id = Column(Integer, primary_key=True) - key = Column(String(255, collation=SQLCollationUtil.collation())) + key = Column(String(255, collation=SQLCollationUtil.collation()), index=True) project = Column(String(255, collation=SQLCollationUtil.collation())) kind = Column(String(255, collation=SQLCollationUtil.collation()), index=True) producer_id = Column(String(255, collation=SQLCollationUtil.collation())) diff --git a/server/api/migrations_mysql/versions/c0e342d73bd0_indexing_artifact_v2_key.py b/server/api/migrations_mysql/versions/c0e342d73bd0_indexing_artifact_v2_key.py new file mode 100644 index 00000000000..ebfdbcafa94 --- /dev/null +++ b/server/api/migrations_mysql/versions/c0e342d73bd0_indexing_artifact_v2_key.py @@ -0,0 +1,40 @@ +# Copyright 2023 Iguazio +# +# Licensed under the Apache License, 
Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. +# You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. +# +"""indexing artifact_v2 key + +Revision ID: c0e342d73bd0 +Revises: b268044fa2f7 +Create Date: 2024-02-07 14:46:55.639228 + +""" +from alembic import op + +# revision identifiers, used by Alembic. +revision = "c0e342d73bd0" +down_revision = "b268044fa2f7" +branch_labels = None +depends_on = None + + +def upgrade(): + # ### commands auto generated by Alembic - please adjust! ### + op.create_index(op.f("ix_artifacts_v2_key"), "artifacts_v2", ["key"], unique=False) + # ### end Alembic commands ### + + +def downgrade(): + # ### commands auto generated by Alembic - please adjust! ### + op.drop_index(op.f("ix_artifacts_v2_key"), table_name="artifacts_v2") + # ### end Alembic commands ### diff --git a/server/api/migrations_sqlite/versions/0b224a1b4e0d_indexing_artifact_v2_key.py b/server/api/migrations_sqlite/versions/0b224a1b4e0d_indexing_artifact_v2_key.py new file mode 100644 index 00000000000..a2763d5da98 --- /dev/null +++ b/server/api/migrations_sqlite/versions/0b224a1b4e0d_indexing_artifact_v2_key.py @@ -0,0 +1,40 @@ +# Copyright 2023 Iguazio +# +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. +# You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. +# +"""indexing artifact_v2 key + +Revision ID: 0b224a1b4e0d +Revises: fa3009d9787f +Create Date: 2024-02-07 14:47:10.021608 + +""" +from alembic import op + +# revision identifiers, used by Alembic. +revision = "0b224a1b4e0d" +down_revision = "fa3009d9787f" +branch_labels = None +depends_on = None + + +def upgrade(): + # ### commands auto generated by Alembic - please adjust! ### + op.create_index(op.f("ix_artifacts_v2_key"), "artifacts_v2", ["key"], unique=False) + # ### end Alembic commands ### + + +def downgrade(): + # ### commands auto generated by Alembic - please adjust! ### + op.drop_index(op.f("ix_artifacts_v2_key"), table_name="artifacts_v2") + # ### end Alembic commands ### diff --git a/tests/api/db/conftest.py b/tests/api/db/conftest.py index fa0dd2f6a9f..000303d0768 100644 --- a/tests/api/db/conftest.py +++ b/tests/api/db/conftest.py @@ -12,7 +12,8 @@ # See the License for the specific language governing permissions and # limitations under the License. 
# -from typing import Generator +from collections.abc import Generator +from tempfile import NamedTemporaryFile import pytest @@ -27,7 +28,8 @@ @pytest.fixture() def db() -> Generator: - dsn = "sqlite:///:memory:?check_same_thread=false" + db_file = NamedTemporaryFile(suffix="-mlrun.db") + dsn = f"sqlite:///{db_file.name}?check_same_thread=false" config.httpdb.dsn = dsn _init_engine() # memory sqldb removes itself when all sessions closed, this session will keep it up until the end of the test From f75defca974991a1ff04cd6dd1e908df4990818e Mon Sep 17 00:00:00 2001 From: Liran BG Date: Thu, 8 Feb 2024 21:13:45 +0200 Subject: [PATCH 019/119] [Project] Wait for project to be deleted in Nuclio when deleting project [1.6.x] (#5101) --- mlrun/config.py | 2 + server/api/api/endpoints/projects.py | 1 + server/api/api/utils.py | 1 + server/api/crud/projects.py | 69 +++++++++++++++++-- server/api/utils/clients/nuclio.py | 55 +++++++++++---- server/api/utils/projects/follower.py | 2 +- server/api/utils/projects/leader.py | 2 +- server/api/utils/projects/remotes/follower.py | 1 + .../utils/projects/remotes/nop_follower.py | 1 + .../api/utils/projects/test_leader_member.py | 6 +- 10 files changed, 117 insertions(+), 23 deletions(-) diff --git a/mlrun/config.py b/mlrun/config.py index 8cfa83d7a27..03a3ba0dca4 100644 --- a/mlrun/config.py +++ b/mlrun/config.py @@ -408,6 +408,8 @@ "iguazio_access_key": "", "iguazio_list_projects_default_page_size": 200, "iguazio_client_job_cache_ttl": "20 minutes", + "nuclio_project_deletion_verification_timeout": "60 seconds", + "nuclio_project_deletion_verification_interval": "5 seconds", }, # The API needs to know what is its k8s svc url so it could enrich it in the jobs it creates "api_url": "", diff --git a/server/api/api/endpoints/projects.py b/server/api/api/endpoints/projects.py index 6dd46c19281..54063fa3369 100644 --- a/server/api/api/endpoints/projects.py +++ b/server/api/api/endpoints/projects.py @@ -255,6 +255,7 @@ async def delete_project( db_session, name, deletion_strategy, + auth_info, ) elif is_running_in_background: diff --git a/server/api/api/utils.py b/server/api/api/utils.py index e35c2bfd412..8e1757b7c15 100644 --- a/server/api/api/utils.py +++ b/server/api/api/utils.py @@ -1172,6 +1172,7 @@ async def _delete_project( db_session, project_name, deletion_strategy, + auth_info, ) elif wait_for_project_deletion: diff --git a/server/api/crud/projects.py b/server/api/crud/projects.py index 38464b73261..49035682530 100644 --- a/server/api/crud/projects.py +++ b/server/api/crud/projects.py @@ -31,7 +31,7 @@ import server.api.utils.projects.remotes.follower as project_follower import server.api.utils.singletons.db import server.api.utils.singletons.scheduler -from mlrun.utils import logger +from mlrun.utils import logger, retry_until_successful from server.api.utils.singletons.k8s import get_k8s_helper @@ -99,6 +99,7 @@ def delete_project( session: sqlalchemy.orm.Session, name: str, deletion_strategy: mlrun.common.schemas.DeletionStrategy = mlrun.common.schemas.DeletionStrategy.default(), + auth_info: mlrun.common.schemas.AuthInfo = mlrun.common.schemas.AuthInfo(), ): logger.debug("Deleting project", name=name, deletion_strategy=deletion_strategy) if ( @@ -109,11 +110,11 @@ def delete_project( session, name ): return - self.verify_project_is_empty(session, name) + self.verify_project_is_empty(session, name, auth_info) if deletion_strategy == mlrun.common.schemas.DeletionStrategy.check: return elif deletion_strategy.is_cascading(): - 
self.delete_project_resources(session, name) + self.delete_project_resources(session, name, auth_info=auth_info) else: raise mlrun.errors.MLRunInvalidArgumentError( f"Unknown deletion strategy: {deletion_strategy}" @@ -122,14 +123,22 @@ def delete_project( session, name, deletion_strategy ) - def verify_project_is_empty(self, session: sqlalchemy.orm.Session, name: str): + def verify_project_is_empty( + self, + session: sqlalchemy.orm.Session, + name: str, + auth_info: mlrun.common.schemas.AuthInfo = mlrun.common.schemas.AuthInfo(), + ): server.api.utils.singletons.db.get_db().verify_project_has_no_related_resources( session, name ) - self._verify_project_has_no_external_resources(session, name) + self._verify_project_has_no_external_resources(session, name, auth_info) def _verify_project_has_no_external_resources( - self, session: sqlalchemy.orm.Session, project: str + self, + session: sqlalchemy.orm.Session, + project: str, + auth_info: mlrun.common.schemas.AuthInfo = mlrun.common.schemas.AuthInfo(), ): # Resources which are not tracked in the MLRun DB need to be verified here. Currently these are project # secrets and model endpoints. @@ -154,12 +163,14 @@ def _verify_project_has_no_external_resources( session, project, deletion_strategy=mlrun.common.schemas.DeletionStrategy.check, + auth_info=auth_info, ) def delete_project_resources( self, session: sqlalchemy.orm.Session, name: str, + auth_info: mlrun.common.schemas.AuthInfo = mlrun.common.schemas.AuthInfo(), ): # Delete schedules before runtime resources - otherwise they will keep getting created server.api.utils.singletons.scheduler.get_scheduler().delete_schedules( @@ -190,6 +201,9 @@ def delete_project_resources( session, name ) + # wait for nuclio to delete the project as well, so it won't create new resources after we delete them + self._wait_for_nuclio_project_deletion(name, session, auth_info) + # delete model monitoring resources server.api.crud.ModelEndpoints().delete_model_endpoints_resources(name) @@ -384,3 +398,46 @@ async def _calculate_pipelines_counters( if pipeline["status"] not in mlrun.run.RunStatuses.stable_statuses(): project_to_running_pipelines_count[pipeline["project"]] += 1 return project_to_running_pipelines_count + + @staticmethod + def _wait_for_nuclio_project_deletion( + project_name: str, + session: sqlalchemy.orm.Session, + auth_info: mlrun.common.schemas.AuthInfo = mlrun.common.schemas.AuthInfo(), + ): + if not mlrun.config.config.nuclio_dashboard_url: + return + + nuclio_client = server.api.utils.clients.nuclio.Client() + + def _check_nuclio_project_deletion(): + try: + nuclio_client.get_project(session, project_name, auth_info=auth_info) + except mlrun.errors.MLRunNotFoundError: + logger.debug( + "Nuclio project deleted", + project_name=project_name, + ) + else: + raise Exception( + f"Project not deleted in nuclio yet. 
Project: {project_name}" + ) + + timeout = int( + humanfriendly.parse_timespan( + mlrun.mlconf.httpdb.projects.nuclio_project_deletion_verification_timeout + ) + ) + interval = int( + humanfriendly.parse_timespan( + mlrun.mlconf.httpdb.projects.nuclio_project_deletion_verification_interval + ) + ) + + retry_until_successful( + interval, + timeout, + logger, + False, + _check_nuclio_project_deletion, + ) diff --git a/server/api/utils/clients/nuclio.py b/server/api/utils/clients/nuclio.py index 051c93fdc49..cf5460e0034 100644 --- a/server/api/utils/clients/nuclio.py +++ b/server/api/utils/clients/nuclio.py @@ -18,6 +18,7 @@ import typing import requests.adapters +import requests.auth import sqlalchemy.orm import mlrun.common.schemas @@ -94,6 +95,7 @@ def delete_project( session: sqlalchemy.orm.Session, name: str, deletion_strategy: mlrun.common.schemas.DeletionStrategy = mlrun.common.schemas.DeletionStrategy.default(), + auth_info: mlrun.common.schemas.AuthInfo = mlrun.common.schemas.AuthInfo(), ): logger.debug( "Deleting project in Nuclio", name=name, deletion_strategy=deletion_strategy @@ -107,7 +109,13 @@ def delete_project( "x-nuclio-delete-project-strategy": deletion_strategy.to_nuclio_deletion_strategy(), } try: - self._send_request_to_api("DELETE", "projects", json=body, headers=headers) + self._send_request_to_api( + "DELETE", + "projects", + auth_info=auth_info, + json=body, + headers=headers, + ) except requests.HTTPError as exc: if exc.response.status_code != http.HTTPStatus.NOT_FOUND.value: raise @@ -118,9 +126,12 @@ def delete_project( ) def get_project( - self, session: sqlalchemy.orm.Session, name: str + self, + session: sqlalchemy.orm.Session, + name: str, + auth_info: mlrun.common.schemas.AuthInfo = mlrun.common.schemas.AuthInfo(), ) -> mlrun.common.schemas.Project: - response = self._get_project_from_nuclio(name) + response = self._get_project_from_nuclio(name, auth_info) response_body = response.json() return self._transform_nuclio_project_to_schema(response_body) @@ -132,6 +143,7 @@ def list_projects( labels: typing.List[str] = None, state: mlrun.common.schemas.ProjectState = None, names: typing.Optional[typing.List[str]] = None, + auth_info: mlrun.common.schemas.AuthInfo = mlrun.common.schemas.AuthInfo(), ) -> mlrun.common.schemas.ProjectsOutput: if owner: raise NotImplementedError( @@ -149,7 +161,7 @@ def list_projects( raise NotImplementedError( "Filtering nuclio projects by names is currently not supported" ) - response = self._send_request_to_api("GET", "projects") + response = self._send_request_to_api("GET", "projects", auth_info=auth_info) response_body = response.json() projects = [] for nuclio_project in response_body.values(): @@ -185,16 +197,30 @@ def get_dashboard_version(self) -> str: response_body = response.json() return response_body["dashboard"]["label"] - def _get_project_from_nuclio(self, name): - return self._send_request_to_api("GET", f"projects/{name}") + def _get_project_from_nuclio( + self, name, auth_info: mlrun.common.schemas.AuthInfo = None + ): + return self._send_request_to_api("GET", f"projects/{name}", auth_info=auth_info) - def _post_project_to_nuclio(self, body): - return self._send_request_to_api("POST", "projects", json=body) + def _post_project_to_nuclio( + self, body, auth_info: mlrun.common.schemas.AuthInfo = None + ): + return self._send_request_to_api( + "POST", "projects", auth_info=auth_info, json=body + ) - def _put_project_to_nuclio(self, body): - self._send_request_to_api("PUT", "projects", json=body) + def 
_put_project_to_nuclio( + self, body, auth_info: mlrun.common.schemas.AuthInfo = None + ): + self._send_request_to_api("PUT", "projects", auth_info=auth_info, json=body) - def _send_request_to_api(self, method, path, **kwargs): + def _send_request_to_api( + self, + method, + path, + auth_info: mlrun.common.schemas.AuthInfo = None, + **kwargs, + ): url = f"{self._api_url}/api/{path}" if kwargs.get("timeout") is None: kwargs["timeout"] = 20 @@ -206,7 +232,12 @@ def _send_request_to_api(self, method, path, **kwargs): for key in dict_.keys(): if isinstance(dict_[key], enum.Enum): dict_[key] = dict_[key].value - response = self._session.request(method, url, verify=False, **kwargs) + + auth = None + if auth_info: + auth = auth_info.to_nuclio_auth_info().to_requests_auth() + + response = self._session.request(method, url, verify=False, auth=auth, **kwargs) if not response.ok: log_kwargs = copy.deepcopy(kwargs) log_kwargs.update({"method": method, "path": path}) diff --git a/server/api/utils/projects/follower.py b/server/api/utils/projects/follower.py index c7427251fa8..bfa210ac23f 100644 --- a/server/api/utils/projects/follower.py +++ b/server/api/utils/projects/follower.py @@ -208,7 +208,7 @@ def delete_project( projects_role, leader_name=self._leader_name ): server.api.crud.Projects().delete_project( - db_session, name, deletion_strategy + db_session, name, deletion_strategy, auth_info ) else: return self._leader_client.delete_project( diff --git a/server/api/utils/projects/leader.py b/server/api/utils/projects/leader.py index 064e3d69ebd..e2b232e8e51 100644 --- a/server/api/utils/projects/leader.py +++ b/server/api/utils/projects/leader.py @@ -114,7 +114,7 @@ def delete_project( self._projects_in_deletion.add(name) try: self._run_on_all_followers( - False, "delete_project", db_session, name, deletion_strategy + False, "delete_project", db_session, name, deletion_strategy, auth_info ) finally: self._projects_in_deletion.remove(name) diff --git a/server/api/utils/projects/remotes/follower.py b/server/api/utils/projects/remotes/follower.py index adafaeb3617..c214b72a6b3 100644 --- a/server/api/utils/projects/remotes/follower.py +++ b/server/api/utils/projects/remotes/follower.py @@ -52,6 +52,7 @@ def delete_project( session: sqlalchemy.orm.Session, name: str, deletion_strategy: mlrun.common.schemas.DeletionStrategy = mlrun.common.schemas.DeletionStrategy.default(), + auth_info: mlrun.common.schemas.AuthInfo = mlrun.common.schemas.AuthInfo(), ): pass diff --git a/server/api/utils/projects/remotes/nop_follower.py b/server/api/utils/projects/remotes/nop_follower.py index b2f7272cfc0..d1438c525d4 100644 --- a/server/api/utils/projects/remotes/nop_follower.py +++ b/server/api/utils/projects/remotes/nop_follower.py @@ -62,6 +62,7 @@ def delete_project( session: sqlalchemy.orm.Session, name: str, deletion_strategy: mlrun.common.schemas.DeletionStrategy = mlrun.common.schemas.DeletionStrategy.default(), + auth_info: mlrun.common.schemas.AuthInfo = mlrun.common.schemas.AuthInfo(), ): if name in self._projects: del self._projects[name] diff --git a/tests/api/utils/projects/test_leader_member.py b/tests/api/utils/projects/test_leader_member.py index 9deee79d7ec..d302edd6c6c 100644 --- a/tests/api/utils/projects/test_leader_member.py +++ b/tests/api/utils/projects/test_leader_member.py @@ -97,7 +97,7 @@ def test_projects_sync_mid_deletion( leader_follower: server.api.utils.projects.remotes.follower.Member, ): """ - This reproduces a bug in which projects sync is running mid deletion + This reproduces a bug 
in which projects sync is running during project deletion
     The sync starts after the project was removed from followers, but before it was removed from the leader, meaning
     the sync will recognize the project is missing in the followers, and create it in them, so finally after the delete
     process ends, the project exists in the followers, and not in the leader, on the next sync, the project will be
@@ -492,12 +492,12 @@ def test_delete_project(
         metadata=mlrun.common.schemas.ProjectMetadata(name=project_name),
     )
     projects_leader.create_project(
-        None,
+        db,
         project,
     )
     _assert_project_in_followers([leader_follower, nop_follower], project)
 
-    projects_leader.delete_project(None, project_name)
+    projects_leader.delete_project(db, project_name)
 
     _assert_no_projects_in_followers([leader_follower, nop_follower])
 

From c365306295149ca68fe700dae7b77172edd91c72 Mon Sep 17 00:00:00 2001
From: Liran BG
Date: Thu, 8 Feb 2024 23:25:13 +0200
Subject: [PATCH 020/119] [Projects] Fix project deletion missing auth_info
 [1.6.x] (#5102)

---
 server/api/api/endpoints/projects.py    | 2 +-
 server/api/api/endpoints/projects_v2.py | 2 +-
 2 files changed, 2 insertions(+), 2 deletions(-)

diff --git a/server/api/api/endpoints/projects.py b/server/api/api/endpoints/projects.py
index 54063fa3369..54b4537c0ae 100644
--- a/server/api/api/endpoints/projects.py
+++ b/server/api/api/endpoints/projects.py
@@ -202,7 +202,7 @@ async def delete_project(
     # we need to implement the verify_project_is_empty, since we don't want
     # to spawn a background task for this, only to return a response
     if deletion_strategy.strategy_to_check():
-        server.api.crud.Projects().verify_project_is_empty(db_session, name)
+        server.api.crud.Projects().verify_project_is_empty(db_session, name, auth_info)
     if deletion_strategy == mlrun.common.schemas.DeletionStrategy.check:
         # if the strategy is check, we don't want to delete the project, only to check if it is empty
         return fastapi.Response(status_code=http.HTTPStatus.NO_CONTENT.value)
diff --git a/server/api/api/endpoints/projects_v2.py b/server/api/api/endpoints/projects_v2.py
index cdcee559d94..c8a4243a2a3 100644
--- a/server/api/api/endpoints/projects_v2.py
+++ b/server/api/api/endpoints/projects_v2.py
@@ -93,7 +93,7 @@ async def delete_project(
     # we need to implement the verify_project_is_empty, since we don't want
     # to spawn a background task for this, only to return a response
     if deletion_strategy.strategy_to_check():
-        server.api.crud.Projects().verify_project_is_empty(db_session, name)
+        server.api.crud.Projects().verify_project_is_empty(db_session, name, auth_info)
     if deletion_strategy == mlrun.common.schemas.DeletionStrategy.check:
         # if the strategy is check, we don't want to delete the project, only to check if it is empty
         return fastapi.Response(status_code=http.HTTPStatus.NO_CONTENT.value)
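
Patch 019 above routes every Nuclio dashboard call through an optional AuthInfo, and patch 020 threads the same auth_info into the project emptiness check. On the wire, deleting a project amounts to an authenticated DELETE against the dashboard's projects endpoint, with the deletion strategy passed as a header. The sketch below, in plain requests, illustrates that request shape under stated assumptions: the dashboard URL, credentials, body shape, and strategy value are hypothetical, while the header name, the 20 second default timeout, and the disabled TLS verification come from the patches themselves.

import requests
from requests.auth import HTTPBasicAuth

# Stand-in for auth_info.to_nuclio_auth_info().to_requests_auth(); any
# requests-compatible auth object works here (credentials are hypothetical).
auth = HTTPBasicAuth("some-user", "some-access-key")

session = requests.Session()
response = session.request(
    "DELETE",
    "https://nuclio-dashboard.example.com/api/projects",  # hypothetical dashboard URL
    json={"metadata": {"name": "my-project"}},  # assumed minimal body shape
    headers={"x-nuclio-delete-project-strategy": "cascading"},  # header name from patch 019; value is illustrative
    timeout=20,  # the client defaults to a 20 second timeout
    verify=False,  # the client disables verification, as in _send_request_to_api
)
response.raise_for_status()
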
Project: {project_name}" ) + def _verify_no_project_function_pods(): + project_function_pods = server.api.utils.singletons.k8s.get_k8s_helper().list_pods( + selector=f"nuclio.io/project-name={project_name},nuclio.io/class=function" + ) + if not project_function_pods: + logger.debug( + "No function pods found for project", + project_name=project_name, + ) + return + pod_names = [pod.metadata.name for pod in project_function_pods] + first_three_pods = ", ".join(pod_names[:3]) + raise Exception( + f"Project {project_name} still has '{len(pod_names)}' function pods; first 3: {first_three_pods}" + ) + timeout = int( humanfriendly.parse_timespan( mlrun.mlconf.httpdb.projects.nuclio_project_deletion_verification_timeout @@ -434,6 +450,7 @@ def _check_nuclio_project_deletion(): ) ) + # ensure nuclio project CRD is deleted retry_until_successful( interval, timeout, @@ -441,3 +458,17 @@ def _check_nuclio_project_deletion(): False, _check_nuclio_project_deletion, ) + + # ensure no function pods are running + # this is a bit hacky but should do the job + # the reason we need it is that nuclio first delete the project CRD, and then + # nuclio-controller deletes the function crds, and only then the function pods + # to ensure that nuclio resources (read: functions) are completely deleted + # we need to wait for the function pods to be deleted as well. + retry_until_successful( + interval, + timeout, + logger, + False, + _verify_no_project_function_pods, + ) From b6c2d3d1daba6588e7b5e87bd1ea67c3c29b68f9 Mon Sep 17 00:00:00 2001 From: Eyal Danieli Date: Mon, 12 Feb 2024 08:42:35 +0200 Subject: [PATCH 022/119] [DOCS] Update Batch Infer and general model monitoring overview [1.6.x] (#5108) --- .../IG_model_endpoints_drift_analysis.png | Bin 36033 -> 0 bytes .../IG_model_endpoints_features_analysis.png | Bin 54215 -> 103608 bytes .../images/IG_model_endpoints_overview.png | Bin 58781 -> 134770 bytes docs/deployment/batch_inference.ipynb | 87 ++++++------------ .../model-monitoring-deployment.ipynb | 38 +++----- 5 files changed, 43 insertions(+), 82 deletions(-) delete mode 100644 docs/_static/images/IG_model_endpoints_drift_analysis.png diff --git a/docs/_static/images/IG_model_endpoints_drift_analysis.png b/docs/_static/images/IG_model_endpoints_drift_analysis.png deleted file mode 100644 index 54ad54cdf1d27021e79d5423f9a851c75bf36ff6..0000000000000000000000000000000000000000 GIT binary patch literal 0 HcmV?d00001 literal 36033 zcmdSBd011|+ct`dQc96l-inHVtrb)ZC=`&{d16vgkRc#Nhztobgb+f4IAB{5YLy`j z!3hu{1Q`M(1W+mnfkX%pLP&y*AwWn11QH+w&PMI;ZF`36JLg>A^_}yFxcAOpYwxwz zv!3C8?q~mU*8QaRm)pKnQ&ZDEed^eGH8l+fHMKR;FE#*IzS~y%2k@Udwo;JdP{Kttc&caYT6GqT)(Oh*$0z?^4%2MGqezvpvs6NgV|g zH{`h|$Iks%NZ2u>S@$Ph)k{v_c#KU=oY;Tr!0t9mq0ZFNKOBX_d@1D{A*xWgB(;VO zUwU}{4A95l{)8K6wRryX(wRM0!7d}Ef4^(BQSQrKQy2c#`b_uWri8u!=n%LgG*Z(c z^KZ}i*vOKoj{A4((ye}LJ>X|We1U$aa{;N?3nR60D=P`ywkKJk>=!gfFy#ixmEp+) zVS#V~RMeTVTuE|J{dtNv{jZ)T-etAKz76lsy(joXYhd8fw0AZ0p~7j{Ud_Mq>a(TU zTm2f_j~SM?ShiYJ)NTBs#vvy~*pDbTb+*O8!3qqY4Bww!_OAgiGT*JoQL_oVA^C|x z_$6*su9r;!3T3;jD9QE?$mJoHT+3l2SdwCT4t1}283*U{H{NNH=|Bj3^f;BU#!Uw+ z{A(38$`{hT2}>1O|LWe#6+XBg7co%2Cp+MpoAx$gms4}n?E7}qy<|uaS@2MoR>9Xj z72obwq>g)XHeYOAPQ)bPNP9`vulLbQ36QaNwooR14A4vdrsWtE3FP10TUA&P+oT+md;XIvE+d z36kX67Rxom2O#*;S3I^feFKcJZ_A#QYm*Ze!m6HWO@&wa@Y!@nh?};l2jvP>TQsBW z(8J?pXDStZ{)tvbLpA}w=JML6%?ZYnk%u8i{%}>4JzzfEWfVC-kOJ{@&&pdc{sSGf z{PvlsZ!MiGG)_uduDy?jMK!ln^Aa8A{yc>oDV>9_6wfF%jG)g`Vg|%Sje%?`%*R`Z zUc{+5M}&Q;+WJ%%^QQ9wnDPFZck419+DA2)44`F(`=m|eD+Kg`%xzm))r7(%;y#R! 
z)5QOm?1#n}5L1NZ!J}CZ((SALqo?OWI74vw@I-$^=W$`c#rA@0v-#64v8-N!V8}G8 zwAFp7b*q3>1_PO9C0jVrA%&R7`AJJG_^>u}*vq{T%}p3-z^G0kKfE##%{#0NTbhBi zxR)*!GW8QMQ_FV*zsm^NYqpk;Nhv}DoeIJlAtxg(^;aghp&~UF;cRE;@Se*sPwzV` znJi;w&F$2W(CSo2f&9B-H_g`~Kj0oU*j!!PQkR|f@abs9o|7~m&#a0q+7~Us#U9=y zPfZ6@nf{OQ(!+@|%2UyOXl`VI?+6PG^D9yRO*zEHqzI3RdfG~6P^f%ZVs>)X>yEf+ zM09N=%^@+bOTNoe^NSstAv%B=Hc>gpOlq-vh!^!q0Sy}Z)guv3m)5;&H$oS0 z^w_a5b5|BIm7wac%esI6{c%Mdk)}iTK4f#grJ^^6 zmhaN}>$guqR-eR%(^3t;jap`TKg=vFfAO6#g3x#_;=~4AJHaff(eeDvhPaoxUiURM zQGNmO>l&>ytRr8f2%|g#J*A}!f5>+7Yp-_HNqdRwf7v~{HEv9T^!z!LYsq0Z&CEo= zb8DO=(D&HOvoB8;mIqxmbY2<0t4sX);y*Q3ZYlhd1}>^AxtjQ*i=W}-@7ub%+OK2H znL8{}J{(mz2PD(!z}xOi2`dhJ4{56%v9Ma{Uk|s%66-8704doqm!&Uxk-_+KcE}WF zMrc|}WK{|62OyA;EINc$;-fpeVOi{}?9E49ZJ_JX;E-({JGj=G(Q6s+aB* z`UDCs!70MWlHT|yb7Kb0*QQfrrtgV5pVHGSm>Nb=yPKVmQejENkWz}iN@%hCF7!1O zf=jG$Sb7&6vK$eWcIQ#)HjD(@v>t~&X|r3r0VhWM(_oh#E^W1*>TeJ%r8K0*S%xiG0`$d0fOb+2#plkq2W zU8c+|Ubl~DA^S+9pV6hjl9hI!r!u8#Yec6ywl1&Nn_%PI*fvEFOg|W^y6Tgzp z3B-0y%T=jj5&?aq(sXe4seQSf3{kCM=rRv-927d8tHc_sHK|_B33Bb3{a!fMir5fW z=1r&oGvym_A|`A~9E`;^pDUqDjnK2v z$jSRAHUDya`VKLJ^b?4W4k)CqHbhMj<69`{88EZFVi)!s>Aq5D-9$jyP0!C1qDJ4? z6~aaqB!4P8@itsAE?Hh=$LY<;&TKFVJX=^;S&CozIB%&%`s`bYVVsa0{!r7 zIU{CkI6mHZe5(bl71!zLta~+dxPh6rAq}MNs?|~RXh|^H^X>p>n@KpVHmt$af%hyM$~lz5cL(#Ocf!MS*Mr?XWUGtQ8{8h0iVH6vOZ+tnoqVU{pT6FPmm8HWmWp;@YLLT6x?v*?F@hIEe_A{iew#gf*34;TVo@p3 z1A7Z=r|^hyt)3xmac14aYN7x6LiUZ$J4OkO?#UD7u@g%)M}Dt`a=toTZAjZ88w3uJ z&aot*M0$bFfEUiE4jZRK45rc9i3uL7~N`zd&*>)(r$faScMq+U_*+`&@R!7+@A^ zKDdhL3`n835bKqPb)t(l;E+qNltx)DwC2zxTW@f@=feEcB0lxpo#gIYE!DLn4wvu; zPM)xSSn_}262zyJoM-r0e)liYdU)Q`9Qfs*dgy$3+y81W{humhPf=eQ=zsV%MSdDM zee1}xj0>x7{h98Z3B*Rze|9L2nxp<;QnqfR1AA>9lH}iKi7QxNv(-;7J31 z)0?rb7OW3VN9d}J4iBDH1W-u(z@)gy=ubDOX?Rq^pSRO`z(pWl4DL&x_IPJij}`F*#Z9MSxAch{x0 z9-odbw{1#T`{~d!t3~(IQO-+s+^3_Kv|Ur`pAJL!{pVT1n*Zt2<-fbMbeigl;Ua|v zGeZ%CJy!g^LKrRRQ`?PQX&W7?({5|&Wr6&NY1%2+U8_Gn{`B%KDcGJAh;O^5J$5{1 zqtJgf?p0H}ywW|^VhO(SddD@hbsw*6-;!doeM3I=hZM07CO$w1%IPIAq< zCIxb&{Z@wNcM9eXb(}pyvM-7<=hYw|iV!tcr(rh6k1V{=3w&sTOL($9d1+cu*Q|nK z9Z~T<_1A(HCdvqH#RupAS}0wYx5y=_2d~V%0Pb$kl-NHMBb!6qb3-OVPU= zxZLpPpCZxxsv+8Jo){JP!7$GTx`!DE_rt=w@?$zRA=$!0R8i7`z?=7lLu}P>Ji}4& zESUZrF1n|9XkZ^d5RKq1A8G*fBvryGOx`)=Pl^lE))dkU1N}}k$!PXg$6W7r*v5|b zO>0`2H%`5tX<4V!C;UUhDCw-Q>sI*Mw^PEB(IepG4l ziqcm$+HG2hmishJx9LYvdum40@-=+gZmidukQFA)bl5q>PY#}=*$VSXh7UUIAo}FT zQd2_Xd77hQm}DrWC<3td2-pHgit+Z#a8gcI^YJIhDG*|jJ|>i04TCPIhvulLaP(ePUNarPUK9*BWM|!uYLeY+k);FC*C` ziWjC3!8~ep3H&lCWMS-P(VPPC6jixrEnk7xQZk&E>o=rmT2?m-aj1$7$+jD?#*ZPd z>2LWi=9D;sTGWda$ZKlN2Di)Dv->l2Be`)5Hvz-L8MJffPOGG@8Z2ZWj8W_cH$>*c zhpIwc(8HNw7G@&e>vr`zPN&_K0(0_YmEFqQ8QmAg@4(feZnlDlo}FccCf}RG`B7Lp zpusMb6oil1vaH=~J`35k=S6Vg8zI^Qu}do4Y~+21+< znRHM$L9XfWfJ#~6p5%jvh_fV4UA4jxhjrrggkE~##=|?5*i0o!IXV$8J$Gr87pbLT z*25l08g8L9r#I2?VEe@LwU-HX$FJ;N6JYbfpOOgImc=-biZ>+~PuWZA`JGhrW(VuI zS)?i}Z|=0#yUj7iKf7KcU+sDF5|Vyq5IffyU;8yYuu5C=yv$NY9=V}4QDs5c<&xDG zJ>E;iyF_4Z`&(kGpyBu{C3ioYh=YBb61Yh!aBxPbSqh}ECHBqC#SmJDFq;{uFCML_ zodPxWh3?ms;KiF5+x+ikFVNHxh7bE zamkWQN*4QvhY^Y?SMLhYDcAaJ<)z{o6hlR+opim#Vgp0iHRXCL2gywo;rT%kq~G1% z4oV(u9Y0Tm`{ljcFG2z%O@3<6&etg^h#y*0pr?soRLL4;v@(S%E%IpHx1pHO2Ho{I zCK$n)UyNHvM?)ELf7$6fzKKR|9!08cAC9L&mX1*#8k(u&&}-?#CoMq^JS6HgiwH#_ zC*MOl0Z?f2+k|4%A@!_=I=*g(lf7ZgD7SF@OB;}r?nB97Q}susds`T=aAE|hx>$G5 zBl|ZQF>f?ezoLOc-1wIUv09Dm4kRxxZTo370DD)@+^pRY6^VR*TQB@z-A`|R-L@gL z`DDbm*sviPc@YQ%z4!W#Z1oAq$`e4E8Wf?g+qX+SoK-d43|R$PvaLBg)HkU3E^FV@ z(|2h#ldhLhJ7V0sagJne$&U)tdw_pU*pR8EXI6e#bW_LaQo6&;+olxAfZz{Zk8;s{ zA*6t9=T3AJ4d6|ITcbD{TXAzypu^R4MiJ~r4>#KLFNdW`D@=erYYDP_;By=^8l775 
zlJ_3=d>wuJGwzuj>~9Btfs=9^#|JGt8|{tudIw@+k@G1gqFBprM*y0rsGna<%%EF( zP%62yY9OliY?$fQk2sYp(Fd*em8_VG?@#+4!UM|n2);#Il&e-Njt0b$Xi zMpQHu4+smB18z3mx<0;a^(t^}2dwOmn#uezr-v#)yWh0oCCTQcpo3|{?UvLtYB??Y z@w))4#A9ARAYzC~@&W|66lBZTTidaeNG^ACn{&dlT$~v7{H;L9^KGnsNoAH6YfbJv5 z>*Y_85yKZgyK&d0XS8g{@YSej3VZqLI(pFk{ZnoFQNyM+`L4dU#*KNCO^-Y?Lqb`1 zp12H-4@geiffb6l&t4g?sAj@7f_5u^Tu@Uh&(~DPt&0@W3Tn&xYpS}UbAG*8=9MWT zG&eaZ4iy!J`W~Sb*dv<#>!_AtslL+k(MtU-tQmz?=~I>zdD%x$;aK8)}p&{9WCVJjn~)hoVwL| z{r`S5%ctw;2rjZDl2sKl`AwIr?VZ@qgRNW%jT^y+?DMZbJJykw8EYfF_W5CIavtF0 z?xda;)yHYR=yNU0C%<<8{2Dg=b$_|r3IB(#^Zx{I{C`51clJ&BlS2#s9X_5h=<9v0 zyWvnk*T>C=$OI%)-^z+ozS#>MaiH(x$V-H+EQoo*_Nkfw#4Svv{=3$HK-f* zYnuP_uATo#N8I1WWa|dNNW!SYawRQr(p$mk~+z!y=bM`%VuWYD_ulW zrsfTgyBrs8!~ouJpRh3IH7jJ1l2+U?_4G|skF zAsJbA>F;;^^L1psS`{E+SHm;^Yw~YH*aKp~qKOp%;Cdk-L6RA>cv_e#qm!OQZf9Ls zN7qfqc~&$njn2G7XJkLfa2DxjSc>IP`!->0f050=p&zM~fm=cUeu_L#n7Ke` z@@efXFdYriMu&ehmwY(T#~SmBT30RG<}DWVbA;}G1$WZP+-Sn5rM zlNSy>@y1TIyo%Ahz3I`93;^;ia3>r8SX6^BT|*SJb>ruKclna0L&@o=k*)Sb>Avjz zf8FN!){qPFV~@k+=}ctrt}(Oi4EXK>4gbLmm`1GW;|yxZ@8i|^X9#>KxFlH^dFNm(v|K&CUl-#xbi$dZgQ&t@D|3E z+gEpblb`wWbpH&H_qgY{&2_EE7PYk=8xeP6M_A-??jmow)j6x$y}7=z`NPwhe(%70 zjv_L=etjs1Z|~zK7^EK~Bcx{L;)RrT-ouPFaQNj}k0x(e z`_4KATS=R{N=Fd9AinlZMold2eS^YBt8gy7(dB20Vp% z=&!UktKixvJv9?F7IZ>^pJgvys zVw7EJ@|cbgU)$)gvY5PZCuZR|Geg=6b6kAqSxfH2Ua4xa#2Ycbth%>y*I9+ZS}K>AMw%BxGihg~q%awr4@dity-n-VU_eR<&NnvpVd%DLsG*yVEOy#`=t@hMaw5a9zK<@0HvI^Y(*{-QH=RuJBdO)!k+fOY`GU(yd^1_v= z=ZTN)_2wCg<6N>KSL7D)q$PH|-Lz9@c{H0(N6#;tbXF9QjkRkmcw`tkc?lV23m^JL zm=%H=?Kfo<&t0fHeo;bB073`}(s{_i`eNyKu4Ef9CX})YaG>3N&EV~YRgi?&cLAKV z*v!7QN=2Ylja--V4`xCqQlm6aRc)-}CLqLS7OgYG?M9eU{S@|vjB^iEzpEIdd4&tV z`830>ME*))?gIN^=9k;yTcIkl==a$Dk1rGVP-&E}cz!c;sAQY3sZJ|=OY;muL{efJi*(*?ia$#DDB3ncJJot#14LTK#sE$)KBil&@<*D+g#h2pg9BwCZQZ$#iLu#XI z009I+O&;ib-W?&Ly|pUc94gmxR9G#uzXt4j4h_f|P}{hjx97EYKE-E|^xpW5V56H= zZ#4O>419=Lqepy7(wmt~{;S}qU5y>bmBolDjD%A-qGw zQ;SK;tZ>tlblF}m3*MWU-LJpngkspeG7sjXsO43AY76{!JG~o_4hM5ik=$Nq$RQ^GDJp5Xb!DkFrjOvUz0j=H(y9G>VURx84~aH$ zNdHA1Q8w8;*)Fl3^J=?S&O3`)yvgftPgZ6-vqYB=jw_Q?K6tT7HSnihIgpX8vZFfqne55rEUvN=}F?&23%^@3@Z~-|p&#?UO<*!}zQ(7y z3-W5|wxE8ehrSU;21gA1_DKJ#eBh|Uarxe~=7CKUSNxVG$*MQ=Qz~(9Tr#l~F{GX# zEJPv;n#iy)tE_?Gm(J~1`KfLIa(NJwlyKy}`gdVHLpQR%Vv|DHHUqmvxo`4}ZQM4irg1xIzNVEJ-^k!O!Ree7|HckyC9nh!cnj`6xG@Ya^l zIwc!0F9{A;MTRAlqWq^6rPE=$h7|2z5(oga3Hx)Wv1Sb6e9gkMp~cZ0c>5gN2J7Kl z)^;J|^a~zxrgddrG#zyt4mdHBrc;yg>+O4xR-&AS5T)FJchHG*MyqApf5pq8&9Mil zofbPqLsOFO>aI-k*#WZQp%kq#3b9U~BIPSOa*!sLSW884=YFV!QxU%}_ABEdJ)}|Y*xVwN z8%63*w_M#=N&?(6&n^f2*1gzfxU05W!oO({pW33kp(Snuj-w?Le;@yh{L23;55Z4y zB>l;N>mJ#84V4^2gr9HQF}=5By-~lBZyE+2rL;S3B~jnKIz|dQr=<}a-K78ghg1G_ z@y}@XYeEr8X7eQx9mV#|>KoW~P$T{7C}p)9NA6vRj|d(xtHI9Ep0;*#sZ{12J}4())2Ql?kL@h@$G-+J1m-McBn*aaMeE`ww>PHTZW1zv!P+H z3%L?Q6Vf3r^d;6S8;mW!$M!a*V)VqQb=|Iveghur@e2EvJ}{^D@snG4<`mJvo<)zj zEk$!)b#p<|JAR7!qKm9!?YoWH-W+JB%(4*EnhRq-0`16`$exxVd{le!Usq3-CT;PE z%QybA;E9bIHKzAVvkcuNlRqXV^m5!bKY=TvwWucc#=3aXvGyW+!Y?nxcO?;f4u`Cz zR^+3^wk)A8sJf#mEbzozCz0V($;+!kuK*)4We?~O<0l(M&LqV-)biY0CMTWCY-w_h zFJBzDQ7t#xNa@xdM6j;-E~b6#auaV=Ca1*DZVQ%Y&&F3?!E#z+?U;vwpzX390N2(} zh>uui!gdnKGXstU1%da7q*qCvt8QKaLr=4;dftd7>*qT5(z_kL*ZaVL^XbV6ee=gv zu-dzF&eT)=V^*ALir%7*SN#E>kUR?>F))!C{ba6i;=lw)_~7CIvP$izDs<-fQ0!3j zgiZN)9atBnJVeVtkAuCO5*T(1oi4B~&ZD-3P21+q)MpJm(9BOHr?|m0JQZ_2`Z4f2 zQ@*j?fSYtyS4=3@e?(kxi+45lW&V=IDDd{ZP#h8@52}MTyq8#1VffFe_tb--OHH)a zF($6IyW0l4_@%bgE#2$em36 zfzmIg?KdXXb109Td;|^HlUW>+j+@}8WV2of*l(U?HY7DYHjyr+ymR*XqS71?DQpyb zRiIdl9)c|fUH|(DLM78PO2@!nHrFSJPwWAtOFls(vQ^=G-=DidP+tNqO&XR+L{G_S 
zV=(VFxA^j>h{a#z-VE^xVs}(kutg8ay(VAMm`J}9@Bm~(0?Q;KhcwBOahU|;YV)!| zBjm;ecRXf08UM}4`GA$NhNX3N-=ZE=ybMeh<9$`6$1tUhG($6$J=K^jn9EnOw4KTA z!7rrU2l}Bjr_QF%DQ>WBPtzD8E%A<4%OFZq@AO%Gngj#cb}6m_q8lW?u9}lM{*hGgob&(flfpV_2g`bT&SW zOkkc=)b;^EzLH%eW=DYxS7MGzz@+%@MGEb<*SMrM(54-mEa|a_$QSt3u~fFF_Cl{) zB!P><$;m6B>}E~{a%CLSF6rOf#G9oUCvv1xYY%yVO+Wg6bOU1SRbulhFgDTxMzh@b zpnWoBoJiZ=uimz2>Zahg^|*%L0O~C+w*p{E9zK%oKTEqk?EveI*^SSQ39{2Gtv2NR z_5;}0_*$a8RIk1DXa7Guva{}wE5XAOZiE?L*!ydRDHYrl`QxF^^~>tU059qGMTBbT zxbi~tju+#97gmm{mz=H|NKVc;SNG>41ytWj*P=E3L>S-s9YjAXDgvGXRlo;t;4$r`*sEX@J2MVa zESl>I|Dl!CM9cxQU?KW^=#!(I0M+XKzG^>J2P`<5O28xnW(nKf+si6Z=Zc95-^W_?uw zMFEXuAX;Bvz|d!fxM)^_9vSNq5i%$}X!VyF_82v(irkVv~`$CxyU*-~GP`xyUXe5I!R(s6cxte~F>r9q)%qo>6ULh1WNvR-n zIsE-S2kb6MZB`d8$v*6dckHv5?LH*-HNEbFjbX=iHUlh`b!Qo@BBJuctpl3 zDaWKyK3je+*mUXhNqaky?A#+Pr7blMfDMhWb`>68RhyUU-F? zUOGuFodB$BS2*dhH;`cZ;NZ?AZq0(fbxIP7cL|=vR%|N_6m6*eYwkCF6|cB><7v&3 zi+8DRcLcw!TR<|60$>7S*r6wXiUD)T4i(6}p%oHGaOqUUeZr z;mH}jX^_xXhyG|iuA{$6*lVR`#Yr*Y^=Yp+E`v=`ANOlRj9+j^wybWA>)4YcrG07- zTV(Ha6AYI>1N^Or|GLP2iG~2k>v6#xlbc)9Td<3FZ&HbAf97YKYMZs%fm)aUT92X) zkN%b#03P^PCJ%tw0nym~H*olj_MyLWR{+!tC{6(T``8bVZdl!se*p?6^pZsl17>&#+w-MyEiL>i*CV@3g zk=(|z$st)n_fUag3BWv`(*3;$@GGMSmdyS(AmNV#zw03S&Q5BaB$zonN4qtwbaaOZ zX;B++0RKB;VXh;B%9g@bP8_NWV^qYjK>Fcjulwc{k^B7WBro{Audm5(i>N#)b|oXj z&l8qkR~pH4B2C_kizRF+h5r#2K}8weGn1v1SZ6<_Z2^}!Ao_FB8u0{5Eq)0`8yX@V zpreJV9yry`Fm`}6^N!)yNeCt2MfZ2?7v)bcAYb9yp&UzCYgA$=H_^v@lAoe+h z$nS0)6OEw<=ZOx`W@5WyCoMTm@}5_F|2FT83p@4W`*%pv?^tI-+423CC%sJwAVO0E zKJ_waDoIEYTTQx-VTJH@B=s;oPW@y57)ELpl3G#qlYj=)j=-(1i)hXakn+k&?VW{~ zusA2IaS9jh5m`a?sf~wdkA+v(L!82j5_)kw0tw9z5OMtO%FB6C9!A>oDg#Z)rZJ#E zrn-VpcfaJhuCbi1VI+OdaGLGPS{#m$4qb9@NI*l^V>_kZT;j{r0(xU(EzRUHlqgO` z=CvJt1JG3He=6#--B5;s==L@eXe&;TR3CTJHbycQQw624GX?nw&MDT*;0a|lf=uo= zpYm@KHuy-EZnML$<)I6qryrq(t$Cfl=#>*5kFL(ni}8&PJWNFN0)RsAUYRCfyaF&m z7+d*UPij`TUwfOonFz6(_o>^wZ=3#%IF1>z(Cg8bDrqiHCi2-`!KCs_O;y_$M7key z2UbnIjRWx>C8p+-mku*64}vpNJ&2CVM};c10RYCA`2#Eo0dFO**|1Rf%0XkXAo%0n zvs}%F?L9m!rC;*qCZPP#`BUP1$Jyc3mRpxKO?d%Tlno)8eVf*Xl3<6jI~_Jv)GjzJ z{FxQgxl5#&cpgOMt?!Y~U0$ZxUEy320Y4p(ZA z^*+##8O~8)WXP+l*VtqX2mV$Xc9=y3v2p0%kUt#iBGw2)3^fn&lvhup18xjkn}N=( zj6ba9gIqkZ#+l+XTAh;m$l%v+iG-3WXOkg~QZB%3k6hbT9+m#8&5ayhJ6@f?&zW6C zK*v>v1L3WhP0W6pkpng)NvEq&3Q5)MQvq-9R2PZnB=4FjNuHt1=p`?zVp{v_nAb`^ zs`e@^f+;gl1Qg zR?fTB?gp?&c0Uvd8Z>h^nJI&dKF&xi#6jXaS z>+9RUq$838XjoKO0|4bsLOx(`)LcTQ$91AO*3c{I6=U}ZaHlU}sEd}me^E182XP_aFvO>dV0^I?a` z2nI}$o_Oi>+xlQ7%?_3<_84g(2%HqNcUMuq$K^hh3Y@OeP9xDxP8vg`8~i-UzeS+5 zgWzHu5|TWtpB2O*agz5K!CqjqqAu z8v@i{Yb+oM9RUQTb-wdiS?OV{9N2kw{jD&p#}SeowfEKKBLiJ^+9K_lMAKHwCi0}0 zPhD48?FA@3?C=$ghj;z>s1@TGm09y;_mpelp-KcUOcRS7UGS;1rc0c{T8Cm5q28ow zw=Z&oI6_H8GEu1T8YoiO`h@z%g(i{ypeTEK%%JRfBB-?E<3{_+0d@1$lEb@}SWy;K zpwsMFlxnT%Gm}7@lu^Ytbp1TiLys)N-j&&E=Z8uu@(LZ*F}khFiX!?Q`NI~s$_FFG z2#n9DNAeVT25?cBbEjGuM{3aPae4B6$}wKr23O6W1JL95W^`7@ZTJk6`G}MY13MWZ zi{(dOINIeKsuzC#u*?QZ~cvo-qEu$XS%6_=&KEacX zuTh;ASiF^rila$+Ei!?gFdIl~S4M7OU8=Lm{7ovOJDb}C#3~am6;w)rM2#Vj30BFE zzozZPVEnZS)ik=Za%SGHF4zHp5yPM7%0SeH1(JRHx*pX5fD^WtDlT9)(U=uziW#5< zG+eS)5mdoboveG>EI-=g$Ovin$5f$OlXi;mNADv|ae=w-OuyqPjWUP;O@Go#@QWT1 zDEix8n&KO=o~8e;=-c;;_oGsyu~8&AMfIx3EY`AI|CvEoOY9Sgcgd#FT_3vR0Z{7{ zWhGS7iXG3$w*=@Rc@=)h;D&$@E9w-*rhnJm7 zVEi!HQ5kviMlRKyRN(+{5rc!92cp1Um?~1LkLF0O&8YdlY9NgsAIUq%G{x`FyFaJj zX`Y-EhXm3y?dzT?C75TaH^bmrJB))a)GSO69~hc26U75r%>J(rY&rp7R|=0f8D5_= z=Ne+mxi*pRCt*h{zPZaS6XBqznNPxA=D<5;L)N1T1UF#jo|xA3Yq{qU{9Rf@I6<)M z2U_&oIBb*6t*)16s6*Qe`@nUaz?*-J5sSNCI-=!BzVaEW{3j(2osS;8Q60&<$K;+< z1j`}vh`L>Yb?_)!#;yCT)STV>T(8U7f!mvzNio|qhN|f#WgV|+ezKYqBH5$&u-#~Z 
zbUPv)k{&StP@WsfuflFHCDx&Ay1lH5WL`zn#QS~0Bi-(nUm=r(e_SElM1yQ(SKI9~ z=cI^@t~D^0vo6JKpgcf%FLs|0EDuaSIm#fGPzYE2Rw)UT026#s9HB9uj*ab@U+BvW z^5K=LuY5Er%A+H;ZncSG+K@Ox>&=X8mO8+^R<3^ov~UfjO^q?9yE3(@y#n#7N8-#* zip_Yt`33xD%~viqWuQ~k7}8_k=F64(A8La*XH5HtwW>Kf)%Fg~DCT$luNxzyh|bE# z_sA<9J%W%+k^&@E6^QW5o1+tJkpPBY|4BboO_s)o*|U}-Gn5ylm`>Gs%N6G)>%`_h z?i?OGwei%*RtktZWBK0Y^&d4Yev}wDC?#iP|E3Q`7ekH|sJoBnTBdk34_wi7TMbaR z)f`)!f9nVn4ENIxNNnxHh<=}{T_1CQWPj_XL-JHK%NvLw!r%X-6=fYYbl&e(QHVc) zF<&#c*auD-*j#Ade6F>khU@xvc1q>ZH&CYevZ~3%D0v`3i@q&Yf+enRFiNgzT+K`2 zQ!b2Vz7LzOi*6l@_?{}35V&I4rk@G`VhOwuv5sEcT}IZaa`Fz0_k@abuUWD&y)PkY z9uK^(vvgSL>KDdyYs3#~hNv-BT-InDB(I&b88>c6G*AK=mhyDEMNAqAAK(noD^g=v z8R7<(iMZaR(Y3Bzb;Vvc!FVu2UTdDaU08n7=Bvq0%&E!(cvp1Sas)mp+g$l`Td(~@Tbrm(+0NVA~_jO%O zco!e`MV6KT;nY|f??wikWXf97t<pvOyR!pnC1tTnO(! z1--wx>K>cB`+5eTWBm%EUiWyK1q1@1jMQTUJ9R*L^-X6%2;lC+oj}jTPKYWM7JgMG z!DEzrT4T%^_qEBLwFqeW8cFZB_Td8DBTs;4xL{&Gfewf@yuu^$fC@eodKgztus-P>O8qC`v|TJ<>1O`3U){%&$W?7ON)3H zLr4N66*^qmu;CQ2qEPTAV}UIzT|(KRdt=u=s>&S5{%pqGzgD{5QFEzxQx)_iDbej` z<_E=B5qwK1?t%EN(?o?g6rRvEye&P1O9{FljX$}zt~~HY?N?I0nObD+eoG6ts7pCe zH9ag_5E<}b?$mEg-<`#9ZlQ|ZjAze!uP}7Nwfc_wY21Ol zicrR{2S9F&BB<6azjs5K?V3l~w%>TN^zM)Ao-NJ{YTo>_W^wg!mq``I){FzEMZ0{F zA8jjSV}6N5$q(DNDH9ETq)_hx=5$qpI%-q_dRqrj>9zca!F?a*vSCHu=&E9Own28R z346I?hCo-GddE7_8(<5w#fk5J^GSprw8UZRMM`45umXuSQur{_kGdD(1>rc+p!e@ zZ3-^Em>cG+MJGD@em#Vre2P()A;5muvd@BK^eKSzj*V&#fskAanepi-Ni|Tug|z%= zL~V|WC3%mH*5$NXim+35t>+gg>#r++M>&RaIkDfd6Wc+G0Th~e7WzW@J!4mScl7D! zL#FLcYyRA+<7Cy8?jw{qh4&MyYvoXjef%LzVW;R3FXm}?*|?*Iv0t65B}?=K0k$Z& zS&FhS141X;g`bWfj7$zW0VSp;lNHv&Q=}xAaP>PIYAvsRn7Uw9<1#krr~LA$M$x7d z#hF)|(Z4A%aun^om!;KITHGtv+HUo*F!W#n0)bQw?4lMt7tz!&k9%2xe%P0(_=F(d7MAl3KU5m8K8lb(9Pz zkV~Fc0Q1F*FVTO$*-#MO{2QU$^!=p=3q%#iRA~xQ2gIL*x#wC4lp9!7a9V$~K(8Gc zcu~DR#cgflI&BA3-5$Ej?wpZ&{Y&ofzz_95l#BHyb~$uUyLxxBij76X`r+29wva&p zhtUPu3^nF*V=q_1sXsoTm?biI0rj3wfU+vVsFfx5*8A}%^?`_5+jz|$W$obVhegy5 zZUMfep?#?PAyC+y`#}M-d8GNo*Slo=jCIgO#qzJ5Lp2C+@1ps4F49>FxWrm&zVMt_ zOr25QcEEaM70!)inmGYRxW=K&$>VaLc(j@gx=O4?7+5&8R%L8jnoV^UE3Bok{R3o< zrXk92xc1D~12sta9f8xY&dbZ+%t08WA#vU0BpvwV!gW#nn>@HAUO#^tw3J5l*tAqD zmP#G~e7_0~(|EVX+uU}y^fys>i^)7 zQ%)W#556&hQ5vmI%gE)gS1~X+Kp>x1M#9(uUIc&7)@PgJYhWIX&I6$DVx!t>DqZV) z#m3g(oi;>}Yq~2}@zag$`si2#p0-~ivuHgi42?ashu#s3x|u!sRNd)gkRZe%OE*#* ztIV)Y)YEs{ZoyPH-VT@T$B&!Y313WK+e>{*0@8uTvhJnc-u}Cz`^>q939M zC4mh;Ep#SITODjfBtlEIcPfNM;jY zI?IZ;;tMfp829LtOVEW+2jI5VVK=^pCXd)`KvNF&_hdiy6G z6%(!an_5D7A;YWgG!^=t;FEj!X$`O|R|t18!a=@{vnBU6Dk^(Mq`HWm)yH!s!8qbv{3YRp}&tJPvfSAComrSbA&+6hCa`Ho?R6Zp&egk zNMW3Oot2h{p2XJPh76bwHEvp4^d@*fSk5qeA&aQWSAQ@*ATMoSsrSD8fXAph!2S*R z-Um>!ekVBik1K||WH%j!N7JL4r=cq*<*VL4nodyfXPso>BE7Tv2g0hD zhXK{fAw*LPWNQyo#My(;ee z!5_7N8{{*tdJU*j`d@&^{y%(V+0o4j6_@0{oDa|a501KeZ7t)k3fTX@(bE6VNx%yR z^Vb$6F_X3;Y8xmpO?$z&%^-S+wDKwWsWk@#r49>PZ{%$J&gwg-j-kC-I5p2GDQd>2o;b7B2xl{Bv1$e0tqIB5Hh{{ z#y-!P)>-R(IP0u)-uL~g1n%s;@4c^mUH|{@!c-CsViuM}H=ep<5s#{C?G(ccA}*G* z)(+wMx4yIt~I8TMnfvs<=fb$@efJh;?Q zYZhvqQBK)q9l6JyRaf^V<*l=Yde(>A4Ud4qZ7+BFQh<|ic6}r()Hj^C5R7r%&Cky* zux1mVQ%OG^K??c{{QZvyBEwUvGRsLQito?7CsH_*@7-QIQ<5{O`d7Aq+HiHHL4sf@ zTI$d25YWhb(&38I+j%;=7|#}hZek-jEMa@$M~nIuKFt)T$ieew#JHG2$&`D|h0FEl zT^`R!$`C({uZR-%ryhXM!`#Oqg_{k4Kl5 zb=V%@yOb#FU^uJMh=~U&$*np1tv7lUcKla&$K*TOzTx-QnlK-Jlx3m{u}!w`;^`qD z+VK56Be17Kn~%n4@%;6-pdNx?qlLXoheo|TWxGn9uH-gZ{OtZ?2MRx6#U82v`Jw}v zmc|G|vooc+*Nl0(R%ozpqgQ&@iLVo7W%+!e_NOjVyuVLGwdCZr!GJs}zn8XTZTVLk zc{imXPIUsA(*5s}$gfk{+%4D6)c!`b+w{y#8IS;Fk$0P(!o*D1f69HH6>@g4d{q`J zYOwX@@8JlyK#Xhk`n-sCoU$K!tD#V^3FS9u&goy=N|fUsQT*zJzqs={!)F^aFw$IT}h3|aH{U89dOk&&l#Chg$7@f5d+5>u-9 
zEh*mQw-`GRlZ?4bAC0X!;+wz5c+O5(f49bqpB(bj>FOrIA_uR!yJS`IGvE{CK(>X8 zXt)8C%%HYd9VWWev6QKt)Q|9!0)s-E722{W+I>X#6&f7PaakF9Yp16W@ zUP#8F+=G??6blrEffjveFAZM~(w@r0I6Lc~ovp8&dL9?y86ns^M-|63;8Eeh;aSWD zzO%NSb;Q7V05hrF7F$T*K}Kqy8?-(z9ro7zUytnIC3lfFcj><*o3vBMs(ZquT`X+8 zss^^@XcvQOB7WptKdu@zI!;s5!{-V8zK91Ty>%tJ0|m(n4=!C__+0{Pp(!yXe{S~L zT01;&zJ4;7zDdIy>MzY20IMDJvOe6ps<1bpWde4UP+^)urPaA--1e?(;#_ZzB|jWF zAQfd~go2pT+2u5y#PF{_=5lDLz%k&GV8@_l`o3a)-+7(Y7M%e*j)ZE{^~0(;AT?J> zJzs0h#4A5S2S~if~E59}GY2v2dPPT@VIs!?^MkV(;@% z&I2=ptz;8^fX`5T3++T`!!W#tc$Y&5x#7VHWP<(qBfl)y#~n`ayKb8E3Xn*ozp6R< zPos%ketAPp?d_#M(_s`B2HPWzL|jzbXOh3AIY@pw%)MFF=9LCq0zEc(pcimod1s9f z)c^=zrP1KZ>S9yj%n4`J#!+gAM6vY!F;_M6ITm4~`NF=b!sMAc8y~~q!Lq%>ke91V zOxk6^Cw<3DZf=biRLgsUE_LP~upj2VacRf5!e zw&M-0GCFdED#TVX=!{F_aSvq^k~mf&cY?AGXC|HKv?=Q8ur)nfm7$=Ht)=mc%Q%$C z*;hYb}xMy1No3$Q1 zAk_|qK4tE)9UHRqdi#*W@lm|9*8B+h-|o?93y=8a^)=q8_oidLWnQ*t&4_dLHtGhn zkj(4ewl7ExOH|HA0|7l9`OJ2pG?-4n+nGEb2S&G2)3-D@|JTYC=A>S@$dhIVnb$rs zmWCg|zD?I7xwjEg?g!2TEm5Lba%dDsnp=uuw^W*f>x0so?KD&$=zv2AMU{64=?m;H zpf#6l4XruZD_ZEsh4FOTt1oi>zAoD)KBPD9b^2lYGpSu;neQKNI%UwxGg}0ixmKHq zfL^_tn(?yr5vB|p^DGZLWhfh4*w$$OsrlYeN`z+Cp)EfLKl2*8`9-*`gDN0l-O?sO z9N{UbB6)!%pY~#$y6w2d1IPT#$$3Tsj16`gCNd`LnqiZ z)U37|Y&B31^Wr*}XrWugOHX_x($df$dtcTbF90&V$48pedR^@jDs%%)SDi?mqk~lz z(q#w05x@&ri4@b9eL)>U?KPewzm@OUobf1X`WA|9WRFCoM8 zx^FlA0o8Kj4r}kZSmu3>*yj9umY?y8;6f~3+jdjZqo|HsfdK(y1G=4S5A!yU#378d z7sW$vzcfaPv66<+(LhkQk6_0*a2rlxhmful;-NzE0oI>ELk(8ZjSH0LqNP*!^I&_Q z1tN=R*ey>;Us~*gi*0C%&Avsu64>xfo<+~)#;C#vjPtF5KBA#8J|T&ApeNc!YS2lF z7vH*5ok5GnmMI#{`sxPs5_h}Aes8a9pQrL# zO$FF88FTfv)A9me*h&XW?-2qrqv5o?*#t<{P3V_DsF z*SfDc>;8!V0!OM1=GpDYke37Nq|q1}$We7>-=6^j61uKS)_wCIJoO9Bca}Zgba7{U z``B)Z&3zCwd0(%mpWQ=oqP$!yWZk>+LqLFiuVzUZ>iV5nYklo4n{%sm5!#0<$=}d~ zt8F~G0zd*`M;Ws+VvrH?hQ4q{2ok1v<<9#q{LY*9q)i6{hhN3v8MCJ@WpjhQHnmo# zes=_$cKdIs>1Il9|sc{ALsF|KDKDi#}ph`$)k>u z8+u|}+H5205*pr*I~tjlnARK!V_x#h`~5PX-pwPF??nL2$0y{@>UQ)gmxC1#o3+a>t7M`9!?7OdLU zn>XyfLalo!HoNTKSi0e`)a&<$;{E;W*7)!78FG+LcYO-v>F9-w4o(0fm+tviR_5K? 
zPeMEfOFH-V#xf$?3X#vh3h{M3EZv;DdrPOD&T*@IW@C$DIs>LV?z95A-dVD`FdVgM zd}8>-wLE!tBRMu`yO90+n+sj6SWElC(2|mxAZMqKT*^iCSR6|j$A-;0zjzzYpZr8U zaKM3*QF;WJVdj@gBKxE2co7Sj=J!CUv4@h2K3hC1nOo^b->~2GHIrXY0;Z0^#)j9S zjAG0A8Re$qy}1Q+7BIS2NC1ckpb~RqDC-iiF8YMNaJK{&qzo1A3i%;#dYv5;ee$$H zo%BkrDSSC@5#DQhu+49$!DN|k(As)k`aE-uwrkLJq0OB0>8!7JygR1(v+f?}zYRP) z;FDn3c~tzXVBRzl{IK&dlCQK>B+uq(SsI)SEn7I$`|K-*b1!TzY}6l!^lsxoY~}p+ zp;?_+>sGa(3%h62k40YvdY-FgoPL$E;t8o@JU9|tTUOb?mNUuF@R(l zIAF}H*>%38`j=_FMAw5AlX{7}`=+|2vjny&@P?F{k~W=s#{WPyrkxzYp*Bn)r$o_3 zQ+kOD{GJ71^U^+yYU#Kb^0EOi7=1qMe(TJizL`a9sC;^lua{UG5!TC%^UNr+Wzw5% z9tV3~?E>+AZElxcdA)zkVB=UmGh4Tp0I#m;+Dqx0SJoFfJgQ;1S^pBY8x@{X`O}4U zi<{$%b_EsYM+7rNi>*gyQVf7XQizvM`chm=+NMq9{0+zZ^$P9I{o=BF=(Y|PPh`_#S0DYT@DzNaJ( zha0&JVyiYHK5WI2=Iiu=!0Ty{{e0zZ2nq`=&$1f&S9%D9*RA){7eXm=Cx=&L!fWGR zUSWZDjsOvP`fL356fUBCD?y%U$vXSaotUo>is6NT{Ke_IuoELn^M?TZx4I^SI zo2tqZLie-kt7r8RCmw||v#(0WeCneU>O>2jV=TK0&f>9f5-xC|HmoznwxT{JCS17< z*6>Lp3i>VnODV7#p~Az5-XbeBsz^mc(2!C?53%eL$lmwOE?X#%{v51xwAb@M1s9ld z18edJhY8|BTO$L^U6QPo88@WsY2{H7`&~ZZ92$h1UaBhB-rjpG?#FgP3~D8fExk{F z9)4R|p3qfR8rfQ0Ia7K>Fp32BvJApb&f8%1^T4YMw52%xRxB2Ex5#p8rP#1UZYriI z&sbEw}#$;m=58+n*j317?VjVI6`TOgs zcGj$p-8bKk%%O86(!YB3-?N;=!#MdkHVmd## zb}$-0e6s{+#TqLjGOLH2-H9YAxG7nfN&j$JM1lmwbVD>}1ae@CCU~gGB;lOr9*-QO6qvHnUGiG$4iLhCBP^=e~kwrzrtsDVJ?k1JxmvMWsf} z8~D3ZKjz#tbMcIM&JBp0DS&L{MaXq|;zs==}EvS4MET zPZ+~BbqeMF5HPwkhw)T(4KK()VU5!cp>umAJiT9$B8<|+O zBof^KHi3@^0hCEawn5Go28O>BJNqrEdVoX~OiyeGF{sp*dFijjUYvvWLE2E&aE*TB zVyhK*fIPR<_38UV@~L}i1Uf3KYbPkI?;i}^YihdrPp%}7ajgFB?Cs=8a?RUEK)oP| zd+@p(NCHLT)=cpVg!I1RQpnZp|%;A0o+i3xVw%8vFvUB zus;w_(G-uqougv)MK&~>0QfM9(IT-dMDA{%^%2q!)H3rdD@_O8!o4;vMGU;2(x*@y zglvn0%!e>^HC_05W&k7tZ1gML2C!g3%3)Wx%nt?=DR4PY{X}JVdg_VQyDOUA)jMz9 z2vpW*ZOd5D@mM|}Z~&7CkSHG#W-)5KlIKsJRmxSTO1g)!7J&FAk86H+U+|{a)2hsu zskB-C#JjIQTlk6bAnibj+&=4R?IS5F@i}yNUTBgv{njIUw&R#9S zE08jwN`lX+qSaNe#y5Y?!hN~>pMSjSgOC_MyJzpk(mm>avVvi`=v;uLAM-y4J%L6O z;V<9-(dx{I3?n0};H_x=?Tsn&dzAp2FM_Ek>iDp4mwz^Sd85~KYYc^8-z}g26jXGC zQryXm`V|wUUAfMPL-Sdu@Q;DiA;Xj6Wt|(_ zGVSx~zuJ2-M?H;D@!vQu5BJ>c-&cp?Ppl)BUrr96>Fyhjmy@`T>bd-X_fz{W&Be>$ zO1!#61TcA->jnvG6-Rji`RO^M)eu^l|6;OZWn2bUEkLL}04Iq*aQ}`^RR3M>FB}2J z`t9-`H{ba86|~vS=Ba|BUQ+vbB`=5}s^vLXTY@53JwyIBo$YbJ>Tk;*Xte+QfwUX^ zNRYLQM1iRY#nxzBLAy=#yIbzL7^Mlu7oE^|Cs5gb`_0ScCmI>~yfmWU4%CFY*Nyjt zQq)q6!dHe-5-^@XU#|9-wY({I9T0t1{x_O_{wtrK@xPu)^dFUE#MjJ)cfz^ZPFC3k zc=Uz%*_XGR9LnHnKiqS>*V}mSSds0Y^Ko2Is{uICq<@hLC$9v01aA<1$U0y0J3vhV z=zRwO9TfKoSI&u2gVq zj`C`;FtQ?OMAmcNhs!7~HFa`Wn$iE6ch3ir`|m0;$=?5AMaJng@;|P}v=AZy3MK+O z-0*TZQJ5S3Ze9MgjbeJdhATOSr$>F7AmVe4dF6W{+e94BXrJ8Xkn`SdK_hZLlSy$$ zT_oMgu;!3!6f-w1<7AsZBq=V_>>^lIESwepTKC=j4$+0D@?By<`MC)^hiStNroZ{n zNDQP!SK%$le8J#ZG^Ulo%uk7<7bn!#5sj63B0WJc}puooNad0fq>tDmND_?rY^Cyxx)EZ+%_GZ|Ympl9tRJaHU z$kDu5R49O+3%C0n>j*8Pf^zZ__Oc7(G()q|{rCTz6UhHrQwLmfe$`$yku} zPE1Vfxn&$UWkNIw_sEkhf_@pfUPi;7wL(SZ>Q{qUb?h{Q9}BX~S6}DFCGl=_`_GB; ziPH`gQI%?|p+_4)il$kRsn%jz^d`Gtc=ed9`&SnCg?(?3@4p(EoqaY-R0(%hdfaOR z`qTR>C9PkTz$;dG0&8Nj_IJXRpS;3Twy;j`&q0=MmDzla0(CELHN~p$`v@@|*5(y^ zT(g}L$@0dyokj|3^!d?G%iJ{EclQSPYOh`%g|bumYu)bSen0R-c!D=Qfi5$){KZ+P zzZBKn)eg9~a{RV+!6-(|Hy*y8AWnQWm=-dMnCE`Y%Ij1_K&# zm_qq>@f@`sSY50B~j3C zsqm?4q}p?@M@zWYST{IJs=g1lw~5Ii+-bJcMn%+_5uGgjryN>8-s-nSHo*I%AE3iJ&@$-S+`?*j^Z zeRvlKi&ov$;mBhfoS0T-;%P$R<_`zUo`*e%n~q2j+UznHoGR&Bi>Ohh5B(%A+l5Go zV)|f9y%KCa13hpN_gB*XUiX3R8IGMT;C%5lq}{hE_lX_ATBV>}$6KTz_{@Y8sYL#c zpG}%Bm!}+`F99}#M(?;m7m1WDRVL_>5uo?Sa5Ni-#mXwo2KY!>4<*i>?MW}C-&oP) zOjltK0i_hcbs^Nkha8L2ghV(9#rX7l*$@ZO8^tx%bW7d(f>caUe-+(nPR4`IBGH1a z*c>2C?vcjq6g6VFyR0+R>$5t7o5i$7?|30w8h1=%btZVBT&5GDvfh5(g%%ufG}TLzX-bh6mtO>ZM(Wf 
ze{=(=3CAYXjofyrsmFL#b2cmDu*4XKy9E6N=T4S9I#Dzw zxbYB#x3oy09~0colZ~s2U2JtdROYJ+t1@j%eB05gI``JrYzQ&T9UpT{OMuygvBZv7 zR3EADrxlO$z?3U@F!y9KwK5d@lOVzg{30jWzk6}9-RO;!BfjF3mP`>s7e&s*>XEr_ z2bjo#9Uf?Lpz;lrDn_M*1o66j00EcR24>ely>HSX0lJ)glKte;=(na`)A_b<~teS_( zP*h}HmT`>5A9!bpFVGVqdi|G?4jLR^lp9Sx*SaqMYDt%3{e4>M!>c2M<&NAG2NHEW zMFFT)d~=Q4XD8oEA07m}{3j7*WFaXM6a5qJ1#cH3V)nE1B zZe2N*4wS(EreKhS_+a8YaE3^;H_)Uhk|DtXEvRfON0kX5^l`1f_H3V54UtVb_=7+} zNa&}RKq>urN_V zHIlkI-j(_hmluyx}VjxeR!--2TF?`U|Y6BROB`mKq8D`{3h0Sc^_&&mbzTNL3d9Y9yGBxcD4$Y&0&U*{o%*s|@ZEo~rKQNP;_JBc*HfCeqouR)`di_mRy zo&9EXE$zA>#_QWWogNCdth7brp-0iSL^IFD2<4LiL!SC3s8kTpZ$!Wlau0JHaDAz7 z?ciz*m{;<=B2*%tEVMN&&8&I^gmO=B=OG{I%mPOEOIB&{03cO)s5$L#9SxIT%?wF8 zBQ~#CTo^(vfu<*P$(&=eg*|viz?{M-#Rt z?tXSERhoZ}(U0;8vPUGQ1aB`)0`Umn;X}Mn3U4BMiK{P4@mo*rC*o0mu)9+DA1_Z`c1ad!Zc14-r$^{ zl7H=BZ*{JH-NXAKOu`HBqVg*m!|hJY`t}W&SH3|%u+cNBjo7YSNKjqrycMnSr4itI zWktI4D6pw=E8*B|)z^qecNQ8sDG{Jqi*J*L-^3OCIwVZ!UYAO`+)^|&iRJAsEb@wHU3v6Z!2yDd4P6Z+h7FCWQKfWhaMgDgBAemqoE+RXs)*M!s?(JyJ zav2k&g{)Ch;QFR_bat;BKE+pw8GQsu<(&|6&i-{%RZ87~F=)!%u3j00{lawLlJxD) zFlVQ3b{yrk?omV6O}-1|p|A9tUDJ+ym1yRn_(53|XZA(4pM~D4dcEyK$8NAY><&~2 zj@_ChrEX+hW^tvy->(^&+dn9elZ!eAixCYxi_nyU-FTC!ZT3TQw6v~{6Ks|moG8^? ztzqFP4{JxE8>=5)w1t2O;A@K>SW1G~LeyUM`gs-kqmuJfKG>^CTfjKZ@YTNx)VsqQ zNjY&|2;N`DPJ+d*eD0C>9Vump<3gnqu z58Jb^R-w8d`y4!>cQ(@W(m~%ykO%^{&f=!ce&4$+pbO}Evf=hyX}m)8OEfxG*g43S z5^4RK*ljrYmY#tXevWUN9(a604Azf|@U(`e|)A zQ*{@)a&ERp${r?KNN1fx)eVdZC4Bdi!JRvrZazbrYhiyqVlCXP_lX5715iaHX0Mv- zy&>vtk5(6C&|Lmx-kOPZkpLV0v{0_UoOx~}8okV1aVUA7cz?LRzNYgaG7~d1THMw; zzuqq9o$-ng>n&P-LAc2uFT3z5^G;PkfHyq=?Y<8Ig-HAXb7;AE-c z%S&d9M%n=Y|;l0##kn z9v#^*aj(9?uItvqU45&5-QzO235AvvknH)zbH)k~o4sSOd&Le001R5H)(N;&zP0R< z)sh2;X_+)VmYJKmrVy&l&W7w$!^fjq39t$cb2}(I0u*|Kf|t4J^x5MgR{EE3;%Kn| zO^P60$b6`?PE3o?S3x{;)389k~{p$yCP43cjUBmFP^PB8#TVv?^eL1H`%vP$rj=szEGh@jz^wSlwg}+IT?hKyXp_ z+pu$P7>K8p_B;3hIp+(zZ{iJvO+i+Fzq~h%8$vW6Lf!1ety8Y{gjsM4{+Iu26+U#9 z_;Qw=bVND@6s>=ynEr89{_kQB+kjfC=GNzZ^ZoC7BH90@_0AgNm%64PeP=+mUKVWu zl)};FRH63rbN=5WQ~l2qp&tsGVn9|8>NEHq=2I~ThFLP<>~&OKy@-^Tz2^c3`nG({ z)XS27EF!1Bgr1uwz&u$d)h@QsGVV?Z%e2xv$bf)b<6Z#uBPk9)aB0zHl#%Er!bludRs_n)A3~-}8X>vjk~4L{LJxSxcCWSB)e{nzb}qOz~QE zlh>e21niGxJHG!CO7>pH%>cY$sEnD;ehCcf44p07PMHeH9jN(5An+y!f{PFTScG2H zil8C|Y`-*HXIie3xN|O|Wn)iG*4oKG1iypYMrJx(x_F}(+TDg@+HM3&1uO~l?I7px zK%WNnV7ji^cW@cexC3Gu--kEH8R2BRd z7=-!3@1gn|{)fMg#;LWSk6Pm^--nl$KK=S;$5YNLicyy7s!r<~A}5dXUO@LWA>&c4 zfpJ6CbQzPZIJs<#hxjHs^8WYLA8utPuYRueorZ;*gOTX)M%i^L;59GnxPrTOS&mSHn>WJY<-log zZdIENNeaDV1sD7?!6mMqk@*wAKT5WpBf|!YFJRbWCUyr(jXCjkp09=|aI3~`&_-ez zz$iPipYte!^GuMVqbvU>yI#wanqAdjrii+ER~c_*Mbh@AyO!oII)&A1HT@>?KogqR zVmvxNOsVPI_{9UW-rl$*2e;@Q4X7yN7PLHlj6xJPz^zuL;;v!rP)8AZ`5;QNrZLMKJ z0i&yJ15BTb8Fpl4W_R^vKy#@}<1Ksrq+CUbdTI@~05tr(JLm;y#A?g>77?oP8mJwL zS$Kx8W;C9L9{o<^#>vm671{~x4k)Ja`t#YXATx*YALq}5i&pnwz!F>6E<6dGQ7>!f zW^&{U<1ssY{>Dy_8ZUDP*l8v#53Y`ddK9C&tZW6(#RTE`l*dGAFRWbeojy2sM?Srle+Ej4Ei&>F8k1o(}yPe^H zz}9Nq*oBcNL`G%9m^_C9R(WW?w^!Z^C+0!}TYYTl)-#Q@P*o-sydk|ceuB|KRIgN# zg^Q$>AqbCsGkl@5=7RK%qe-*L=u-Xq|jJ0wpcN~pTt@oAP@0gYw1Nb6zp#v;}x<98wC zPoChx*Z=(?!e_h5IG>1yg?rF$lw@>s*liTrw~z|Zff9C&6Wzj+WPSjun-^05&ESLB zJDN~(Di^TaUT~URuOUHtDJQi2?ep3RDmG+O>O^xq(R?F-i^3o>nCt^*`x!RC$-H^2 zK@9+%FeLor(|Dh2-;4{Vk>uwZ%v5e)q-ZgX1=Zca>(5An~i(sJs7c`*wub4#; zK#QTc2g*mcLnv2`Ufj?0YF%ow! 
z4{rCW0dS|^yxA-Vis+8JjNwKG&+)dsYR-ZisTP@V-PGeK?xb-L7QFiOC=IH!S8dob z-9{!=PgGD4iYY1OD?QF>b=m-yOL6!PzL<~5bu>(j4mj~59Eu;kr#&M!%2Ba#>nF1Judh2MmmMX!UbK@8|DMp}FiN6SH z&8NRq1H~{ZK-^ygw)y)Vye67Tl>qrGAn=X{gi}fS`Euv<2Y5bU7FYN#3ef0=CDCNe za!m3%5!BKtQI3TdYa=;l+E$uZjgu5mlJjR?xxLywj8$0%c>tiYf&aOA0>J-38T5Yj z51u1+0Ghvq%c9{x>)luAuAPcxaFyXFY6KPDEO1mP-Y)HGQ^2yM^NPqUslr#jL^j6j z;TQlS?zOTH2>f2PtoR%SrhPMK2Q=oPPF(`R8}V#?eg(10Am)z`HA7l_=NO18q%-(3 znP{eG`VBt`YSUxgK$i00dqDStrbR-fxNQpR^j%C7i^^t$3zHuUt>qer?8Z~Z#N+RKlV))G-i_b zV=hD85-MZby58*IWYg|%R$n^~MaXR{Y2E{kiwz}wwMl0VmVHSl700Wh#bfJ=y-yor z#&K_UJV#rNTRcHrs;f}j4$Xl9Q11st$rec2vOIMwtEEq3>fd$}Ug4AH>yq|A@p;|2 zRGHrai8>v?@oM98y~q1SyOyB3?__#`Ga+L3L<-t-{%Fz3kFsG4SU)@&k#rfYTwnvO z7Z~v`;dLJ$NhP}xXX_MC_-(M$(g#r?vk%hQK;(kA-b$}d-(q$VEK|_u;VgLz4)*$> z6WsR=1G@_aSO_mwwEGeNB(D1?6z{}ahOCS2D^%}FKCZF$9+2x~KgC!Og?fg?OPDl} zw*mcoz`pxy6QCjbmX;kp2U@|^zqJQt6TZdEUGKzpLS23`(~L9ZM4`p{_czAr z$!>}P4YZ>e{@TG>zl`@{Au;}@?9(bN;!R%IH4aO(v5@vNE{9J^aDCW)eEXo_7plm zUOVd_v*=l0yfh1Q1&zezmt@IOUfd?g;9i(Ya^ zHV3tGIiYp$Wh!yv4Yi5XmI@6?{Dp?R~1Sp3d_{e5F-_eC+&)ws{N-Tp3Yuj_e;hl6_;j zZ0Z!dfw@HX((y%H;ysXA)_u=Lsjp}R-5Z|xrp%lu9IJa_&!0RT2trBFpo{C24Mh`t z`PMA8V`1X_U^FBGr#;xM*C>YeyBxlwEIL_ga~6J1Np7jxnL4@)=uxf#e`WQgLY8@N z4@OB=v@9Au1?i`DLC9`7lU%Qk{N>-g;ckq6w8Y2F85 zyY_Z;RH2vn^_R`j92ZzZa$(z>Ewm|B$rDQeBd<1wQ(?z(-_h3e*$2|RFaEJczb z|4mn6%m-i7=sY$^TA?eAxk**g`~;N#@ie}iD8sr9ByUR)(+ti2U7jpvvcp~_u}VE5X__)X(B z7F-BY)8GBKCy)N8w8%=mu{ANs_unM1dVSWULl>oY$>){_M&seh|M#NM|E-PA{ww0l z|4p}r|36O}8vj$8?EU|>_rLBgnwKo1A;k+1dB( zbN2i2K0bbHW{O^IRb5r9S9kq8T=A_GDiRSA002Ohkrr150N~*P0BCj~Fa-eU;+^AKgvrL_|k8IO;CgAJpz)!z1y$ zR~!UB!)plW+P>4VYJ#-PYvL;wQ%`e^)0XyV9~5#x?C?weZ;2)1QJ zsHK*FnKE0q^&KG`+7%Ey3oY1 za>p1NgWPcpm3U>*Lj6fjiDV5wio|J3guP^Nee?{CF}E{Z#kBjvL*@y$$BBUv!=;pe z&0wdp-dObfVcOL5mFM~U(9k{M_q(kiF!3Hl z;OeHDGG_AffR_*&2!IN;1i(TlD9DEh@&N!~5<&n7kY6mwM?4GqpLgNmvta&7L$m+N zD54@JBLn$WF>x?8wQ;ntbt*8sp@391W2vg?q$&S~*TmME(deD6u_>dQwcW2G0Dd=K z2x)EVWJK;}{lUhO*G=HXKXULw=wHc9FUbFq#mP$Gg{HhBxtOhkDLE%2Gb8g0K_qf= za(;(*X1vPclK)#A@}Iy93nwQ#UM412S64aNb$ALHVqt)cxQMD7)IkP9`cxNoPw0~8am#g%a$cZcK!jgbz*jo8u808O>=Pai z15g@UQ5ZupF2*U(HR%O;%k>iM)A@YU&}NJ0%<;fqoo)ohdN zF==)j0P(+HUC7XgM_;=lDK|wD{w}YyC@VP>)_=dkgkUWQ!i2UzOUXi03IDf?gr(ie z4_5lWUTfG;F$7@>=4n0uy*7+v#8GC9Vwq>LqjR(9SrKyHHvA`#E_Rfg{ZB@1e_6M2P7_}RWpxjIRm8xJ+!p5oitp*yyz9x?T0eg}W z_P!O>r_T5kuD$G@3WIbR`3vg32n8yPj~&)5+}~(sy$)pT>vi%PK!20*1O^%+q7E5D z{J#{@ABihAm{~?EHt}0Nf294TI=7<(N10a;X#Ms6yLyuQH%XA6F8);VS{}*ZbA7km zhOiOyL_ON}0Cz4M0E_q{Xmivb?6AHiUnRfP+(2!QmDvh}y7=a`c0G@o+4e z)(3ZE3xyAym9TCKu?##mQhzxbYS8xjRn6~z@HTkuR)UMXey}FcJ4%|*2~?R)!8f?A z=BTI^=oSHAMP@{|nAtqK%E9}xjnhAFL<(Z6AE1+71nRc=fU8fnV;K|JG+YW>W?5k* z?kcH^Gmgyq(?y6l%GMd0%+W~sU&`T?J4KA=E4-5bJnfp2%geUAy?eGlIalz>Oo2Tm zwW$pwnV3=66N?BJI-3Cc5!+C4t(o zBe&U6km*_8C*^8Q5yG3H`vfYPgfLD_^F1(KmGk*>M+~jZI8_3VBTZj2U+9W+U%j-X5JU&`Ii7O~46+`gcNyfOZR9NgaEh@W?xC?BZQ=60I!z`Dv6>)_@u zH5s{(3&jXa`Q7B+hLFQJK+3&lFPYlTQCd1$K>OHoTiY*45*&~D1%J<%h;NOc4i!dz>VQbj&#(#(|UvwfL* zALiZsgK~9_y?bA`#pU(jc^tJB&F;k+assDb`=dFyamZooLTjS#whF(y-l!cP)^~Ha zxSWCVuO2mFKG2>n$H@XsSAA_w*Pps87P6H5OfJRr(WcMMn6Qp3Pp;^eHVJ0GMu37Y zPgA1zYu)heZy7N)PkPP_Jp(G;{a=#MKPdpnX4Sr_vy%qrG&$|A#qV9TGlllm8N{-K z2vpW8+u5Fbl!SW1#YzFU|+RQSf9-rRNtcpp! 
zo9M1DN}Q{7FDBu4qsu7_xs>sPU1Ncwy9@QX4;=hbB^~8291V@Zuzs$7Ef~Oy@kR+x zEBu@izPW=OXQV8{Gtc6*ldM?1Tz9)-Ft|y|aO>C?D0*8Dr@zg`jw7v`WQ*X-P^40b zp_*7l!PB4_j5=bRYP_aPh;~_mZ{yKePcoAAum4bcxjQSO6?1SF( z!z-y+lIyflKJFD4CP8YTx2fso7pt0);=vcq`(HdyEt~q^*ptqbsK~!uHdu-}k^anH z)Z^i+?RHy}ItWB1S@vjYTbVWV>#_)?Movk?B z&+;A~XE^&(UR!-$41HhoVMcQ?6rGfVtBL<}Z|$d$4In)zpoxWwBa_3V zm1NA?=tjE8zRJGp!$O`&2{5=?m3eKoc>(S^&-VwG$t3~N)EDdih z82^>NG_SL}Iuh>iam8&#F73v^lh_ooNhm#&PFY^wn@^p&F`iH%;qH#9y+5bYqOQdm zhqgy=;v2^+)h2+n(Jpn7?&vKk$*bHeodCLx*VJ6?TId{9Ia#^ zW0Ki6ms^!5llIe+v$==UeH?|hP;|eLEHVGvNe#d3*TRc^TbG0dcL(l6W3%hB7FMe* zQLWNt!UI~llsC%R#+JL2H5qxxC~W~AUI$J3?r9zJI>v6*`L!v4*y&SwmtSm)Rf#0v2}cfcy@w|n#GgI7D-VMgonzGZu^ z=t8}w1Ick$q?uR6c_Rf?WxnCqwA06i?atBk+}FFe;VSE+9h5Of0HI#skMnB-zCrsO zWwhwv<&)J$x;5<9P$cJkQjoYJu1ic`E1EP9ro335qq@MiYE zT2^%@Flfa=A)wFBZVSC!1~LXg!=2BmbreXUffA2f$ySwO4xfKk>eW8hs5BN= z&KM-(P#&NpHODnn;SW@e)Uclu(0_OM;lXxXW`YF$tf#zhof>oOM&QK!U=O}49nXH$ zIddsb-OLWDd2>kVF=4ZO#1V@LRD1}fl76{x+``1KPHA~|E#{L{W5sEeuzJluEwSA> zl*SbeO=c~MiJlvpT&!W+cV=p}5kZvjA!j{|g!m^I!8nm-zLv}OCk+!_ZhS7n*VPw= z^dU-Fay^*=q4cj5pTLHdnz(Jtu{%ue=bH|DKOGSz7~j=BwTtWmcQ3~0&E7wE#7&f* zH?cb3(V5^>K348mfYMEXK6FiO2Yn<$qlW(b?@qoe-3qKR?W=U)T zNqd1(9ropfbsOffZxD@32y4Ci;$DHB+cgJ?Cz~ACK|# z%Bn;I_|mJ#=fNna9(x=-EAagpW1ClOo?Fsw{=?LAh^vPymn0UI#nV~Uaa8vF5vVO5 z;R=+nD%RbP{W-Sq{pJ0^%eRoJ!oS`-UdRA4JKWT}o7|zD6;JW_BiAy3#|RWW2$}r$ zvZRXQ%n36Z#7CT~hhDd5Q-(1Cb__D{tPP>A-y!C_ev2=wbWP{lT$m~7Ey?=T0fZ)o z(VR-7%Cxu9mzZYy&{UA?c=`m)U#l?bk$+sHj_8XJf+2B8k68WKQRSW^j_5pT8z;7$ z{7PezXDflV|6L3e8qX*U)kOS&%w+s9(q0Gl!_6snfca9+kBHt!bXCq`wmrIQj^df3 z^F<$ejU^RAtGYTB667VvRn?s<2C3zhdUL5a>kk>c&f25^70-*AT9ZOM#=GNb-)>=} zf%b#pjQNgQzG6y}3n$_bqYT@5b=|b50<<7EU!TKL#+X*rx0Y_tb6LC@yq8wop;4~J z%?PF%+q~B`eq$?KWy_PU8%dJKArB7qHtnR_sLKNz^a(uM4!uE;U;}OeSW-xqb9wtNPYc zy`Xdf&tY8!O<(Ct1fHq5)oHfbZo3kME7I}HxeODD=PVun6>rRCYA@Fg+{+d?w5b`6 zRD&{po{Xm~v?JSDp;lTqDR`^8#B5hSU9?YrsE9$j&`Xm3adi*C?@_KYENiM)AuDIyz zeVWJU&v=l57iX7St$AO9jO4e%20jMN1X9WpDx;U8;kYtE(JnCE)So~Qv z*y)n!+S8WkXj9+zSs`JkW(zOM(CsK#{pbZ-tVyw#0gphfUQ7aMH~K=eJ*wo!Y%_7s zS0p$7bvJUun66Rpsix=VCvKPhvoc4r_6k)?{?SqnhK?%N5e3shh2zz)KCK!L}XMq_&YUOsm2@7{CexX$@o>zQ}ofRFCq}M%*yhsB}Afub_A^s3Yl9Ay) zWN1LZV+JRtd>EMW8rd@9SkH&!6ioU8N)JV?w+LI_#BT6R=I>S z9v;w;O=R#rjk#W2LT}!J<%vEyBjYnnUqMR-8F0f3*!)f3FH)l@~3fAS=n2p&7>jcDZ)XuAEtii1Q(R(sx1m< zTYb4oum$^cKQG4fEZg}oP>5A%71*)^`qk))DU*?9`jn~DTl3`bnP2`S;bl(DJLdYOH>i#1rbF<1j0F3+*647D@@5zgZ_tFZ*VgkmQ{lA#QRdJvf>% z3Ug3`u6CJX`S0Q;(Wv`FOCv4YogK#r@I$=#l*Lpt*j6nL?>GEL5QwEV^u6`b&+oTv zW@a9UP~D$wVq@Q3+;nV_;y9Z<3M#Zd8Gjx8D5Qk-ZM8&eowMNBY7!BJ!EV{@ZZ3t4 zl89N^m-~pqe}A9r+iOx@A=;MZr(SM?oW+^OtMPkfYizv3$Y<~3VPgc-4TZq({oGbp z`(QChLPf#=NH%zQ~NeK+up%5T)G3mkt z>$~Rr+?39}RL>Sv>>#Td0oHasg!3asv8DYNRR={GQE!kdEYcis`?*&MR(KG>0H3VV z;F4y>b;;G&(!h6c{nxc42y}L1@F5%`%sDIesE_Mp-hLS90!!XMyfCkh`RD@S(dvdi z(&JPB5smWqip`vByC9DS%1QL3oKkHU zRwT&0$z+LDq?g34PG|R0-?d2NS;P@r>V}wDwx`T>Mq)NqMC{32J^Fc9gOJCo`O_Cc zYyBh?n0e@A8<#KH^1cN3$voGJIb4>L(?uQ|C-yRlyv{)}LvxzuS$WUZ&rJmu1uqd1 zE}oYLDVs4EmebgI$~X$zxmjHv>bfb{KI%t~RPPz@iL$imn0(UOtffa;T~4!gP3b23 z3_JeRFe7h4H3$}-*qYl?RsxUIqrxyJPt&R9SXf1(B z6}dE@6<;n1bUrbk0V?_uiz)j?cwgYZjiHvq#pW(B6dL8Z!3vu7S0b1H%s#%e8u}(# z?bQrHE+AOlT7pWc$Hl&`hwxVGWu8Z%&((=oJ=NI+l}FaiARB(F)@$HH#~;MLl&kU_g%{Bs@f zNQq2a$KHtatej$UU&_0UeYL@8C1CZ(BO{0E@~3Pzu`ms8t49{s3Z-~%%t#>0BhO3kMezFablt@Iop*orcn*B#Rg} zz=-$F*Cy3MOBG`Gt#el_65`mpZ6XQeAK5HN8&@2Grc2vkvIK>;`CO@}iLWpwLys`W z_PvEjV$e6$s*eOD-6b(=kI$D$xC1l2<{)qC$F?WMlcS^81o01_ zQfD-=&J9A7A6uMr1u+x7GMY_Cft7Bn4s4aJw~4VxB-JLeS;yiVuyyBo<=z4d)g5~0 zt=BcrVJWyT6NI|tbLQ#%NPoNn140HnCa~-qvIznzr%Jorg>6~C(|8Ol`35%Y7SDLK 
zYw1b8PM&54P-ELj0-4_BpFN%gy?ux0xY=(MnL+OWT@NRg^P$9sE-ULpfbc5Jh>Za> zB>+bm!!3M`CiESh{N(!2R0RKW&LcI#lH`$U?@j{vQG_TWi$WwLw=S`)k9GQ31>`5o z%Zxufl+-Z5bO8TriK+L=Bw*;I`Ag6@%zCxrmwR<&j#%9-NQkBWQG!4RvUwE-`VOsiE*w zRL_^CzJ96ZmN@}xCP#}VD>K3Q_BBbs6OWB#DrQg^m~%n^f41vvnP08{rS}t{BhW@O zpc!-e<9LmVf}@^ckaW*m>kLInqZ)gr8Bb!^(Nx*%ANPv+DU{11y z_j}%0l4uLG^uO+U%FaO9>gI7Kxf@-xJrt_q3uKv*TH*77I zxnhcd6Q4v{+flx+hv+eG>E(g( z&!a-4FanxYpId3oxrW)9F@!~sz>@D^o5u!hz}- z^f|yS1bbfhG1hrDfu9TBMykj5RsMKonnuJn+9u)4z_w|7>G5NM_yi4D?h_^6Nr$u_ z7k;g?z}5_>q+v+1lTQ(O?3opywWx#|n=#);;pn{agY8iJGwb&dBfvPXxuXdIAoU|V zilG6}6E*{>4AmguA>=4drFHK{?h`5{jy8~jJ0XPgGdXJyRyhicV&!UxQSvP@2LlSD zD$wU3>s>`Q#RBu9tkE?-+xtiD!qY;B+PQ zFw|)DR-6-n_2L#HZK|lW^+K@&9bgp~%9PgU0JBL@!En&yVpoa7roCxK2ji$mR z*mO$MM2ciZs!7Bd*a+3DpjmxLX%WY7s-N7f;@lRxt28D-|lV&A`F?PwZu* z&XpM@pL76Ye(s5!3^y_Dqw8W-4TGLL0?>}FuAKuxs%h)6J)2khJ>Gq(=@TH6uyQZj z*1dZZtxgP_Aw+h@+&b}|$FU~)CX`?v$xhYcQ$P3m3MN?>Z?{5bn+I%e4@7Vmr3WLp z=}X++ZQ~cCcYe48sbY6}9Xs{BydFsC!B+%!(TrL*g}3A`);(n-X`vGHWQD)mCnjZ_ zUh>mPZ$`mVA3>?*mwOi8>4@v5H%88efo!Nr%lFV3IKD)r%-Tfr?XN)4Ozy|7T~cJW zbSCWl2M8@fT(>AIaj*!|b)u6W4ldD=9HoCmY-YH=;y%m%687oo{3h!$cZnKxH*oeD zI1s1{tvE<=0Zg}GHSG_38J|Ep|I9M4B?gO^)f*~o>0{{YhaGh^+$%9fsoGecLnr_M z3$MNiRzt8GCcta{a*D}nQM!dEJWCP9|J^~`5k_;HN+*Q=0*6p_%s@06l^Mk_PgtLW zlblMW!2v3Br${+pxv+SG=!I*({Z6(1a%<+qqID>HY&tLw_x*(?1=JnIhNqt}6AlQw zv-7kjFN8xI0Rz|H*5}kL<%*m3%Vq=n{gfyTU}B_^|(1Y zdNMC3w>-uFg2uWUXeWq-H(Z{bS1VI&?v&K#QBLU~&FTC#D$I~dEW<zJ05tFlx`E>C1a*S|~F1Mr(z50vx@aq+Sz!RN9GOzSAVlHvI zSA5lq4R9FqNMz})J&@QGmBuv}q_l{D0zDT0hCAmmOe4g9{|*M}RSE~nn~8T{u0rRu zA66uGU)vmeyM1U9Fg(pQq%Ng_wWr;&k0tx=yr6QN=CR8vV!-a&Qo@L?4x)zu8z;Ao2CX!uj?W7f?`FonMt=9t0~^y z*NX3SZn>ECTl8wJjR}#oTuE$8zzFaGaj@GXwNOmRU8~nA)q*8omzHu8-BqlX( zB#wU%LWi?$yZ$zJr9(Pq0@5vbI%{e-Mwi)j7Qh?F_s1!N)h|eR0>dEi`}sUl_c{W? zjpTN|8}e^>JOh6oHWx}K`N)CZFJJ|)n$?EM zlj_p6lUXn+uAqK$f*gxs5U|WaP^iE6P$HuojG@w-9guy1@!*3xZL}|N-@IkznDVDZ zgg}4;eCIrY{Zn?HDEDGKWFKHJC8jFpZrBiqFHf@c8`0-374Jaq<)yblqx2W|H`cjX zK{M>43Z19m5HN~m5A)jaML12vun7BjH-_iY)KfkRwN zH_pD?Z?$9oE8(WMc(%U>`V?z76tOz#Hk4_Fk|7DudQ#X4l(@|#=TvuOen?@7h(oQI zM^O@_mX(`!t$|3e2lFbiPWZM#Xmq?r413{cHuK4#lTWoXB^@)%LwA2!M$=(Q#aBLL zf3_Bj-8v{*$d^nj|t@_bcRXEVomjezFMTU@c+ zoUgd*MY9amAAkE87`x=~jYMOqevFc=RY+Pqc4%=s(tlM!F@zsEg)gPbY=GgGVB`?VYQ6yTN}bWFEcXk?ac2f!-H@Erxq7T@Rf_yO1j%*S))He_sF1IPV;HhHf9z?K z`-fq+JMu+A16hm{Q9&pdb}n`A#DMdx8%u#p&=IQ#Je8kLs6JJ_y9i75(GuHl7n`vr zXs>$N{Atxluf+`aTniFC@ac43H@BYQ=iy4v0N=+(ROKQEiKzI zL+M94eg1Z>_u#we{@3v|1*nPyCT*#VdRSC9KRib%<%tng%mg2##bU3G<~8b!$%Z-8 zIBi%~BUp>imdn6&h|I4qWl{YbKM3xOItRO@@A+bG35S z(mW4)1LdC$fL8DrR3LdS*6OCq*hz5OvjBETV?_n{8Nw=v=Lu&&5cymwfZ?ujd*2Q5 z1uXbvus17fCNL>(S7|wt2`bi97R6wg$BiD*7j)`9X|x$xpFeimuz==aLzs4aw?_0C zf*Wyx8rRmt{@=0iLZ>x9jG_lmKTDT_cL1)XvA6$K) z7lKa2f-w=*>Crb0o}ayQO--XYCYrnnuudu{kwYM%8%ndn6d?#eO>)c;WU`DKkt;mS zsKBIZoxwn;PKpkI@ehqKw5DN$0(Xv9?j$W~Gpqc*T-1XP-!1LEzXvEUgj85gHRtjd zXzgmehqj=qsu`Cyv0a6a&KvLL0_-lHvssQ;w<4ykzh(bW*WshF+lvguB5AvMAXaV@ zL5(hQ(J$DC!Rs;QzE5i+c zFE+a%RrN@B0n^xxNsKqE^orVv$Bn3jR|s>eXY0PVle5ag(Zo0UVg>RU)cKbb(94Gi z>?7bW1L|oGRC1lY0c*Vc$0ym8W&|ua!vY@8j$~v?r;~kbm3s3V6N{H+P-G`y)EG3lW;Lw*KC5 zWiCorTl!6-@|XE+j9UBei?>iG{2_`WK)7aQFj|MAUtWIPx2adiYi}v{pgPrG6p|-- zxfSl5E1_Q>MX^9_`dUSKa)*&09H|c>BKWt$+&|wI(swJamNvghVvg^Ff;VOF+$u4s zQ%XL=b;=VKO#I-s6~&|c%?WW`tId5v9eV(@pB)rRD}+@kx@%2(#m|e^3EjDwK5@dW zIH=Rnvvo`;>7;%n zgSO6kI7m-)`SeM%-~)ZM6e!P8`XgO-U$miP7!UF7M|$n*Fcy@o4CBvEvq=0cuE$Hc zBXd?mbkG}8^>(WPTBu7)9wZww!fmO0bb&b%$X0^EFi@E@GKn9pWmD-bZzxxpm!dna z%1h_pxaFmkW{xsv-NQXt>j&jWrWV`eByIF(Ob`$+3~)~mrPY2N%V`WDmo;r7u_|yg z2v9;bQaF9|Qz4baByWSyD{CErj*;|F0Iz*l 
zjX8X6WT%*AZ;l0V<)SOYn@f!&P7?<*q_f$V(!Bh>F$C+vZjz`3lmSnI!Z``9z{k=L--P3DmE zk`5sD8ChD~xIB7%gG;xM8PWUtb>@d0t`dNRUg)rOo?NKw;QNO-1r<17X=OF{X?3`p zwf1-fPmZYJlj9zD)#Y4t9VHeN20fA!v8}PMjOk;{ESM}PhWBA4Y1dCaY2RE)m_$4= zs0eKeZ(nskF!wR6^Ja1zHGZmlUOJlN_*fULhyTekFoeUjpNVa#;FS{&K#=*SJ&tYt zTIh}YvjPHw)m=1MMnLkdRfmZJKZC&~$hZ-KnhguMT$r2u+2Txx07ePE0~{NDA7i=z zv(j?d>MF9!Cy~hPj;zmcg0Xgtr4GvV@Qo^3zZT@0MaSx=M)dlKe%ug64+e-vb=-rN zj4_%uwM}a1M}5O=kuAT1=X#vYrqVcx$YkoViRhoKB- zd=}d3*sV^(9&%*8sT6{R>j2w=bmOIlRLoAO)AfKta7gt2=P5TcuiGhn(*tyD)3=5W z@^sW+j!&Y(x4#H4N5fFHr{&6BU$MTQRT!=!m#h)Iom`g;M?lDtCw^j$(Vu3H6XtIb zKJv|~4Bu_@o<~b;u8W`nND?Shwdu7w1uHq3_kI2Wb_8|9k|E|eBd>0BoX&ID)eX2{;o+1g`S4Kge#l!(g>%*2G`{KXuoWOxr+qwN{sd*f9v+ z_iwFD(}~cs@WBY(^d(XxUSX!Tc)HJqVc@Wb@bv-+lm1?6KzrX2y;fOsH*5xeBIKS>(McWHi3$jtEL&HG;3 zvpxiv4Y{FGfWO#9({bA_frw(tsML~hl1irn0P-?R+DU-Yb2=&s)lTQf8)rCUbK`3H zzFQ6qz!?A{m@L}w7aV9==?XQ9g?k@PG4S$hJkISE*5HXIX+VtG}s z)DE-w^6B81i6KJZzLh3Sz#;nAPZNZu2-P^BOT#ZIzK2CeNXp$Vw z3y*NLIPspu)=+9*5#|D4)ZS3+1$Xs8a8MJ`I~8*-7-oq5K4~cM3wPc(AxTcCOx{-w zDW7%eQ@u);?xzz$^MPXJ*Nw%g(nj;?6~pszp3P(|kGgKqU&7xwd*PSzk3ASg&$|h) zr`y&@nBDzMBaP;|;zspcds~kZ>C-c8z1e!7Yx^~`Cx)`YFm`!ntK07D%sM2lb#PCM zZHpKTAY)$gscRZc7aVcJo}-9ffl&x5WG9ItotmuJckDLlY)G|`tJzcMst`LATnvw~ z*2*g&+a9V}u^H4O;XW4ZIZ8#n78vwCS{%fp0XEXSaPW0e zK7RHswEltcqLI&Iqh2e}#7)`+zw>@c782~177A`(L<}{@z`i!(p5GZh6o7hcKSq_* zU2**p0~F@*!6%!%slH_A!G=Y5s>kmPTPX`&7d;s4mfuVBc=$b zhwG{MlSai>m(G3hGW&Z~0B^Y3trggN)}h+pg9bm77toLaf9f;#ypR4t?R?lz*GyV5 zN(ttuSx-?Itm!YPNv~R;)m_PGfj@Ln(Ua!JF!NPQkE}##XamD&I#>oXwM1 zKGY&j`reMInd_kz)GZ@!?dJC(t`$8Ffgv?;pyqWG_JHf_5l_#kch`)8YXqQbe>xOq z?s~Xxzz)8$g({-7!H~)tUNw)?q;RfKC(r5de7N%xtCf)-%x71gR54mZfoCI<_MV8> zb=e`z!9iyH-{(U(V#Tc#9Whyw657P{EX`*05Ac5M*2Ji879L`NRi_siBZdy3j) zU+y(x4>axvamET}@OTePJL^b&y zTvnQJr|sQsM@;{Cj}5bioh~@TylIz0)`2r*`N)C@H zvEbW*Lh?2MB9&o*l+1+?g^)kB~|NU z`%EH-YmEj2Qy)io_W;>`n3zFWu80dm`#-nPfYD}71aPCRzE-?qly>MdaCC>@VIcY| z5{AiDTsq^0zDRIWMkzvZg?yLsz#13_?;M(Fn}I(;BKa$3d{U9J^pa}Bi^@KMV&X9q zF)2(|I(}%hRa278E74+@eaF@Z5qpY7=j>{RuVM)6jW=hSCxT{^hzbJ7hsOI35b2G% zFPzBQdm02!U$qEIF|&iSFjH2pUwX!zJnIKfkPlwoTPOEt209&#t7lq);@bQ8vegLE95{Yq;C`_8T_JqAV(kHe z9C*6su_58d|BmJwEdH{r-Q^Ie`FFm@$!f>;Ddsj1y*{Xjc@i*bx8GZ6kQQ*E&0EeZ_-@ z`SC*SiQ9>t|M#_?Kl2bq22H|(AwV(*LBRc{DF04(A^pnu>aEJb0aZ(l$+o*m`Ty-b zc^S6!N9OE>MrS!JYFV5a^yD5X=v5lOW7t+=B}|{X9MRbMJ4*kZ8~vMg`)mcJ``Y_` zr3!=JcRBu`F`)e+0L1=JZ04ex1IAckUm)1Jc2~-moyY2`JN7!h|G?Jy@-tq9hd}uX zupQw4RsV&fFnZ%DLw^Ac-)8b#N}_f3zMZ8*?81UR3F~%+T1NEZ%6S373kH+bUD862 z-L{+*t~*>)caY+`LT&$F_Wmc96~bh_!)64DfQKrhZasOO?Q+b1=)gLqKL#TC13Ld> z_N2lmR@1P{&ZdCn(vt*Z0biP5kV#*Dbt3qO(~|EBf3WzM*i#HKSOQtPkoHQgKm6KX zLh??eeqjLDwT!;}OI!Y$AP<2+`!K`|=NSGpnt^oZ|b3lt03I|FZ3e zc!>Hwxv9_6{tYu>6u-<7Lz7ePuk7VFK|M+PLB^Z^)dBY3Fw=zrVa6E9kVgGyh5t8M z`cesl{e&`cufqR^8RIYrGc2$vL;nFu`_B&f31MX!wSz{G?#2Ix85VJf?;wC4F#Bs& z`&aOEI|zauUWqji_I~l-neiuhAq*Bq@i(pd8!cOjg&<8&da61Ff8QAScdL&RMu7lG z$^UpAUieQm#yTw4-iaS9qIkkLG)A=uzr`PfU_cZ!e)&SS2zfUX<&ht3&KF3+ zr-}W=aMAy)Sr~_~W%}3+E@GsPQ=)M%8>G__se|I7NvJUnXOM}h{;L9oZNe&lWg)jr zfTdYOL!%xhfX5IFgR~`MU_bNEvjF~sB>tBfjC4RJaIt|@k<1(ghU7KIgLqZY-`Ev{ z|GdAY;Chr%I~l>Bw!Hr&pMN$>feq2ecbvLn|5*b6$Z8)0u~z$|q%*(2^h|#wlD~tv zo}j@Ht3Q(dWq7|6l#i>+Xz>H7j1nf1A0qxnVRBLdVu%uyBgMR|E87+8dETipf;z!t zhJ67NmCws939l5$igY)9`abcc7V&Lge{$HGv@FuSgqr*AQJOB`DEV?hL+hn#8J(l~ zU<#z){f|@MWZm`NCeg*uZFQYuSK3UypKJ1reK+t)jELQaYInTwyulXlZ@fMLNL8PU zg|C>->uQU(-ro9n=24g`fZ=mB1dvH&%y6=tSxS25cFrN+Zr{&n?Aui-C-HL3L^Gr=DTd_kO1+$R#uX2v4L+ff8#=P8q4@93Y-9VVeMjpm9n1YhUj5 zri!&D%U&;=rZxd^aXbKS(i+Lbf`w7bX(nW@~(={ zrN!#an92C%ZR#7XqDoULt;%;~`z`obG$`Na$YC+N7)_o&hoDzd1gSIVF#;)IJ1*Lu 
zpV-d-v~==Ue7F8OP$5zlr(ZVp)hmwu>G*tP0WsL2FC0D9+AZqx?xov0l^#E2=bDXF z@~)Czn95?O>^wA_kDn%&jjyaq7G$*5eZVCkrTddC|Ggm$EXX_u!dLKFEt4Rvl}4FE z^WB*0srk$RF6atJyyriOCqh ztOatMD38y>KF>3ciAX=pubW(}lh`tVBd>?W6{<1`5|@!fubpBJ38xNhlIU@g)w z$A22u9uJn<>^fE13bQwOsaG5H>(mf3%rM{3#=~+_hxKlG#}&)*f_Ty3^BxaiZ+4Z* zJtL3D%|{YG7g1L8VY*x7t;FVA2f4G2?9i3SON0@azwnm+P15B4twGBTk@t}mdKtX< zd}!-i(ZmVD=Ll49Nd!0v(_16)8Ku8X78SupRTM{}E~7%8xd>d9i_wq9APBTS(L^fd zc0mu4fy%0OFoD|2OeV)&{6P)gS3s0T{$^-*>7oIf%DRz#_^c(f!TnrFtI9#IzDcRT zi(aO<|LBIQM-8*pbYfYO!smY48seowH--~cB5;`w;?28REvM_Fe*v@@)Jt^V!lT^H zmYp@x`xHaYsZch1oA%1IKRMyZ)+H&u!NKjqv5*KB{tCpvS9^FySBE@OZAJepP6?mq zFXy~ru>bG}kz|KXRJ<4lTrvslmepju{DBh%a`>gzl`L3cwyvUp?bgF^9+mpoV- z6mF?ExcwH2z zr*Q3LZ8Ag15EPU@p*l$!EpAAS7Sn4k7b?sQ%Vk1|xM@-}9O*=|z}W5#cM11b?+H|z zwVcY2b)^Ik6lr)H6Pfi9!nP;6`k+4|Yz-tyF~04@g+jm>+^sL za29mSLj=r(q6&ckhzM%m3fnkBP z>{{=xR5>%xuT~Ut9suH)+xL5yLu5|J_42C1wWhSedYvwcI6v$_vq&);sH=m=#t-pz zih;&IvUf@duQ_~y-SPGtA(wGF27^jB4J29Uw3Vf$-yha$=BxQZO{ND443brdz*^WE z(}1j%S{#FB#I}_302L9UC9=&*Vn{AeOdJ-=`&V~8lz^8+&NXi=T-KF>+5z_>vBCbh zF`36*q0(UN{XpbTLi?kgkt7I?&rc?qpa}ST~w1xCikEJ zwkPWR`37ryvS%pwvo-}^sq!ZH{_{)H$w*gno7-b;GN+AfDJ>PggNStvaWTs1o5%&q z&k@VInt&TUVLz5=U$xthTk1*&hh}@wO(T)zgg==Vrp8eRq{)XCmGF{Vw}xZXTc2)p z2O>Tu(R7`MVlylEf;bime6XJh8u@lV_#5_We3dKiT7MTyOw>JqPBmf|BjH3b-OQmq zaBqXP6muOdbXayqK?0x#Zv9hXg$<#c*zrcuuZ%>pVgXZG_)crDF$F1=e zlH9lhIk0W7!WiB`&6DviPL$V*F3PY`QvM(Lxc%sKNrSW&wk(%(iVghs(_d5B_sIBD13PmRWSoz)J#B`Uh`0~3oh#Y3=b0iw*+#Dw@!^_Z^RnX1VW%0 z<$T#lA;~0GT^`AzlG+yp*M}FXcDIrk+Smtg-MN^15guOJZhb_1=B=eOr2BR~G_N9z zQC2OKwQa?}vf9=Q6D*jOhh_M*3Vx99Rk=;>CLvZZ5iEka*V`i@1qaQ zvpnU`3b9&M*VU27t^yE(VNT``&rC%_A?vg}GT?AZlzslSi z8XPU&WWizdw{AULAL2UiWR8U?K?(0K)y?H~1VgMo-(APyRN6-5-L{muWg;Tjb>9%3 zEjaLnK%5*iRzf}zOu1!_>!;*e$V+d%{u4#6ZvVBpj!j=IOY+DfpKQp@$ zf7V*Ma=wH-cJP$f9qisG6a~k}5+#`z>G*+_WoT(G6cwo-2>S-l_c@E#HIh;a^Y;$0 z-;uEOzWdYc9)q*&7U1FD;8A-^>{Sk5{H^x8{pljD1IrFaXQCr*8yXS6b|+MB289N~ zK2a|U+xcn6m3#HZm(fu+i-)0v+f<{=<*3Qh=PA*BF4ul+6#au2H`pBMhRBQ4RJUG} zCG$5qmvw6%lnXX`QQ<;!=_jU0mz&5gkf%1jYl0_`*tDpbeS`54=vyhb8Z|+q7=x?e zK76qdKQC}7T9AI}(x%J!HaA$`7rIWLz6qrU;rach zgO3k%XJS~D+K58kL=s?h8au4Q+=NBO8y10N!%|G#QhC6t44fg_$49p(T z#nr7lCh^RUW4JwU%Y_|Uou)ohIx#L*dLQbJmygNpW!WF6MPkY6N zxL)na+XXkGnpT^BTe9Yb7x@V$G@dJyq-?#_pK-scAeuvCj)(HPt)YD8LP-F&HCqTc zpSO|3R03kx>$TpfY$+P<9UmyAqsOHga5S2T{xSQScS zEGw7A@>R*xv=gkI2=5fhy-v-*f{vgBJ3VN{jpI#_ib$k)`~GUy0aj(V>w3@8&3GO6B{I0)5R=lb zn^>jZwW7dM{9a#ZisgY8Ocva7AR^i?eYY!g^0g3}r*M^wqbsDK*j?&AM$J z`r+}Hia2uAcMx)eoz!4qW(e@hZy;_KyK2NYr&~B6D;&}UaWmRW)tACU+xcrb!^ z6kCYe^xfU7K0#yEJPT~lV!56qCDmg@0@>tH^!Dl^hd9i(I_16HyfD;Q8uw|0Dj11l zGjyME%SiAMbMpKctnr+*;5uOkV$tG=;DcJq&@@Bx#3PoeSIr*Hb$!bo&}f7& z^iC~5XPBWtPEcvy#Iz<#FOERC311kvYF#-N&CzUoB(Sh_bKNpseW{A7s zrx&XI7XvDz9bB_@c_v&;QX9f7?K)HMzxW(HfX#y%Q$Z+RZ;1le&QWfq+aB=XQtooX z91EmDh+|VZ?2~{!WCsrLX#BQD;+Dt*+#2tLM&ZD^mBfaxtlxPHRR$9>bqFVX&$3r4 zDCLAJe_po`#i1~=y<#!r`mFz4Q#AL9*>|<_mJKjn-qd#Je}%&@9u8+}L5HFIw*BLS zcr-@@v8}=nfY^vkm5uUNz$J*=6=ha`IC`}yaBTbP0R9xyv4KkFfIAu`dPmuEHE;My zUfH;FpMU3rb>!{hwGB%eq!2{QYE}^)f?Xn&CVgKNQSZ+nECz{0mQ*tMae+K64I4y7 zY)+=lFbTrd$CDId?aQRH=3V+osx;^2?(wlmx*%Kk4ZAQqd+-J<(U+@|fk)5m?xDEi zD@IS^KA}@WBI6W6dvx^HLi=ajO4A911a23vDKXWJLk2&|Mq8sCpun$|`g>jkZ`i~l zJe%mh#kK)%?DFigEymk3ScqoK=Zz2DrF_1nF~8LNI#*vqk^-nW= z0=KHQO=9&8#nD|w8^@!_UbH)|aldmj*|40g#LW?S#fq6rK$&?79Z{d8V0MeI?{ZqJ@8@6;LrOW{~LppZfy`R=&qvjP;;FA*V z>_DGen`P~@?x>E_^$0!5`;(MFt}lhsR_L{Tv2>H&ePMhP^Yn@PR!Ah zt7Q!zmUp+#cf#Vg1_(o(>Dn(}uJ%3k`V&~AmC9Pn@UrG}kyN+23DC64(s-M0;V<6m z4Mq#rnj-6HHm&+h0%mX2rgkVLpFLHzUjzwIZVQ+@8vdi7{&F(@GfWgv2lYExdtJ!V 
zaMIC5!D*cwbqOvTiEyLW0y)FI>pQc-mLt^*S7&v*F8)1?`EvdK#AnA8)g12V0z9Pz ze080u&BdE^SGuni)$n4FKaYovY>wZD89|41S-fkKD-Qc2EV{J&J%OuDHg%&|STN^t z$i>v93`^1|bCb9#s6XASD|<}L0tj9WYynbb{lV;gSuE}8e!_U{IT}Q582IQFh}ixR zPfO1J4o@}c=hf%w?jeuembqY3_xbgSP;bKCUqn=H7R`@%EX8y|I}CQBC;~KL5r}Y0 zz9Q}B3)f?9lulK2CJPT%31^!aP1KNlAldK__0A*jcd&1dR%tceaknzU0%N*3#QN#h zF{=&r&%L+pXQfY<;m=IJS8F9us-}g4O=*+a0&|Rg#$Ex-qYz{ETP<_$#p96B4TCp7*`rTA=+*Vv#3uEr8?E?A zV&5Usz}-UnUE@w$BSKPJtmP(g5>eTzy2zH(b-rCtLtbbWbt2|G-P_pQPOztw!A=c6 zZ~{F^jr9XXSK435h;APzM`4n5uKECv8iv*Z3*$_39I>nGfi+3`om!fCg%hpubm4(E zwj}X?2c&ct%?L<(CQ(Qo|vF{wLJ*q>64uWg|!N$edqRC5RfU`X$v<@V|NxKx9MJ+iyBnu>m^ZvyKz z7o1$@(Ca-&@u1k-aZP# z#{|Vtzb!spci&eSN3F|CGovX;6D~$LM?C0q zLMJ5RPeE90FbGxYpz%H++A?7A#ehrnZLu)3c2az!O&u0bE4sK=h&(JDzaF%ockNhR zZ4eW4$evShglxI|pVs@&SJd`Tst|Qb@V5aP5b)hL-4VNy@n-@IcJ8yfdAefHrm-e7 zCFU0h0#=8ZPg_B`O*j(k z2)aJlr@HQhnc6?zJ5F~9EnMX2-OZP=_PPrjrnsnLIA2=b4lKz<5c|GRDW2NY&4){! zFl%1IWw6*!%0)r8yJ#}b66bYspSBn`FS(f!DX8oW<6PIbMF5uJ$1M8gYb{iPV$i9W zhkm5EE`c?JYkuH8vU=J_PMGCuCX`+umRqRK{cnOE8N*^O=2k@97vI*o?yrVgchu?+ zwF$|D%E#c_LTw*@tHTbRur>W~Ip(fGQbv+|@W-9A8`M_hvcAjLo-2L+a$yC!b-e*6 zaT#wjpiAUf+L_o|6c$Ae&kbWo=tYT1HD40qn=mu=HY%GU`Y;?hRX$o@#t`ko&{S3t zkScb1IR5zE;nwGmo_QQR`S@1U3tkr)#1;cIr<14_tuhd|297y6ifB%N&^S-@@GgRH zUD^5jW>Xr!Lr1rv82(c{hGQKOuQgBSqYy`yuGr^(vy`3T6td+F2c3&=q?0!VUkr78 z&-Jf=^E>^n)*DXlI1fZA(VQz)XaD60#HUHlCdJHwvRC-D&neUHY5WWT(W2$VZ3YNV zhNhB13vt;owRlA1fR?tf#;_CB#HXo76>a&{ph9w2Aew z`u0}Imi~w#{#WKBKndyw*xfG;8Q9#+$LnkT=IKz29*C3;GnVTR+^3PstcWX@e$J#d z)uj3XaPP#&|1HVv^-UB^6dW5ico>srn$8>tW;w457p{{T8CasrX5Fz)V;|xwa^{Yx z{^Gj)dvd%R90Jf7#-jIq<|Nu;$r9lQw^XmStx;>hcl#x&n>n(_Z(WOx362fE?B`h? z{jz%4Qio#V@lu&e2J^HnT~74x!~E~p=b`PlK7E5nDsOm0n?wdkC(Gr{0N$oB;kH@; z4>%*?)it%z^VQ~sLZ9K**1%My`wAw2CH2IhPv%!Za;d{95Y_1rg(D*8QUKH0fi&2h z_AtRNCxbbfj$QHzp#`ESC`gbu#n%5p)(G6mZ9uyZ>q(qW&l{8eShwCPAl$7U$*d$< z@ReU@K<-rj_SwCT)kHbs3)NUZnap8Qt2xLe?h0gEUhP;Zk&OhoYWBeED{ha<3H{sp zjyR|X7ra=$U3O)=7I7(kxwOixJJ%@F{e4A27s>T?uLntqHsE`BwxQph#)=Pa!9TSm z9TELu&>c}ZoUih?h7+c;K4iY3eNm%oayX8$ALBO?g_|x^R{|j8AMj=^VgSTCd@RY* z%ZnJLY&_*u?nn&a;z^#r{YjzS=}EmI7K$IY-!>8ju*<_~zm`hYnyS1)#SNMOhqV9q zPg=dsyS3q~M!#tqj6D*MvvxEbPSfTmhr?Ka5wtW-1e#L3*IiWr3jHICRhv{OL(?eX z5-z9aX{_%j-_a;kryo7vdOmSHU?rSCx=!p+IGo_;mgRy!*Xf;0;((^gDjg>fro@(N zffyh__0^jnPm^BCX9tk$z0^Qi8q>Ya0xr&F7x_Jq<^eJTP1zDri>A1X75IFv(xR^X zqxeIUJw??mMwF&q z<#1w!T<BbPA2yUBET$iPu~2uL z4kYq~6$-pyA#V6X6HG(jxpBDcBtNm)g<{O1k+JZBmJ&kfO{|6zfm6LWh;=4c^}_V(>430HIV7g3^8cJKN@5R<`FW(3x#| zi6RBuu8pT%+ouyab-m;l9bB$IEi~#ksf2NbPZxZ_{d8+mcEvih_XZJ9rAj{*KFTk? 
zl4LC;Vj#*O-s#ClKJ!bQVUB)Lt}Kp$Y%<4ob%?!rftAk4K?w7gN9X&Cce+0W7Pcfl z>fbzB8E{tXauPazs4biCD~!R@OA2jvY%=`1P-U35pjmsQQeEQPi|xY&L?5C@l3rqC zxvbMwcOACdy}GE`yyXNfT31wX`PJsAg1D{wjH+CJDi9`HNi$jwr#8q@$`BYV9?sU( zF}Z7j_Zb5r7ZfQ!#QmepHJJ!UZ1?~YI0|1hC@YmCYA_SH@-HsK6>iy{yvUS9wd5Rs z%#*(EoL(*>S~G*!7mw9W7d?i;diBn&!UwZu=DDZ0&y&&~cjvSTOuJ$On#5tD)blz# z02vP#_31fgP9r*v3MEGQGcV=-WKIka@Q6q`utLcK!?^v%ezJQ?-ts;j>#1V7_+yCQ zCXqE^Fz!b=je}{?Rzi+W(}|*6T4sk1D5L56(DUX#IREEIWSlIp8+Nmu9||Gw#I%4>&_%?zSeqNyw(D3Rl3ex(n;n&O7f4iFi+1g3LHX1X z7o#l>C39v0{$dC9WRY@_X`#~iX5Sv1FL>qPeR$|A9>5@pSzl6|87a&6vpv$OPxvR$ z%F~!jT}LdFG1$c&dWZ~m?kjeb4-OWnp&aU@clm4;%xPoru}8lKDOiBR_AKE2%xzL7 z*L5NH#;35OfE++g`v}NF>2E8ed81$fhfWLFV;{2ua?jWW zABf=*q?rnQpGuzPjTk0j8}d*)-H&*Z%&Ff$-o%&zBGs6WXg}>uztv6np6Ly_r@dGz z26Pfa*hIw)eJ#ql3IE<`Beh?N(#zosurv#xc{B8;iA;ebN-cu3YA3;-xEZBkL6YyA zP~@6!22PU)@*f-}PpGFLu)w*Up*Yp=b!L=iYlo!5tpqs*zJ))u*B)iWP?^q>cFx7f z!FX=NjLcmfyFaJ!NTr9L-rBJ@WFQ=Aq#-dHnaF`*|8yWRVSP7Nb#0PlmHp&};gKHh z+zb1znHQ(nw$Js5>_bO|HU@{si#H%b8_lLztk*7jV%2V(E>M-R-bOGsfxli9dxnI| zga#;M0E_Wk)3vd?h}*d_6`?YO6dVG1R{lle44xVxXb%r+0xk%vgmF{;xxK-AU5zJC z9zOhVj?5yN39t78_DUK0`)<&7R`1HjatOpMoK^^g{g2%~b*a7BG)Lit7c;s(ZkMFNviWnoJ$f0>q;(i&E@!jYIz|axwkrxz zl3YRUv8*PPQGswYemf?q&!s7kgCzh7d;-i4CXipQJDbY8L;EKt^NV&r1EqfG^)Skt z=czG15?zlZ>XOx$`E2yW+@3&fYR4LFH>Y$Fw0nd0vD}xAzGIg+4-j3@P(8oQq6NP_ zB3rlvtUBq&?DB_7r>WXAwl))$QXWB3L<})eIxyf@bx(Vld!+i{TRS9F8o2^%P9QHb zpuQw#-QZqADmJ9Oz^iFcXY&;oZu3GnK2;lpg=n zh<%xsj8slH%4z=Qi8y)!@+Y{u$ z3BTI*Z=Lhi?Gn?uGB2>29O>nu|OL2bE!e1=7xx{^oP7nkEq2Xf!#`1Ys2+i}3_Pj)BdYB2^wh)-={Eai@0CVwH>90mA3 zQIgK?s~y<7a_riNv#7Hqs&_lsfyf^GKkD!M99?4qf*}|oIFP_k4j=L2pm6EYF&H*2 z=p0N=duYu+>HzeU2;B?&soUWRzNh*U3_1;wTGQqCYe)|h)E;P@XUF5=LKH3+%*7F!|V}%h@AftUTTv#~s}o#%c2hTYf^kx^JvA zVa3p0sB$Uuv(K4qHc?S$x1hcsOKT#o-|u1$t_6f`(OmTfBy$U1Zm`#|LSaQ2+NifL z?`Soq?eI;bRBUEd1mPBbZBmhcJxR_`_^meHx-#CHGc6uC^j+lzNHo1NIhzvjl?m@6 zAZc%3baDWjM4YQ8lXM$-Yj#96*Mq3`eqP$)a7LnJ`P24rLKXe#(_4xlsi`{L1U|PF zhw$3NY`y+?LD8q1EZfuRp?6GY5_2~OM0aq9%k%HrU(U379R~ZR!rG0VNx;V*ZS?Ar z;yXF|d@c_+)RMVfG#Xu(yRP8Pp3XMy7fY{j-5r#>z*reH8(A_+gKP}P3UWZ^xs0P) z&$hUJuJf5W9+8n)uQZNKVSMpSRXht@mBFZd&WY6&7j=k>9VpF@^q6f*&XZp; zH@bPPwI+LlliCnPUzsiCqwmC#-w)aa*I>y5T05W8x z`dI)}sn^mZFqq-vue9)-VAQ5wxBAg;t3-Ju7~**or3qVD?B=lwUg0o(MVB&IvFv4L z^3($|qylVdqn?28SzwSKU(w0^Xpf&|*|f2}9L%e;MZU_95=0RFsO)TeLT8s&N(MAt zV5(}bRXrUw@Q6Q`?84F?pA(*@w$|_{A6*(z>k1ehv0}BqnxlpiAru%NML>y*n?0># zh6$gcof`~Wdp3RW+h)`O4p}J;ldDSnc@Me8>L=8yZT}4)83d9iYN6L|M?fPHMjF3W zK%Us4#ETkGoVg~7q{SUTuy{|1yb)EEId`-j zr5^k&+F|2vX1SQ?UAX6uNXXa?WWd#KCi>ndIEg#x#+)1YxdY~U4|DWby>>7m=j;ce zQV$c1_7E&UwXnvm|8(jxu*IL3n1m4hjOrNca4@e@+5A~x9kY6 zB4CQnSK2k@X;Et)cov96qnlb!6@@YCW?h`#LPV=CvJKAHiPaXj1q)Q)RCI?=+d{E0 zWbFg>4}$ru=yy5(ZAbXm*6`|RFQa>w8jm})$PmC}n@YxTiQI==yvd z*W2~nf_aKZ!5e)`qmW(R#6D;~=Nrmm`V`1S`3(|!F-GOf8$qYAO_>Li!sa_;$T>DphO9WWQ@#nvf+E0R%-RnXK=5zsrAbzRZ;f*)l!d2~iEMfVpfXV(ZU_ z6HP5Bc)r&H7W=HJwYkR)4e_0oxJP4kEv6JWq2s|CBhimPOQSmc*!I zGarY^{##yNn+;DUB-fl>s91GM*@rxBV%#w(jBaI#;1wZGi>coN@K4X{XCIt9%&c|O}vzrsb@*x~^$Vjyw zYf}`%jO$8Rbd$e8 zQBVA7I$ThgHD{cWM*aZYX!?pZs&ydO1mSQ#b;n}0x`(yNZYUjso!xt3U5zux__|N+ zlUMR3r|f^(Up(Jpj21uufwi0l;rIWDUt2=G&j;Ir#t@^vZ zsAw*<+W8kyotgUN=?~?WQ1&&~`G1b4_WqL73-#ICyE8m{Akz6eeu2}-cNWa-A*(=E zM2_8z`rw#yh2`{dW~c&d>+M0Kuk6Cuv~o|5fHSh4wqKgJ>Y0cn6;~Xt=?>zY zez5PveCD;@%Ks%$4(&Zx{5`KA!drpa?(n)meT6l6Z@GDOONmA?JN>+QZ=|&vH z_Z@HJLjMaWW9(~-G&dT!xBekZ$h7|hXyo=;y)R%mfE7Xu&w;Js!q?zk!n3vdpN!%E zs>%WbpT!R7Jp?ndsw~I@NtN+g+%&rGOGZh7DxJ##vbE7_d9a<@i>jpj z8L8f@eT5d!s#|KhTF?4m-_QFW3rsyx&;#0$R{T{@x@3w#PgBg=%bJw>s0u@_fW9ay 
z%pKGUc~LTpd8a#^RyfVC=j^G-wJNSOLc#C4M;#SMWCyMVRW9k#>e@izGb=ZrzwdX}W;etZHQQfHupb~N2KF;j8)|R0AdW}9EtSQ1R?dVo zdN#AoMDHMbe`n3yNBc{Nqh(w)%@@i}ek zN%>bj>U<MRbO;mNgy#v-^hj=Rk$lGl9U(GwMQ%!4@o$Qby?3Z0PjM1!0N zm@0(JD0gqz2r?L>CVw;b?66oJ&f{c*r_UP<@l&Y)QBcDT8&(ZLvDIzP5z6GTRdz<44gn|TDFnU6hEO!bu}{t4b)1z z{>1$J(B~`*RD7&`FWk2K#_3X3TD?yMHs)crf}q2-DJ2`QKnF{c>FNxOncu@ogk`3I z2Reib;2Ega6|jMc{ov&Sj}u?kxYO%+@;`-=%sTpZPLH*m6^l(&y8JdAM*FdF<#t~zmG*dC;|qch>8xTczH>Q86}=hLUbij#M1^m)!8o{#ASo4bVKBo1ZP=SALtF;_Qi5 z$HK2*W5-i@qd&R@dz+%q;!(OFG9j8H0!EP#J_&mzkqA47H3?^I1KsAN06&C>K5S4r z+n3AbrWlv_>>QnR_^Xw$5J;3U8>jd=@-n zJ$vC!Gv+FP%+}ql_0F6dJ^dZg)hAuZkp0evy>!FA$>J2;OZ$V0GN(w-ZXw8gR6_XLlkq&0KpmjGil?N}|U-x~6 zizrS50WsZh3&P}y2>EI;`UD?o6TTht8Ig+D1tCaV;#;6&G0IQVoe!x_?MeL1ZzmeO zv`L$5v&A=3z)6{bUT!<}9B~mwx~^(itgy}wC^UrXFB{CpP(P^rt4qazwVp(Ox>BW? z>T+R+!)YHNI=mIx(sU~o7v_Se3!&U!L{q_cI`_%cEgZ-u^`ExAP%zpG8qb!#H{&;^ z0Wy;@X&%^SJ4gh~3UBnoqKs@_7^j~8l~4Vj7hdtimJToR@f--Z|5%9bLhC?_MZnt4 z(?Gg6>-;?FA;+LnVOt|T!NQ*}$ml*O{@TA+(KS06XDs2+ozvsQyV_!ICi@BZsw>VI zb+3>neN?KA0VYYqVyRNNm54{JaHd%2=H4pK{^E2Pv=yhVyL}&*7PHajZZhj~c|5lr zXT^U9Ps0}VH-Sw%j-V_qIcgm60D39hKs;|&EA;{N^~Z3`puZQ-tP?>^=E~3l>BI;iy6`CG zvoPo=0X9cm`UwGE#1AN|iOA@&146XFdA+U@PY$&*g_)hZPb*yq-Y?YHl=C1Ur7uH+D3dyA=G4CnYa8rEQtdQ-sxg@xu7qLWrIU!FM5-Y))=HmuJZ)9i+YMc* zv|Lq4VAGF%x*2lZU##*`EW7@ef6e(x=v-_KYi~ zJe(tr_A=;DKHf2Ju;?X$#<%(}9zpugi!zhHQ+gHhEdlHA=$+F>K`*n)AtnbBq{E5t z(bS2L@Zas5HA=9brd|-xu1yi91n}X#xkcvx#R33>uSe55*JdY92Dk!nrVuDG^x9xg zT-M0|Yfhnc9|ZIVZt9;#El(wCxZH{a5;{2gAfWkcPHrxYP>JV@TWgGjhwR4*XwX!( z& zIzW$`n$tse-9vF6pnzS#7QNm{id_E!YNAE8?DqDspZ~Qfu|lbk(O1C@55v(Y8g#?{ zcS=uLqU|p&usN$AYoL36O=Zi_fkrf_Jwnpnht1`GSU4TajevOf8r){K`z;;yeh(;# zo7}Dn3%(#ig9g%lti4{p&~pa@^^3NDQ58uAIp9eJVhm6SdP7ArYLg~#Y*-|48RfDV zy(G?=?FRmR+x$PTtkZc7ef7DrglDn))ZDLbbsku!zUvIU*($nzP{SR`>9&jm!*ecR z+kStE1=SP43y%R3e-}+6{2&*^pxH`VUqXt|9+k}LraZaNhfdkt^a@L6OY*HL$$PdU z{15&v7`&s@`fI2O9}ptE-F*EzCed~*fOj&hauWZyjpe^egntKkvLcrE3gD-J5&qv; zFu%QDfeeBveg8Xxg4m1Wm0^PKwX6NdRLy_aCKJ=Ya&54touvOhXaDj_0NE>N0nPFM z-Qs@_-hVwoabOh5LxX&JyP3k$TG7#)w)ucz*YOfqRIIQ>CH$+_`!E0AjvB!3=k>(- z_ADCMjP7W>q704ZFQ>lr8T>nO?7su9z-LInbKhQcMfFntr%(K!PYWo%mY*nDFNgle znfK3Ic?t*Sl)K~Pyxw0P`2Rej0d?Q|*RplV(53(Ajr=qE<4N?&?m_vCnh9ykX6 zJzDm+Yv4%(8n?6Tih=)e@Bi~pRHaw?M+|=5|E^&p2IqgJG!dn;umJ^0|GM!%U1UlE z)zA5noA&?ntNzDOuKT|dkpACf{PSoM|Nmk#isR29HT-if3QWVPHQB2J&`Q$%kJ&+s z$U0UZPEJ~au< zmMu`fcC~MjNqC=w0d%A{=8;#b)En8dfhyH(qX%f~T8lNV`|piFMtEr^Sq!W{E_aS6 z8^EX)ziy8nb4LZ>xh^nWXzr@cF$qK2 zIxh_C{?P<F)xPHgs?VwRIMCyX1L&Y%R$T(p zH;cFs02Yw^E=(4%<->*(U(yyRdQD~u40_WXnSWrX^!;xA`(*sh4(ueqZ}0U_f%jZJ z+eumBz5#NoRO;l#r~9yGBSiA8NS@VjEctS2TFLFfK60jr z{ReJ_&;)MB`E&XP?l!6(A`rPP+ggj5vrGaaTR4*(s0Ylyq5ifB3P8OMDi|u(>-pB+ z^7hW+Mre+H(e2gvGZQ!)sM;ObzEtzos z+R#2(<#ObXEKE7v`W@lhY^v`SrXi-Y`Yty|aaa3O#>SbQ3Q{fpC3zS9X?^_w(}3LY z1fMczppt@?!XXvk`s4_@stkaj3{8#AFl`!+rABG`J)8I1Y!A>}r@pe z@ZSpYRv{7r=#WU(!l&iZ%cy*ClCbxkhrEzJV#a`&(Np@xrcnipaO4xK`rAxoz!6T^6?m{r?nwkD%%bKO_@#mCU=3WCxwFCUjd9Hn-L`|2&u6Qk=vq>I@lq@0s4 ztXrKwrI__6`72HLja&Fo5=`<^M?ErSm8y{VfQ3i0*(U=?g0Ke8bpa?uz|%uWQtOsd z;D@6%bpz=%+OlTlHP*-74Bg>tKSc?d#Fh_aS6nL%x@CI9(Ha`=+X-^VoB*aI828-& zmkw70&?MO|YrMca`;SNt$dK!zB0d-gV zEFHJnv096~4_7h{Iure;}S8W2gX{1$0l9{~jn;W+_`W4CUB#4t4GYiB+xR z4tgPf?gJ^e{Q(;->kJmT=F74%08!v+6rG>DKCJUC)bzZPzwLw*1s8z^cCy^_l>tr0 zvLS3R9|1rxE+Zo7<$W)mbpQAGfB?h>P|vd}ME_4ysh%CeADPhV7%X+@#CpvwLA~n6 zxMQ-I3#o?-D$v%EsjJC#sg^iL-}*X!v`A7?1OfQ6Ky0$cc1%XmEAUASr?;kQKn&3- zUs|FHdv%h79!efwc`(+aKhzJm6AVy{$>qm>Ww`D9^d`{`_1)RrT|p$h`uDFbzHYb! 
zU<>u`RX8vq7EGG~*5wN2mQM-><_cs$)XLxY!M_p@!otx6L%fJ4C-uwco8*SdBz;&E ze6(U%{RUQqb!UNz-#~Yg1iLe)hd6kJazK4^H_%kNhbjWD)i{fOYoOP0KAH2GO!MY` zR3AgY7h>@j7fiT>#|Yz4x~+c(n1V=|PZ`XO_*U_9#(yLt^V62(dNDyk$GyArDiV~Q`8(w_HE+CkzE5W(1RL5RDr zTfP7bDu>-cwFyIk!kW4owxk=a{fhNF$)UE7ppR7!1=bi#g8RrcU)ZQ?#{KL_0&g%+ zfn--{gY9{w?I}hyCXA#z_M7C%9UmYiAs7x6GXk4>(J%)o^u@b=GVYloqDU1o1m^uV zs)f?jCFO(ZB9a+nn4F>gm6o<&!Sl5g{hspm;@~n8RT^aIve;dIN`$(qBCKjk7PclV zo`Pfm*v!7}Tql4U?RNQI?tveGE`Nw`8=mQF)wE+maCh9}2OT9DyZ~zcY~vcqS8Wy0 zo{N_q`l`+JUg2UbxyOL8`R-#r1QCrJK%-iA+y_qh;TQ$O{m7Y8T}SRnnlCW-Mj{?w z6%fpNXW}14oKuMW+>I3QNpLg7>Rt@^Cf~fX#VNx<;EkZ32j$B~75NXhC zVEH}|h$uB7GOha^E~ytPyNI-&-1;PhTwaEo>Pg6OWdH}e z_%GNX_)J)04)<@BE?)zzuZWbsjZWAyp=Q&1YZ2{zkuR|W>jKMVH#UB^^jQQ)Yt=wz z4x-Fbd?WMcFe9IvKtig_7ctJ9^jvo@Ah))Hcz!OpbW&=?ZLI~;&bsoqcAFp5)jw<3 z7CpC!)7_Ik-b(gh*hGo`@S7Qd_yEetkNB+J0-!#W&%y0vrHmQ7Az;5ve-eG4>BrFa zIyWN4iWI|pzrO)GKsSYG&tq1~eIX?wg;6LG)&KZGb3+gv-0NPPc6#S|r!)Yr%GD{AzMY{kBBGAp$G@w|xwJm=izpL7|%pF9hEJ4w6l zUp+fiBq(d62~7Z(1nb1QWP}(7P20zm%54$O&d-zhvGA4+iu1uNkcj&Ba%)6?NA(jA zC8IfLMCe)I+4BtQ8fh^44Kp(_FRC&2H+b3*)L8D8w2kv)ceVeIy|0R@W9j!qgF6Iw z2ol^~fZ+gp%lwV7X|=)f#0%x%rE~1PBP|L)vKWszY>hB#(mmq77*wMxzqy17xb?2IWbKCJ zzAo2adR%k`vbC7Uv4_*VKJx8X6Tt5UOD9!*{&szGky4WrX|VV)0$SZ=@9aZf{*$e` zo)K&nzEJgJ(#tHb%brArA5A*j1aeajqN;^*nUw5A-in#UkD)RtDE{9z#9klL>LSGy z!qKFcDG~1_qQb35kR-$;JYC|wmnf(Z?-kOI4v_{Xtc=g#JP-x#%KFqo5#S&?DX zom+yx$twqU2IeapsxH&#{Y;2Bgw&Kh!Ah?rJtT_j6oJqJF{~L3-8Cb%C4AxeMr_RMV&@_hFY?n{wq5ef%w# zF+A|%0qwONcAuT`7j+tgOfK%Kgvq7IVh1#>J7)+(KI+gBUr#I4;O*0g6F#-BxwB~J zF^ET{qONa8sg~@9s#fj5;FFePP?UN`>^p5|-zow^pu+wtS<-+Ti6{aTEU5t3Bbs2Q zsQ+X>gdhyi^uGzIzvs$&Ub*Fd5}L+~YPw=`=}^!p3-m>UKA8lyCwttfhfw=2b>2KN z>lA7$(1V9-38!Md79-HT@(xe=2%nd!H2O5)Et~ZCgikli7VItRw+kss%2iN?N703` zW4mL9ms9(h6ZUK~Pq*{Q4SDQ>C0ILcG54F(i?c~=1r)1%j4yUsg^NJ4K}iN;h`~)L zX-R7I5wLI@b|WN+JAM%ny)7%ilzD9;7wqzroMm&V5rO5Z{u6_Mv(>6?#Nrgs$-Z8* zabEh2rDFE%pdLoykZSpZPx`}5+zJDNd9o6c3^!XBB*f@L<=sW4ytqKDbpb;LcT_i86+fhxmHGRuB3JUUb^Quo?X~t7$&qnfOrArvJj+#wEev`L-E-2XCAgI5CG@8wdT{B-r%Y8ici#BZd;6I7rs)>CbYVNC!15;;;W+)1K&;EF<@OdQlG)p5g zMH213GG@Vy_PK^%9v-}@fuV)wuCIc_}dDO{SIQq@Wdj7L}=6ymxA1Nf7+<$a{tecoE1%Vv5(TUmGBiP}Dx zGpBQ8<4ZA36tE|``4-c(DUaOH)BfPDhvrW5tzgpj1|@KVWled$?WJ$*0^D4!n8s{Y zx3Tfm8dJ51+E*_ zu~os8Kgyh<>q$=ZDVgRRds|3cHF?~q&azBSP&Qf~Owiy^RDy!UnvM!s`}j39O>S=- z%3?jDic7ss%za@M<4oWNe(x(uPySUv;lZ|fEt!vo+yD8=bcZ9SZnQk8pe52lMSXUD zI{@yDAo|)p65)(|!iZ~isSK*r37~q1LL;GO>$dGhYEgl*ie;NX!qRd7JF4qY$F&yAH2^s7ZJBhLpvr z5nXp(LT$(wEbF`dHf!tpnm#yl*q-)sQZS{ysC82o`(|8+5;x6j2PO78`XV-J{^V8^J}x;oMRddKZaWW2Y$v!9*9EkD3XK0|DRIW{}0Y6`BS;v z!07i00jOEgGG%3rc$m^m>DxihWF@rHalq#OAEeA_2pIW`jq!o^YX3Ep{O|Yw#|g=|K5DQ&$S$ccou)Y z{tsC2uVW23BS4|7F@i(<7p?MlqzfzsfGtB!%p2?d4S4&PJA}vtI393sIMlyh@4pS6UqAj|b-k0+4uISFu(Bk!{nQm0rfc1YwF%>EK3g#a zfaMz!CUXdX3^(78lpQ+(AA1HedeD5o**a+dRZ~Isi$$8Ra*ETaG%WaX_APE&#InZW zR62#-vmzylS63&*ah#Rc|YNld^1mRJOEEt8D9o6o~2)oPpF?HsG~Az!WD%kFYe#kJ7@l16Qv6i|bmdP5w{Si}!_%W)Dm zGHSgvJ++Ec$lw7$eZ&W=d~@BMQGp5fz!kdRaiP;xp9jy5^EW$0#DV( zEhg)dsZwS7cst`x?=YPq`3xSy@uLPlA=vMaCYj%_-JOboDz>%qUNhdGw{&U^Gs5o< z8P5%8b`FkmW`2m?li3_>)6aTB@cWO$GG3_GSuaxpjOCX&Gm-}5BUYg9Aybg=7ENqp zqAi6NYJ){j?z*< zxsDVV%5_Is@;Dwx+F2fgPl8a1rui|GOkk8%RIc?p_CT^&GPA!%_yN>&f@EW{wC{5; z{R2?U-u3>(8sc?gMIu#J>E8ag&=2O?n@8jZ4^G-jA5g*-PM~Vrv3sjLB`7q`-fx4x zdFrMAri?hzgE$(6tD|z!6-v#a(_o#~=$#9P6@v{&xgM5Gpe-=Jfx{uZ;G{SJ@c7X@ zr**;z7A~iY0A_K0?o2@v7ox@rMOd4UOp36#1b{q@?`b3D<>I;y+z^W~yjx-Vn9*De3NVQJ&#!nVDAt$^d#}h8yHe*5k)&^;nWm0Dnv=i z-=oZ?kj^5_&>n|z=9?3VmWEo{#$2VcQPGPgk;L{-dX*36x>vgcRBAZ9MUPi_Oc4O0 zrgbc4I%PCRD?AIN&SxpCAD39R(hRmgz#=H~=+1o~ybr;KLMG%R+ZoA7a@d=|8*K-m 
zPq&}5*jb*IiJUHbrmj^EaP}GcJs!|!uw)7(Ky{Ub*|Zi4Np`VQJqRXbjqVPBzt(Oi z93c{pL0_@%oBG81{>NGEHmK13IO@Y{<^zjEKXpx5y(W7sKu+*D_u2l|41!c^9&$e2 zoptoQ-;bdP-jb9|=EnxVPM8hZDV?a1(NnYDd2H^;mp+^;$RKeXCpLQ3w8FX8f3flT zQildn@nCn+S1?Ty@+a6al?i(;-bAe$ObV-c8{Jf~8NmTik-jB zPO4)sl2V%A0D%>KbvpY@tavq)cMl+3D(dhe*)(yYN=1#rdv?gUZBUt9E8dw)?Vxcg z@o5}boMzAH`JnFadj_EvGN1dCXaKUo>k;TxqtlA;9e}nH_Y*`W=ArEletek@LD5K| z_JfI!PvVhJTtUcbFBc0S`r=#Se0GS%Y?Dha*SkaqAt@4)T<2zX0fHhL+PK2q; z17UH}*-(O0cZe%pzhl<-n?dy31>7u|PfYOYqr6KPFNgKELD~?tkPYpdoDkf3C`W?t znL3ydg)`uaL@yRk-#zfWVnJ%>1ni{=g1vGqY1q{9QR3iKl9A7AY@aOD4*Q&QVi2eb zDpIr^@o71~a0p<(KFRf154Gc!G;FH%#U960CBJi?Y;#@tV;xG~z zB!fr~Vs~Ri7qk5AMfO^eeJH@H>RZ%#zwYmAJ)xFYV3pbdBOBn(Vd9WNHz~Etb{lTXq@TmUSL8H!>;Zx+*8Pfx^3|`@$>oT=fyF9@F^& z8Z~CEdjxfdQx77pmCIUfs~UAoN>Bh3H&zz4OCCa~>e3LNa<#n~sFumT;=*pO#{St_ z1w}a28(nDOg?puX#S8Fzr?PQ8%xFMC5V(&SF1-HWL_#ULTx6bNxjn40rL@vgJc|(c zYbD(A1p9qf(|F|R#LcG(l4I~XhlNs>jnoUnY|8C>eo}YMs}(I0EFv27+$jrMk(M2j z3B+T%XaAhH3>(rIoElR8?dWU*1OMXC>q^d(IYhqEw#h5mz2*8EDaC-**91?8&S{cE zpwy;1ZilrHa{}^1K}*X_Q;UT>0`3|FGF(mXlR6EQUho!Y#SKP44H(+KlgR(O_RH5r z1Po;XwkP9$)s*g z+pIB23PKf-Jgb0>lR0$oShpnZNuxQviHxX4BVt=q_n{RjqhX|9PP%uoRr0`-Q&Iq6 zg>i@*Cgu}`kG8i@I+|VUHT@Qznv`Pj0 zRIN}_^se-)z}RWy0;<8uf|Z~r!AR%|XqTifwK?)mIRI{uaL5;4@;6#IvnFAEQLiE7 z`L%eba*AYAez>V3)(TPS^NE`&iIP5XTZKx|Kv_RD;gzSda-Ge4;-@86DXiab&qED` zVbW6onwefk*2AHB1 z5ULb61i>+L8PsyVO3@gfAG}#N&+`L*bu}gh)+>am74Qb8H@7;q4O~ePtKiDDtKxXr zx(`s8%SM_s1GaE+WipA5l;Fk5M9_yJL#0;B#V?c8^*jz%`1`IX@Z3{KQpwagU(J@^ zwh^AhnR5=rQYE|*gO^rh2g7nF=?D9%xl2WUrGZx~r?C2M5LcFv<@zAU_-r`QT2vk9 zA^oL9M(Lr-n#O_2qm+2FKSii{cO=S18B$ zl(9pQi*w52IU*{}GsXIb;mP;*Nx08B;>${-mHOsv1@!cQbS%ZEmybwNXB&I19KJ4I zPRn0R)50g4pi%Rgx*UTrg@O2BQIaj%^RMyezwf0;@Zi!Xmx3f`H};4bNkFuc^plMu zjOS4vpW=Z+xhU(BlRoJgsG?v(F_3|g>`=+jx3=DqKb@v9<1NqE7ti+b8I!AYaI)&4 ziy+;K-@>`dAt3L}27izS#u3>EJELpgQL#stOw7mm-O32(G>`INy45Fk;e>G^9fete7N?VPj z(e%LqA)9~nQ-XcMJce^|6L%Q#uZF%PWE)eR=S>FQr@5=V#8-z= z5A<^_w&uk_n%(x#wqK8Y3ekNT(N}pNnV@Q;P@(mqO@nRZYiePbo+2MzW6RCB0!ir6 zlyDMg@Z-hDCz}BFp{4sA?>9gw#1R5^-21c2rb1sJ%P5VDt4J|9U-wUu1RdmplX3Ds zioCXqg10GM-+K}6p*?{-{!{Z;9#4_l&2GyrQ*(|nkY|as!Ldb>#__%`yS?^UM42|) zqWE)6(tJLom1K3b=RcXb!aX4*slK}J=-TUl?zgoT8|G1wg=L6mi+B*Cy=8vYS z)$(33MbNpr49Hop%-hU4GtLwoRnTzGeB_A01Qy6Z2Wtkn9Lbm8wG8(6@9yC6l&WF0s=(q#XM`p3W`Mt5UqJ~}J4F}UYH=G? 
zf=B|(m9%Jr#>qAaaY_pD;vFeI_op{|xE~tbwf@c{%l}ovYt}I8q`}EM(RNj^$R9Sl zEiA$Jp`Ft-MFG3F@WLmD({@9YMVK6t!$y&W1SU`qvg7v=U(|!q=lwEX-0N7sabUkGS$F+Nv!h8A(GYdiv_j$(z#uTKd1%7|u1Ogx** z1(6IoaM>-&Oj#P(&Bw=nB(KQ|o{U^Obb3G8)lloTs?IzDYx(^tq=qsEb)R< zpC0lK`m~9>g=c696S9LYLLZFdOcKbliknZ}fGLsBa$lzreqi+=KB0rJUYwdLR>R>k zfE5cE05Yb-eFp?aQxm$cU4*jC^`Uya?mU^*}vPM^jthHDPrRVBO*rM6TJ+3Uklcjokh0ps@l{!hAy6} z*X<*ANq(cJwRXGJ8|PDn>0AGo#TyGX8iyP*dva$2+?bO znedpkZC(3;=em_utUSy{%xI&)QLa@p{-Zzzu;8&HXR_^;W`DQQ#cx|NsT-An=mfi6Af3i-TBbprx}9jIw=u_W0Kil2t zwxqC~9OjCcZM3j2GLva9FtyfYIlo%u8yM7TK~RLQO@WBz(!I)!M!VV^ahnVilkhr9 zr{j-Dt-u>DqBhRgwW{E4pBub$6JL^1Dtd6qBt}a@l^tWs-<`9KHX`*sqzOS>23!!h zZ>&BT9st1ieMrYZ&2x;;m|%t1lm`bpI+XY$n>dizV#xP3(-QRpJ$U$Z113tA?*#ED zsMXa+E|ZMf4-AFLWq0p>UgC=`mP;KM1*RJ{-lt${7q2>(`s(ehEK=Cozz%oBm3Ef_ zAvyUXgKUW%fgD6gV(+nT3nRg*V?RwOv##T~VLHCIooQ3F*o)qHTf!e^cgCFIgc`%5 z-CSat5p?yfSKG*G{3_W&Khh(Kp-#vG`j)FkSS!#lt9ru{v9$4pi6UtZ>3(U(I#AcF zC4TC_wl-a|24V4yvfI4IiwX&6Zhy-Zag%>=CvFQbfg%cl<(?gK`aP)r>dEhXON7RJ z6788JwtejA%>o^gNf88c8C5zf^2T~5_S)M8hJMiIZFD)!{S@#Gaf>?`JYRm)HwY#( zFXwB35)p(L1>ZwPsP_~Otn~uki|@!6-8_@}w?BfArZhH5&%-0xyiln*%X2X%nGieF zF?7=u@V?`{s6v7o_xM4Kyk|0|z53vwYwzM+^&OwxT?`1&Tbj0UCt6<%_{w^;VamNC zB5-rUdM40+;((W)>E<~<-?*={*FgYW)FmTj;v>-!eQ$Ty6}N}GjEtfQdFy3;1xXGb+T6Jak&}Fq{icaj90`X{;K|qi zleav&MkT%c-5TF272uD*UUAK=K(n_DXiqtIPNxTKYawV5_lF^9Wicx>xxD3TxToM5 z93`ZV#yt{miqO6s%o{xV36}&jd2W28$rG&~y8i&%mvZw&k6hE}?sRB>>eDBf8Mi(h z#pXK1cJ@-SMlm6>Nf75n+ju+&Fs3F9N43HYz!cq3af}sQu%%B&-93KrycM;4Ly?3f z3Lnp)HJBgq<^10AV+6#<`l5^XF^4FKe3G9&<99zgP!4CgU>-j{~n5Pg&1rXdGqr& z%aTYeAJh@HQbFiGO}=M+V>v}^-1y+*eB3IXVuz9nz(o%8sHQCzx!KjqavAvs9RvQq zXyf0YY1A~RXhUfDk6Q;pd9azwBnkmo^t#kbg(PpLrC(g`UrG3?*ymEN%Ug z<*Z{G2PR804)D3eQG~n((Ig-~N+w`!08ci1=!r1?MiS#4Xiw7@+^03%_Ibz8Wk~cJ zEz^-9#6j;Bw4rt6hX;Y$0{gy|QKq=Z1ulifwva>=cPOS(ah2z-L4m?+@$LZDAtItS z7Snt`Foft~a9(7mx7poPdqozR3c;2zer7mIr zq8&T<(HaCLoRsHKcugnn+2c^KOx+bIkTcOd{m#FUMW7Out&`Tdbbg68B4akHBB>{* zkhQtar*8@B^6lp)%3KRXZ^S{1MV4pTDhLc7-Hup%yUvJ7QSet*>>DBYRFU5sym5#t zZpIdm3r_2mcmThGS6UEC=U&~cz-G4JB53^5?4uFvCs6`DxmjE}#!IB((@#3@${&kM%za@K%hR0u z4e8(LYEC9@4`bheLV#TA!7T5$>q$D*OThdHFXkS(Mobt_!9xRHb#TVw?0fl1=>RC43-uCW+BBFKLiNG zNQ!ZlnGogoN&SNYn@HfX>2WH<>J!~|!EfBp1Pfixfn879txgi7j%|T9ts-qcsueX8 zh)MCsX)ttPhO@;geZ~UFj&SIRC6^-y)!G9n!R#*Q=6Go0a?kP;Fjm~bR9NK}+xLN* zn+usY=)PJe@hX3)wOF1?A63A&C7Xf!wzC?p9=yjzvg%2|WN6Cf)ym(8&PMcGB~m~s zOE#uVqBGWPnKqO>jE0xU4vaBD%e-^h4ACln-9My)kc^M+W5c zXf0=M{w^Rx%hBx>{<~p>jEuT&Y&}no8O&&x(@)d{%XLHCqHF`amaFj|HzM%xJV_FL zT%oP&3ube>(85-$A>Rt~?R3@9EH~RcmaM%~im@?gB47KWyT!NlHx1=Q@muhrF7Nbx zb9kqLd}r_#lTq{Z1K@d^nR<72)kZY^$i;j5V`sJhH4;AT#Lw%z01$7e_Qed7f~L7xU_C=7qutIh zO;0UHr_nU6*q$r-$W5bQOA{WZ8+iDu@>OfncJp;5>V3s_goi$MF!?LxBDv8Zcg|K8DVwf@pd27r51*1Nv$5R2i?h$>k1PQBt^l~Vp+zSPUzLxCPNn@~fZWUl zQ6Bkg&o8ztsizMNB~g5r-!ag2Lql-5%G8>*>z(qX&}9l^v-UiO3X`d)200U7>?pii2E);pbm2+W)C&qpdS=L zT_f@JRV3#NdA+s~@=4HxJIcq@SPU~d_o-S9^oXTT^rx5Z$E$HBc$D;+YN$xKG1q5~ zM2d^Yj*XnX@-C0TmjuDPQB{e{=8GFj-xm?vMoL9V{>z0f~t8(YKIKGa0GWAKGoHtcEJ7a+weIpnD1We)Wy@_ami1L3U3#4S|^l7 z!IDX4i$1fa+GFn&KRB+=$iUOE0>p(eqEPkenZxESQd|-ad`RGVVh{^rM;Zo!lX(_A z)&OHqC^kn7ojH4Lr;02H!m0nb`3(~E{1+3)Q{RayAS6YcIp7hXPNf-iTi#EQk(_n5 zwct|=ocP+Ok()@47}{5`$zqR4ksiXBIRnXS3aw}|sUZXb5PB1jYi`0ZeEFAO>=Y2h zDl}S!v*|=hZPO;8X>w*N7WCtBP9S6}*#!W4tsBbR-Nj-m7=_&NL#{M85b3K2u4x8;fs$6B^M7Mj^Aa(T^C2(Qt*a0W;ovSZ zH@87ojzG|~f_%v+kx|DIKGN_B@Q69kXm>U3tfgv|fMNk-;c~(X#9fG$Mw3h69#xCo z_$w0zd+Q2&9QBzpO>464F%FL?r=vMbr7Z)zyQPY8dr%1rDpAMC0rJqMrcK*&WfUER^uIo``BOg!=rx+Jit31(Zip;oEf5$)`uPlb;rO&|h|0v+qE!>lQHM9)7gtMftS1 zcz?UH7QaA?ywAKo!f-WyY!Z9pZOOGL`$r%EDlZgjTZ_@6)#fF%*!PeuFJ-YGws`^S 
zT-KjTc6`3dYH?U6b?Tw88*F{Voj3wwH|3N$m!s#Oz9&6z_>gq-LRXL5ZZr}+il8zx zTAMH9(n`Tv%b!pYk3WjS>QdjCYKGHmUffI#D4z|h2{v6tVDk27xIZpI-*}za)abnR z5~9g?m%vcj_Slj>O|YWSrha4elcIaOiT2|o9_RFlu=+LOLof*?Rzv53aZ%8@)(t4j zRlFLhXJjYKlTwSrGD)lH2*qDwDIuuaYSUcg0;4f3=Gd#07BV|KP#9*mYZqbvR^EE4!AgqfYdE||M z=D7yzHhi+tssg%*Lf%%=di#(R|Msf#Bb-X`w=&dW;@xg=6)tjOrf{4(Lay-oS#HcN z$yP(?LH&CX8_PU#_;7H^Mb^DhQz!N;A~H?$7B9x=eWW0~I3+6k7v~Q>Ag7Ih_6TOw z&Eg*ra6q5Mo)p%5Dt=MN-Ae-XD(u}&z#UWt^f|G|G3-I6|J`KAqu|G_A!KDyqFOUU z&6kYYIiaWUN^+AE!WzasiwE~W&`c>S`nSLz{E?&Ja11 z6EKwfwI`V%BJen7K49KDZ+ma+LinR&ZtHp8Wb8@1tU6mCw;C+l+xS$r-Op&t_gy}o zFER>e4l@tf#9SSM%UY|dQ>Ow^$ zR`4Sm+-wD{vfKrYgG-B3zst4}zeROfe-kq$h9KIPFNCI1(Irwihi_Lb(&&Fbc5)KX zbvInUiW<;CUe5qeussIhb^<1tU(2*_3ZH(mwVzAcXgclJ?2(?9W;ojr}DkD0G62C%Gd1Yh0? z4boEg2s{)QF@L4gXwYUc8K6FPcImT#6uVxS#zzvQUh1p~*`8f46^EhTY&H*lFnd@S~Q`y#}(&<26c!l_4JzTiGY*>r{~xtqn!;6 z`V;z1q77v;`>M9scR1P>!NDm8y+AV=0=Nynu?bMb=0Oc;_Uj zHUE*r)%_UhAQQ41z6E3{`$E*cQyop&?--(@r_j39I5*se07nAt6x^tkbGDJ4!o|$! z4gXUmyPM*1g7CNofAa-9NZeX!)pdb*Cr0Q!T)t#V7a!{e=l}r6>7wy9e85}U1Y5k< zaM?J#SvPL&)|%zDvwY`x1&|=C`%)$T$kofD0y}1)mMWuA6=R1Y-F5Us;B7C4KaIaV zGof@n+j5%9_w(6tKUxwF9Pj1gV>%s+dWU|}z^I*{J^vyU_cLq|Oo{T2zVr%X4ID%7F71otI}#coF(+K6VJ9(yvG2^#B9A(&!1C zI5?vGZXA`|>uz>ql?&`auYBk(nItS~bbdHQ)M&;j1?Wyka9=VAngCj$su~da?Yr5@ z6s>m`egFZF=F+-}7i`eKc(BB_(hY+n0*+X@3-~^9QQlUavHMOuF-r_1r9m9+-8w>D za@imQ5p{T_^G+Fn?j`w{rA@-Xa}|g*uncE(N@LN~#(X-m+Z9s4jJo58p?S|?xwNnJ z&5#$T&QK{@7d|Lt*^U3r3BRy(ajMzH#T5@He$c6H`;4OyR8Lh-yP2rc*kx2d`Hm-d zxU7*S$3AqYz8Q3hB?;ve4%8;?oX5f;Wb+(i9Q5p|uP>~rosFJPC(Op9C+KP=?ATNt zMDP^XXM*E55=!P@mst^oM29_oJm0TMJdcRzfRsPne0&p9hOd%yV(0GOOdk+lA( zi)#C(BaQ`Y5R00f#GHK?uBC>5!1VH7V#Ve&lw0e-P+^;o4rgm2j&E0=DHNCUQ46ie zQ3by51x-ICGK%u+IqTd2;oi`&RI*N5ho5K3I1H-tPAyd(ub1Iaw{>U$~ zcNISm>DIy87ji& zQ1b(u$^r)z-(!t~qiWZLy`)Q0)3si^bj%b&Pxfl6$GsyGxZnixubaeITFfwa1id&X zJZrhK#4}*!fp*1ZTGN?&>NZm!!qs+}m|tTxbMPm*R8U`arjE`iNvx6=rJp}eTh?;{ z5r{fyk69Xul&NtYfnPNNzNtVGvKK+V7W2pF`#iHl^VCkZ$33-&@iBr?35d9paJj4} z%X?!LY4+9f;pDdilaAolLl0(r{v;4TPxlj!%@}XP-Cl>pa~I6QRv>|n-M{1uvxks1 zfr6Yzg%vMlZu!ILeL)|jBkcS(i2NL0Gue<(675^N*k>%7gZGRqL&vaPXIR~>FK*wR zs@SA*fG)Xyt+LuAJ0`7z7+by9=J}pyj5C7MWh#cfzQ<~jl|sX(IX<#KH1<9~x!*RI z9YLZ&MLtaK$|D7satq5Ti=@(Od;L8?42#mj1LcCkn`nY%WUOMlVrzM_P{R zWXIV>SJ(H#K7P8&s=B-fq z0uZd!UZj2Rsu6)~z=+d)y*#3<8H72ylb9&;1*;uL-}sj&z2YM$fS!#7eo&D8AqtE7 zy8(sp6b#NrlTUUCL%dr3)FgN9%Y89qPio7dkG4jbuX@;_eyq=?8hHR|AI51wxwHPp zwF8JffeuU>F8z-@C9{%(SBMs`mojrOjT zngkVUkDA2q9rT_`sU#LE6lOj)^d9VlJ5uZHJ*auW;#J!6duNQ;jOF8#=!^`zolNJ! 
zk#wg7a-_??+0UnZB*{Cne$1lZcc95yYz31mdXt4TD7nyz-lk%W&;%WTEtVAjz5*uw)qVzxo|QF#wa?<%F3?1YrK02}A3zoIQ#nMCJ8 zHRwOOn%=3ihZMKG!0`Xt?3RfSS#GX6C}rHX}t<6^l?!;c!Hksu9{qvVcBdxRZ{4lknok&J<9VLzQ zY+(bt@^8I17R1HpsLJsdN5U8etNrbtDu)-pO`j9=5y&x5dG*!?9dt$8?}OM%27@{s zeZvC)@o9-85nkBMFISJQY{z|?uc@{BQ^@dYcmtn;2k4W`t!>S*&x-Vh@$Aws8%Y4F z!e*h=#JBal^?m?gBe{ap=JHmQu<06m|CmLiEUq(>Uq_pveA465ls>uvJ(T~p5q(g- zRtALuag%BPb9etISQDIHeOSG`?Wbyw2#%!5@D$cv;a~tEEI``Bu?F*Z=(={- z7Q9c<;+RTb!?IC7u2<39cG;+utj0zepp2HHzaq;+HxqPJpMNrN!rg z4Gp%cN^`SLm{aw#B%t}w!HG7#RNxCMVL|Uxna|goUTbAiFI0BCVx;v)b`WFpGQ=u* zGF-p!tuT7P_`o<*wvZiRynX&=-ta?8s7K&<`5-=)!%d#peoKG(2)O7??mar0-8w3~ zLSOOGUF}7+JL#(Z7kS*vh)ozss{QcI{eY`PoGPXCd3zu2~ost zw6T;O+n5`i-tBN#z;Qv^+&?KJlkcVh5;XS09+}$$9LS3bwV2+Ngc_t->cQQPb-!5Y zN)t#Oc3m&>_soZ%4gpwi>^p5cex5`KY$i_Ah9UY#>EBqh* z)+~IJhva%NI~r?VO1IsQno}i=zZxt*RP%pJZvSGQr6PUDMxe_zWHahclErbP8%UKx|k`^8AyPEDeh#T|D z(d-b+JI}2EMjdOaAiCf3+uI5YDG_##WcK z8=22r`BtCk3qo|s3o1&lN?>j_Dnwkr8fE+i5Y9rCojv;Q+|Jz&hYT5Y0T5z7aq}I= zPn*+tPRZ68#b?U4yp1db9a+J3P(Yq(Z%yH&gp z)71+|;&gCoZiAON?fTbDGZI@g;geT1y)FU5-}3V&*tf3?&G8Mbkkcee9FYqHbrin!?28yEiAyiJ&9hO5oOF2Yp@g0|hl^wAx;2+Zpkhb(=# zfv7vm)UxdaJ|<4A&Cf`XaVL(i`sm<9DC*S8%d-+Wf-Q02n^4aMJ%%mu(Ccw`-V?Kq zeIT5!BSI=uelwe5EAZuD`BBqvJMy@dbQ66puG(2{fiY_Mc;w=%pq_$K|9s>6&xKG~ z{szY9ZE^?_M1#q~lNH{b;}zYSh3DNWy2&W{%0-&NE}mM})%P)!zV>#szS!=jwrc{& z*0NWTE6rtvkaseFfN!CArMCrfYSu9axFf23dOK~N7R z$R(ua{vk7d2X+03hDFLi1wIQ`6(4<2)&Nu`>QE-r&u8K<*q*2RV@>m)?mZte{kio_ zQlvjO`2Te9L@Lzh^F&~7_U|q~A&579VDb?Ebe7+Le?ku7fOtLLSy=k-t~a1a3E1eL z-n)M|_<#Cjrw@{9rN5dC4HkI8GI#Nf{F6Hq@QvWIemF4&1DfVyy}%A=8b!2|%QTO7 zxAs6m-v4}d&$9O?G<_P2b+Z59T$gH-zB(! zpJonv&3(LpWildhyKi9shr9oyNz_BZ2q6=Bw156fNB{L%(J0`9`;Q* z#i^QFH#GSlK1kvQd@zJa(CSYJ_wO8Zz`Z7t0S#7wjLrM!F#m@Sj*|=Z30+*9ng}qVIh+K^ zB7CXIIr#*~^TK0;NeS*BFMQ$!5vFN9oUA=mj5|6lVQ_W-Bmb{ z7b~BLbzMik2E+l)#w`5YTEa1wzuk6&m_8`d^K3a=QK`+)R4SUpchb_IK{v7U!8`g< zq%Af)g@sFp@f9`{49rj*l{^Ir2?hL*#|M}ultUgbS|B|=d_YH|sr=dXfFjZ-KDJ)d z@e(k0jl-TFFE=xKKUT}BR;ZH~$z{frPSZ-);;B#orEk$@dcv6n-^N;cA4gZhTtt`6 zv+!zDF&H;}=DcnAu&JtXbEi4~^^wuFTuH-qb69#EhmhOhZQ@g%v?_xhP<)LTn$8>y?W`I;iVk<^|<@6iiH%TG|73G;ISM zTOEU_+(eQB&yxm^hg647^QiLsWdidLcd}fjk3TrL=(W1ZihFSCI$121r_tzC1bnpu z+2(^hq3rH!9E<1xHg8acnm$2~@%Ssk5g>%X|FY{Rm{^$zESU%QluallDlEQ*P5oiqtTLyB;r1jT|R4STl;X*RYG@LBq6iLi%Wcl zM!TKpCZv@Z!Fc~{l^SHfByV@1|_)tZ`VIoqm!7F2h9mMYd_i zb;5-!)LmF##m|RD86Tc&gBIW10YYfny;=9-@98{!X;V&(jB$O#3Z7Z7U3L5EkG-!m zIkq>sR-4^N4e%9Dh3gdbhbrv$-b8=>xU3FHQK(l@@aR{s9##z8ZP_PvTJw00RL6N= zxis6g-jSiGU2vRxo>8d3@aBE9ianYY(M-oBv7D}GWTDQCeHpPZ zV0o#vi7+AiU_}OihG|%}H9PfsPtn$HD7K{wWqaoZa>Kn7jh>9H%gemZ=@kb@wL4!) 
z!x*m3&p95_!6l^QUJ@7EZ%1D2-begg1!ajuoVc@RA2XFG7G!$dT{?fUzaw(J*sZo) z{<=3A)NCAKe%}gT`HCClckP#vJJ>pe>qilKl8Gj0*Ig0tTTND)O?>!r;5n1 z>i%(5Q)DS3_dFZ;AV8I2hi>~RY5#7@VuzrfwmQjOC5imq<= zgJ_q2^SctI5=(38v4WvkdI?p)`ASjRvlLzJIkGfQsoUbREjFGC7=8n2CE_)4b0+hV z!U%j%(HO}>-o9xEW(Sk7k1WTi;|C`32D~DLCX}WV6m}weF)Sb82`xiR`vGw>)VSid zm!QYBq*!y7Dw?YX!M^!Q@B^EA2baYtUeM|Lf!I>IKz(le+jPD((u^a7Itjti3~t6X z(D(mE-CKsm(WTqp5C{?)55XOR1`Y1P-7UBWcXtUA+#$HTyEGCA5C{-FxVyWNG@I6%l5f*%q^%&7d9k2IeZ8h+wMfqWA!3S=#VX=%3pVK?PC7g2jRroU?(PFT_ zLy^jlTK*WA%B%+q+$)ecQ=TC%JwUAzhG9e;aG^S##F@BcJ1h0hzi{h?R#B~R)>&OH zUNG(firZ21J6_uZE0p@KcZF0p4f%kcKw22FD2o+|%Vtd^9q)^NB81J=8*wL(@-+-Z zOB3AUCRlJ?gDDAA;f*?eXuto;J-it+N$8!G4b0xl9xf)X_R5Ie%9KH?*L4L248M}Dz=Nub6C9=m z!ZW61?k0Dz8NECn897`}xP3N>T0=nu3#tIugZ<)=y${B340#2(5wF#LZ}YNAmiU~v z3&m`HEnTlreI+sd4WlHcMdO&D<>$gbgRR{m{j;iNBYph}N#F?7ZarqTB;wq}fi#pn z(&{6U79fBW_hhgmo!XI$4DlMM=8ftIV_@$C^3Z!x!Kre^F}3;CVLs)M7*xUDX_7%2 z6)FlX_xi-{VvCM5s+#r0CP`@2z!KH6sav0_I?=<3`ZG6+N5^5``g>9Z>#@Lgqwi~n zhN(=iiO^o0T${`*izj{>&5{iEwXdOuWw!nP)-o3wIqvOMr8v+uH}tw=WuW}t=VS#E>won;{9ev=VLRYv_1f2;a74M}?!VeNdn>_#NII{0jvnVxzfUNfL>k z4sipg?4Qn5^4UT&3|8M|OWOp|s0^Ca^QAuLkrK3TO{xSIZmC(LJD+>qU4^uHg^enS zyk}2^Mqv*if=3ICAD4)ETSRT-s59I!N+<-gvLL^g_od#dJ&G+dUF4cl6b&PTtyndN zX_5nf2hZps3|(DW~t&2RNo~5S7B7FpulScaES4$hg#i4j-K4M)!T;$pq!ZCSR^0inLE1Z-sg9=VLb^xr`m>v7k$Ze6TX$l zQo`EDjlp382a1g(;ERDD3zXDR&Ujb^!#aTNX+@o&QlHrbERmz#q#!2zXb81q;zwyh zzY><@3FrF(+heweNiT=4ENvikCijoXV7$LCGR{&6jBR(-o^vuE z?Dd%<6`DE!J8G*E;$2(AMuz&a@Vt{XTG*qFoN#^n?WCx}Z5guTCywm7H1Yjr7Nh&m zgG`nu!<=92$B*Etxk$h-->CG}fAdn-xY@)$W!ZcyBY`%p5|IP%d>$x(zDZ^#Cin=M zMQ%eKILS*_6v2ka*A)~tws{i3s91+yw83TF6CId-c=7&&I^}H)A^Xr!7>oR|cmj8_ z(~G4A-%({s#^;-aB>MQ+FL(~Yn;LHam=4`#XbmNu%bj&rmLnEm4C02r98TU6wwioD z36(Vz=YQB-^Ged3CnVY&{ur;)pT&eloH%|6RwkzgqX%-v45#v1E21KeX%4T)g%Grp}O_^I2^~=(Jp|AEq>VPLbxeb3$01=JB#rNmU z7G(V^SFtW9+;Xe?Y`LW`d>GaM7l<3zN{_EhJ+!u$(_{6-6muk%WHu$VW?DkDUs9*QFs!g_U}jG1}Tgh9`s0Q?+=L@t>!=>i|FCy-k5kImO!mt)yhYsP#iXh z0llP;wfjdQ<)M+LabE-wUEn~Uiv)ME3ZxT7rmh3zG5z|ua}FJMrbaFATQ>O{1q{zW z<@0S0rB7`iy-~KjW-|{ElGd}~XO}}c2Tb7+-1Y;D>8crZt-&yLb?7Tl6FP>x{88EA z&E;4wOzuM#ZrYlU-=dDy1z3Ah%pjI~izoNyGB70;D`3pK0&M37Jxd=UhkWCiT<<`$ z7VHsG+mHgu`M$-0`z1z_ABHw8Ue3a3Gc1=c1RFjl1qBA%Nkk#zOvSfeza!a}f3Z8p zD(!HuO@jn$c01GU z-*1)UC#W(rOc-?Ekcp$2PqH`*TeCwRdHBQ_UU|A!*?4s(LBR#sC?$wNRsA4bO~*k& zdDVr5!pCn8gzu73pK~`|WSfok5A1sv*vjxI4C^rx(~Q?3rKcH?NV31aK~J&7+AQZ; z>`V9me&6N#$z*%4zf-+J&vKRon6xZ$?S8e#X%Ybn076vbuY{>99N+vmI1;_}JnQ>r zhz{5(63YGN2DBtLY&1~Nh6l0;M#(sPi2AL;#At8KFTT5J^WRlsvW`1^BPY7_Xc;2+(=d-SbnN z5(%HsI@x70#_r(Y`o)T>xhrlD2C}?%wPTVVTCGty7=mC%FrLMp7wV6=xxWOLB<9ZAz~H}BQnf5|VcF7%ZE*cN)p zA`HE=r)$JK%)@_Rm6F_3{tBUII!UVdsk~7X7M}Ws;tqO)q~|hlXWv&Z*%Q z0Q#$+ypCzxf)`O32UP-%34fWP~Sp+D@?sm!CX!Mq+nm%AD*!E zV-droom_r<@1PD-*XGZ7>?$NFLzrg*dHA}|O*mP0_5E4x8yYK<)Rqg6EJ>wJ zy6Q04bKLS8k?1rF&9R@8^Cq$utMq7h>}s!75nOfN$k7DFnWpfi?jUG9k|!c-Cpx$) ziZ>zYt$+Jcwg#&+RP^0vvF&F*OBq+$oZ`e)WQ9}zH)%|A(PU6eiDqCyGd(K-GxQf) zRK_s^jo$%uK)`vTGU9^Hnm_y zUHrzHUPK%}r{^w-@b&fh0j%Lxk@~`GhskiG202`_G#kpIKq} z?HxTuEgPAKv8=DZ#a41=9avcBIi6y`+16{eqO9OCUE(9<0b0Nh|H5S>RKG;LFeqOI zIV#1xIB|o5-Nek!d(o|R%3EmcpVmpO`J+XQ`TlGYhHg3Sl<>Z^!Opu6vd)Au=!Sws z&FFQ`s`phYyZ7q42pS0kA*%(3lV{gxg{knyL2+z5)(7YksQe6>P3jCVA6=ttPOA&A zuy#pm$e15W1Sk#{z$IxE3o!z#Aq(XM&eoH&?5qb~Bs7D-{n5|OJ%YQs(E$n8;6Q2r zaG56(ezVj#yyBji?t8;FGH?OA)A zR5B@&9IiUoLgjaSaMq$<-k4FtizJ_+Yo!b0v`3Z;cms>~@#3&ec|Cee5Al0F@p7507GwBy81|6w(VPalzX3L}!Fe1aiO0hlo|Yi;Nfki?6~>OwG!wHv89S7Vane7&na)#$Kye5XCP8t zR>{WKFyQf<$YhKK4t`f>GELii6#8lc?55m~##@N$lz}sDO43LQkgh%iQiV zobWZ8a|y~(=j?Tf4u94M^}p~C(1v?6wV?o|T^-Zn)+MiUGLh(b$asL7D^d62W~|@) 
zh!*}@mM1n}b=W;h@DYD9BUuV+WEY$6?s#p+K1+R|+|Oh%=9 zyPDT$R#hGEk1sHTWfT+UCxLY?C6~$MJDxy=$OU{yh7fEYisAH){W2!7`;aY1^M>Dn zRo=lzGUfV8Da(?rC4_tKy7lZk=KUm<^>FJ?9Ln2UGpE(=7x3WLo~T4}0fkHHfdNkI z#M=e}iJemMT>Z$(J+}g`b-x0Uj{BU;@#Zgqzx8Z=e~9b~6wgNEbr_yTtH7{rWyFUv zw<(4l5)w>#w_r7q85)@GnI_K)r`MT#p~P6As=DG~@0)RTjWZKdjHaOG&qn?6E9DD`<+4e3nZjUpgi521f7>aH?nr^Q8sjf>*?l*CMwS&XJu1pnaiRX zmQ=Y&nHiJkiNzp;QSnR8W^|r61^r-tIn3KioK!3WWy9fQWl_eV&#bc8O)({%hRl80 z&)c`pC8%;>U^>mm_dM_Culx($3}5PcJkN=!t)eaGou9zH52)H{FnNFrPvoCsWqP__ zx#l55KnFIkJT;r?TSMU19Kn?mln6H(Sv|*Ey#q1^#-*%S`bzJ4##$pL3QUR@UudrO zS=C}P6B}6WCpilq)LV!b-V7an z4!K2hoA5zEhv z4y6AGB*^I#tZPoW=kS6~-%*xUak=SkdCpzd*c40SSQ&mH|!m?DB`t2cB>{q6jj*?0|aA+o-GUC@(Lb%oYzmo7%Iz zlkf$Q3<$J^z$lu#i@g)+3c-No)UHg`3~oV|!$t!vHgjutr?0FIekr&t;M`<}q zcfgl>rys5Q)gtJ7yinFXm+ypKZ61BKdmoUQVA^(O)dM}a-f^i^a;qNTF&*?C*2$j% zV(vxC&l|lxD4L`KQVSGOW;v1nm=1WK$P~#9? zDC08st@gWc(eiG@4*`Kc+*1w?cEUT$P{UzK3yAA>C*!ECmBw`#)!535j{CPDVV^;7 z=CUD{mvF7GWXQf}Q`zitoUMyIU1>qRuZ4n_#^dpL2Vh6UbxJ)TJSh3&&dj+qcj*4U z!`@vqsSi+>C3I#T4moeJHon$1mOLp~)NZ|4&Mmw5Gx6q}r=aycl>7Y}?!7aS?)@kH z=fm;48OEhx`s@r(^@|cpTTu5xo{9VUt>xWK(1*RS;X*z1aV!zpT$>R;CN-W=wW=^V zLpMS9+eM}C8rc#PW$&w0USPZ-S`6Q#!1_Lq3Ec}`T%ymDjfB*X%j))ADrrlz*x^|5 z{irt#S{}xq4eSY+uQU065j6ZMe&E};NZBIW4lgwB-FNTn%&i7;>)IbbCkN;=^YzTW z>gt5(a%y@(tWHq>+>3Sj;1oil`CYxmT=8RyZ59~~jBUiu^jIrm!PC^&q4n%|97?6K z1=rT|sm>KF{tW1_BKO>(Bv>-A3$hpxS!!eYz~^$*VO(4D1}=^N{R5D_R0lb%c5#{D zooj!q&7?PDvroyNrT{wH6+>~116ockv7Bn}4(8nzzzX=Hp=1@*P$<(&**~pgazmc` zBR7*`eMjk$0KHG;l%^ICIvr--7-%IQ>&DC4H@kh4;zOERs2^GNXF#AE zc@xgsOkt4)549dokLU9UdQJ0-lcQxpM_D+0c_s%vB@Y|ktUVM76D%aLTPN^xON*PR z<6K$x<(Tf_7WXkMK_mgs=p`J@B4I|)wQlWpg-7arK8~ix`RU}~tly~rw-|E7g6#Ga zI9TG(Iw-X^!W?>I`O@f}@S9r?xP$Z8^BV`e$c@kXrfUjlX5D5vpDK^0br0k7)Ihb6 zOeUVcK7y~XeJWXjRg)^~)V-pA^n!SLAEWtIf<0GxUnhnM8BjrctF16y))|CZH&=Qo z&syoT_Na3eQ?6KC5z~uID7j#!p65}32}V?}y)odsI?5h?h=FAzln9YMk%)F{varmF z`*FzQymN@Sp%gWZI5iL}Dm$Jz^nA^a6G>DhBa!y&m}DD{x}oFZyc<{~lqQIV+z@yh zpzM6^_Pgw?$4VH7#@WIncB0hZt?;g%6r}|{5I+JNs%jw3{g;yvp{YG zV^l(pqZ~nRvLeJTu86JZtUdwAqYys=liNL{;sQSz%K#))A92rwb!)ErD2L;vB-$TG zUj*Z&Rglf>J%-x1y_bqxUeD*^qT^Z!BkV-RaDsz|VQ#=v;<HLBp7OQSA?O9T! z!NFjl*4YTguj_8y8+*@-LX16acY6-L=fZOW5XcJ|$SpJs!c^IaA!h5zyxMJ1}^M(s{I|Cvhh^D%ax5a9yd5>ZN-p0-E@)i`_+C`y2I9)9!h_Ueb!b zi?rnb0#P8%BR7<3usgK74zC!1jAGX;PljNX3eLepNIb{EiY2kHimDnSnGOg|MG2nU zSurFX`bQ$DZxRVgN)IpAZFD-XQ@5d310H8)zDX}#hASSJApVpzpS=qI|CXdHNqlR# z%KI5vK3INvY;WT z9pccgiNygdrb#xt@_tvKouC=u)=pSXNeACn&UTly=`$GJwR`T?lS#$(nI@A~?7Zdr zPb>1JIQ}1o>zl zq8S~`Bi4Rz{!5`70$%MS1T##jUG7iW|2t;?!cLCto)t7`C17Yhj(|%?OA0l>X;Th4 zntYBLOqk*{UsKj?&1W06U2j@S);q(4Vp`t4F2WP6J6nBEaj@6`&tTBe5J_M6UqV%V zZ2;8_1qrtOZw|OoP*-oa{cJ~kj4aNt&c3mxNVUw}Wu0TXWhwVytfcHzBLAJK#eIv{ zO?~M>yQ;T$xxNUM0)3)oGMlsofg?ZNUQ2n4(oQU9Ymxb0XyQ|eKM^0r>Cb-kr9w*! 
z6R>vIy1|Sy68BEys$0uaiG2wiW;Ep+Ss9&?{;Ot}a}~k>bkacr`)U4fYW~1j+DPf% z){@#?_CtXyEpDGhsIF+@0{4{#CHr6HtNjn`psV~0HX${}a2vhkV2lxEf-%>Ci29x8 z-*7Zb&d|DstYr38wLQ60o}X{G9n(T{1vk7lE0TF)Semv>Q8EpdOy!JH{^vvfgKkK8 zZF4q^U}trSAs)|@>{4T(yBTI+$F-H-I!+mx=K{<3YWl5yJ2(0xc9sFr1u{N9RMl{* z``?T}CBrWr9xNNrQe=sPzF~CrM3{KUkr3N#_Ioy=JMhJ@yn971C1dXt7p1A`OQr?= zb>d${-(S3dfQ~2!6S*4`jxpf8DaJ$UFGFxPQqItF)2@T057OwNAA)~UY3V&yQ}nCN zCvBT54?nM!q)RIp%TjCf62=8kp^25r=cTfBkXxa;^pw>#{kII`D4#s|ZUhy5Q^cRH zz{4_t4%hsOc3ZKxrjTM=!{r$A^uO{385sE#QHy>m&dlVFbku{!fg< zZ`e;|MxWp=4E^+6%K_x zivPm0bwdCnr4K=p@~`>&4=+kkg015tsx$s{t33T41yCHVv|7!fqDkx(X5Uf(=b;Jk zWj^Mqjj#TE96pICNv9()^RdtB?8K7mufe&hdob{7LBJVIa;WT4{rNwCZR&E&hX;-w zG0)ouoM|cHV@7N7C}2qfo%$JJ^x?=-e#@1AI47OgHc}k0M;ZE_z+IsBh^6pX0V)6f zzc$IhLDA)`eEPdl00u($|7UMl`PTP|74p6$2zJD0Eq|1(f2ix)_PTs@mjl}8w;ebz z|N4v7M;X4m!K`=M`uDg0BGJ)(=EzCA^Fo~8$CKWqcTY6e6SVYN=1~Ah!rZN#-{yNL zO~{J{Ix|KGGPqr{c~`$N4|k6(vX#!$G8YWRvzfOZ*tmOVxNa?>V)MBb@6CdB(uZ$T zm0WZDl#1R90Dh1gD1$kok!!v1%tq(^=XU!$j;>W=kfvj8AJ@Le^UU2iA1=h^0kOSp zJg{W)D?+2^0~gB)z2}(D=OW(Acb~b;vr>5-RqWRKUlXzv(HrTwPvrOSWq3s|dTSWn zT1kO$nfVoR#jHH)x(iBrr=yvW3Am#Ubk5@4FAoO#j{Wh=`GdB+LB!(>6!rgK&)e6m6K+YP_NqHQ(Q7u!EFD`s^ zO&?xI=O$77GQWF29x^uC(hEVQeq(ffR+AsivOh5}H!&OUUZ6^|muatBp*=Qi3q&GM zN=hcu5SPt*?ww&n(eQZPsXa_LD?WEDjRE33CD*40L_b`wd?ZpozkQE^Rjt*AJ5B$^ z&N~@(!ajr)slj?N8qvI=ubSLjY?EN0Omebvk-BuTpqHP+bw#OBx~6ckrftW0Fr#~H z)3Z*Q}GPE16l6T=v39V0W~Z$;PjDfiODC*UytMtPqB(dbblA>n9qR=SG9%Rn;HkcfcG_5iwDxGm z*gy$i%u`ckBtmX#RXUy6Vrvyp!TYg3E_QQHK(ae!k)S$|5H)HfN!^=gO>tYUQ6t{A zb5RCFtp7r-o;r6Hz3$ThNk!^Oe=snZ0E*}SzC{C@;9AAwedooP+U?N~6r^|*_L#EK zCd|1F)SoDC_Yck=0M-`t;1mGla%`b(4{aV~|cID@}z+ya!Eu&=ygd70U zQ#7@N-{kz*Myp*ZRbII011advVPOE&KA1%3MVIA#S~asjG@dWLy;;6Ju2$wL;7`3D z3YhH^ZErl$)K|H0yV3f>oX!z?F_P1}p?vmsX7F57hDwQ@_6LRZ%v;pS({pTKgD&J~ zwb1OU!P)RAK?kc99T>jz;sDh^L@J?|I9W!|8fRkoKML%kM>%=r9 zCfZ?GRm}rXZpMROQTlYtL!33>#2m$>M`2?D~?oXs%YT?D~B3b>Ko+cWRMTq3O22(bGGe-SBq0~^K35%DD?NyiX|h#Kb`={> z=5~igXG*;TRplraJDvOK4qMTVbS}&yZIef;(Em)vBxiERuCddmGq70$cfU5C^_dK% z;7JEoyqo*Qh5;h6kkxQH_A8_FW@>p9fRRaoB&6CW4UDJ}*30q`ol4)Y6KGRhN<3LR zMh6m&t1VrNMl1SM7abA+e9&w?syr;%R1oFcqOfJgf_Mm_0nh2C8;0;FR^y?R%chM`Tgu)#7X=`dk#74}($Q~r@(x=Batx0TYie^KM9g#9bCh7A1K-Eh z2v8BnmBL$K#E{!)VrQBG@=ZRFkx&uDM=4opJOOi$zaHUsc7QchrK>SwwUX#6Y3N5? 
z=Z{G)ln*Y??&LF_5GsI>O}VRKjRVq0RiBu)hJdl07PDK}^OaCch+yOC2Ee=s$YqJt zrb2pXl?TTdTRT1L6oBrO%mEYO5a4|REB&m8Q-?wkoFWa1*j~L5SZ&@(BA-HfW@40S z5#D_g0L&ro>;>BL>&0Pc%9K#2cHi{v_O`?|4(YWUb5o&RpVLn|V^FY%l~R`DnTWbA zyAjNJH}<1YQ4jNENP219xd%+JT3>0%k$|vy8efCOxhHTrrIO!DS}iue(Qz=R5mc(J zA!f(Srxq1|@S_(KcN_-h`j}5hvL^NfIJ?&~6cRCD)kn+ymkbVquQ7*Ebx@qoCF42s zJo51ZlZ&Jaq}K%AKH{7;WH?FZnzUuDO1CZ)e4!Y|Mlf)Qc8^?$mKtR zuF&{FKqFN66S%NoIGtnj2jdU#K2cn5m52^qxam5BiiZG3!a(CQj5D38&nd696r@k`5qKd3 z$*ze~7R*H1{#}d2QE5#1(VKH9Y==`z2%m8lLH@@J>=R6&+gar5<}U|NMtr$i`ih)PJhC=gmIW_TO($qPlB+=~ovJX|!u@iuKC+S$iK3)K z;(^4{aauX6Z*En@QAQ^tsveXRtB` zTmpNzU%+=m`zmVC^_OQv?mjw~&n1dF6YetN%$d&qB*=Ja{3bL_wS-FXklW6UiotQ^&PifpZv_;WwXB1z&#o1DaXI9wZ(w4 z<6t5n_QQv2CmG#{9sb;B?`pU0D>*^{y-qz^0#^_Kk6y?KkG5+{dV@dUezBeFC}`<{ zrC)C`6F43HX_ABV85sZ-o!)h_&xi^K1%W_i znC@Q*RnJ*D%`4c3DnzMC@cd#FY-ET-mj)`hwojqy-FP@Zg1(s7>V1?|ji><=1V7-h zcx3Y)rU`@fqRhoYRl@h#Eh5Khk=M2>+r-jaB~1)aGv}i zS1p}1n4C5p#K}uHQ1zDiN{Cl5Wk!zIFsGxn`9S|E3mq^Q^_c~9f&8;p*uEA6BtrQd zS6Tm;#sV1VfMn`tD?x_jIKv>7i9wAU{Av3T3QrFad>ZJVt@){qeS2N@Z>pWWv6zNp zH)vbJZ4YKnS@;f{OXfne3|N7t236R#%+yJs5az)AkQBd6$5(#H>uZUQ{Nll#8uJmH z-_T3(6$iTX_RJ8mZIAqE7wy90`{nBk!1tEiD7Sb%obs`_I1EJsf6bM+;egGop2Vz9 zc$GhefZ;vP$VHMVYxXKv>Wg54-T_>a99h`ZXlzB}fkj|HGP17L8w-g(&~{OLKpNIf zqg>Z-2@^Yq7flxww=x8thA_PlHDZY0@oyT-aYF8mY~DG@aS z5fm~lw%k`}8kZ>GZu6a6Nn|AezN^QjFX>u6Ee3z$Va4ov_OO*|U0DUXn&ow5Miva& z0m*6sQsj>`mn^7!PNpN=%}Co+2A8U6bsxC5hVDeogQ;Ip^+h9P z{PP>jIYq(}h4|u|^aS1WJy^zc>@&=V<0O8=Ve<61pO!MW?Z2c#P;!0RwLdRDA1#~O zTu!&-F+u4y&VZb$oTP3+dAv^hb%(waL7fJK@jP~En%SSa3m&ux*|S7VW$bAKynsqhQp-D zN&Xw&IKT$?TjZ+A4w`TeXSr7b?S1tfyIwv{z=>>{jK+KH1*mlRAE$CLuz{oo4661i z>Ab-yvqy%UYS{H2S|~+v*pCzZomhA`uq>~>QYqh1v`~BNyU88yhzH!fE?u==9}n=2BO@C&1@Mi6 zpPO0&bYV?hmUi9?_FynXoOgJ2IdmI6Jaet1ixjM}JR;Y5z&vZsBr;FKY#y_Ez-F_n z9-_r%s7)Now6nLb(3#h|mrJPd+_mB~M@;_EMokG1ZF}yPQSOIi)BdvzFSyZ!Uycud z+V~16S!as+!Zl~9-L3w5^uyh5stQHI_Ngsr#+%DDio;Un(~m^9P6f?a2++yhpt)V? zjtwJi%yP(Hr!S-2tpF_!r#>Z8QO99EK2ZGvsWi?`Y2l2fbD-n(UA@_EL00Z*ZE5;~ zhJd&wsWh4+NS=Le3EKhsG%o| z1U#J&=O{92tx6>q-kM#sjTcR;y((dZkXs;Wpl+$LIv1fT$UYCJJOG4`pb{vnI3Y&P zs|FlF2qPoisGwz}(Th`A3x5M1r0^T2N#}EBorldjV#JCz1ihu5rN9?BVusq%#7ube0|R&S{?;i z=2UFIQ72(^&ONRh#E!%Ejy`yu4_H7PWFgTqQG*0tVlzpE-7L*E9K;}5y7ygCb&*wY z_7eB-+q<|n#5CHa4&Ly$8)tZYNGAvS%0^c$GtV%Y92j`v@!|sKTHIw9YpOT<@<{>& z@kD#78*dy`t>bztr*3~nqt9f*2SGRtq%)VOz+NgE5(+$Up`65|QYpgR&#OLWDk1f} z>Eu5QJZQ@;X}Qq?qcJ^zQgiwWnYNHEHu@OIc@u5(YZIGU3lPg3>tkKQ; zRx#1#wrdp(4)23()_!aQ(6}OMiw)?h_Y7zJP)j@)CL!&u)n}$VlUIfq{3wV4u;V~5 z(H14&c#lY$hgenOi|yfL#Z9q~{YL8*(vMpg4CpWlw>YV8MikzabfBJr#PYaL6^T>k z%j;NF^$gHho=&x{o)gZ7xMzo_X-e)T)uqauX`HSiMhpR1IaSgTsnd?Hq43CiuAm}j z?2HzkfT;7kVsmfTiibxBD!e%JFM_S~eKh1cqY=IUG4;Yh*p&*{iM3hC#A7@YnBVN8 zHh`9w>60otKhSV14Z3@-{ewOny+>+n7pzhg_CTw%tc*V$(pkyaww`Sm9XL#D|Y znqVQ&PF2+~u3L=NfxAiA^V6pjeF|>p@SV0>Ua*q`qN2Yto7F>G2V5|wMe6J|LZDFi zF2wtKtVnZ zg5iw$rrmb_m6jdkl=uNR$GJr+DZ<7>F;mvb4iktyXlbq1j;u1PqGp)yQH>&JA_?6F zZV88hYk~ph`G&f><_K~xwhTm=G?b`X;;{SnBFfzc0jJA&KCf3h0_K|!w+9I57ta|7u}@6ur-2B29c zLFt!aVM5Hv&a;=7=Q(b4oret1h~=ukKXBXkJn?il940@PMyRX7#d|7wVvGdXJs8Z1 zeGc{DitkShKp#V}gm;*Djp@A(u(O0u&PgX#ED+DDq0X6!82ClgU;r|GyExG+jjtY`B1lmJd;=Cd^FX5o`5scI3uRQL@Zsv~?iVmGJ@E8yf! 
zMe#Ah5*DRmd7KrYH;OM(ZwHOpkxEJc%nvN`NF0x|bfDxVe+iivoQYYD^j+aJ&1?p4 zLT2Jy27{$bV=0^gPgk^WX^3R8k}s)vH1)@q%1Ed}E!o@ruGGE4` z+7ed3`>A6c21Bi#U5K;AC>JRW>Hiv9s->`#aJ3iNwYDP(&4!vq8Fd-U98#T1Re9b4 z^!kOb?HRyfIn29w^bU+yjw}|h`{xR#5G3VAY)d`~;7E}lwTTlRf!@Kd{p)LMy#ncem6>K*ak11vqqDq%C0FQFbmF_OL0W|eNqWvr` zIiLoO>zwZ!?$$FCusTKGiLtvcRI?ncq62fIrXu8vb~?@SUfddI5LRTx#U`%V{N?td zG%Or;FQ}9I3jdTXWa!kV#vXk-4&3Zrl4kIPz(sx@pE?wN=wWTXfx&O1dVIrCmLi@= z(FA}(YCdWXvl|y=x<Hyvm1&+SBDl8q+aU{$Fy)2jw3ah( zWhxj`GjtPZU8@^a7-w!H_t=3pS~)JiY%n^##gge(c#I)CTbP}db|GuaOMmDiu7Jf* z1}GB{>j9MXH4ULjdl9dV7^(6yfNz;Gthl%xIi7xc=*Q&RI29!YG!prhlQoI;t z&&$3i*8{BuC53W90rhda8f|UXN}w3GEcVfZIz@B828qHrB*IHgj)yvbm;=dp27+q|mtVYj!?QVG^#PKBQx1R~)yT{8sv@_{$0V zPDL=@JR6VY0A|CfD({e&qUSMIilapOm#UmO9oM93A!p9&&6uq_83PM?iM8RDk!8vH zXms|P{b~!mHk}G3bBpah)s4{tnWejQ^J=Rf+TaXhnquDbdt2CaR6tH(wwn9&>qLnH z;fXlsG)TMmm4!|5bXc!ucLxstZ`~7R3VITGfdcuEXL7!99BiZM+%hL=5_PW&~U^@L|3{;NxEucYng+Ddt%`LxcX=)_W%O zqw{EWA#XT^Dc@PI>0&QA38UlK=Le&1zJ8i1u%fju{i*o0qCMh#CiU^WS?~+ef5Hzg zVoNjMW~-e#2*8fvdgR;|UmI9wxh@;=)IGJUU&_*7n;9OzS-;c71-xkPpfhj&?@9iP z0YJfob{giOGpW}aX`vgV)6sPBy(NQeCkIO0iv(`ee}-yoC71RtGJhZe#d~zomA#9B zdSl>&O;YMJ7%EKK!xr!Tfm`{(Hnr&2HvJE^)<|^rs_U4$+W~R$6pFlYg3UQb8;A4f z|IThjDrUJw0Eoa+@Qcm=9EX3s?-ib}G$~Tw*ZZkD?K+XTI2Xuc4;fLwt)%=(P{NSV z{4>-f)*`3H4ovfi6vX220#u?@~`KA zghza!Nl<>CpW4p0It&+7O5c9z_D?~sGk_p>jj(RaRsK|9Rte1qKUTiAuhx&YUd z?88mF8CJ{y6u>7c9sX}_3j*|??OSmcDK2t!drdpGxGFzUI9K@A+U&5B+7Hw1rM%lc zC=D}>PMK)7UTJu8x7>f)a5xu(qcFMUsk_eBKHNx-vW4xizuYr$b2PCaqIKc?xgfFa0NH6?%Se7xc#iT9SnZap5t;+-(;ss{W=K{`9k*;^}$M zcn1B?d;7276nIB~&Me50*7|#>=Pn(1_PxAcp#0_K{?{*k_)vonW&0I=9b6@a4bT^G ztA6*VDQ9UDn#s6-Y3%6`w!0+Pftwbrg*FZMv zJtMW#A0tP&NY{$^@6*W@gYVcZ-N0&4q$SP=0;zPB$85aEx82jI&_wS?1{?s}RMVTu6NB6GZr`RR| ze6S8r_4uz(ZjI{z`lJWi-I0Z+TL5R1{?V`P16@JLIhvQIWtESM{?#7a{RI=;v*dsI z)cCR!qwvXG^r!ufoVkMbrI7U_XTJ)8hK6n|9lrDz8F*Z9RebTPpKzPC#d$CG5GFk4 z1Vd8N4;r3Ci!J{Zuj`2r3x;gE>+5tYyRMl*Ci@*t*P5PP#yW%y)6E~9q)c9CO4AMQ z?c5oc97QK0&x;i5{K#aZY5gLAoe>?v=9>@(LS`nTZ)Eo{=e2KI+%MBFsrl*?Uaqvlh4Y&r&#SS`RaKkqh-^ zb)OW3?}tx4=rUIIDPQ8@(LR}drEy)r9iRgsmfa1W&Eo@9@>{3RK=_C7!9o+oI!%0n zie-vq3|UC{2JPu~#DbjJ@?GA^ZS{oR-4;C6i2||d?KMRVi-z&Q7OuKzg*Ny#X&Wy6 z6J=ErPpA6seZ%d~W#+(<2%1Y>=W?DLEuA}|sG&J^g!Qwcnix`5*6TuXPpaFQlIUW$ z+Y9{ZN~cg-jgugUgAti(Lqx7A0$vX?zTu>TgtGA|y{}UP+nZnBeMAtN$)~6DINOST zQrz2uv|8M>v}qOiXKJm9IqZ!hm9$ic2l*zzw*LUqCTjGX#AestJv}HkAAMLB*|kQ( zekmpp@Z65i03j<{_|!Xg@geng{1O9~>jMB-eaqG^cE8-k!ELG8nuT$ zVz>`?f=2RK_0!J;*Din>D3H!z^;`I1urM6D2J8)E-m0Vhw&`LS5E)!de7qox0l2Fd zYHzO_v)BsvRS1oxGwAKTZiIEmeOvKDAZIanpPE01(CjJbQd=nbq%$Z0V~-41r&O7i z9OqV-VRmkMn|lkTo23Et*}zUoQv6Dw!&L^gOOAQlyX>KwgYYEc^7F z3^0x2&C6=J>FgL^Kh>Ub33C51<6v6Lh`jkY_f^qi^Ao*5v-4qFGb~c{xjE|q;4LAx zT}>Wev)~%Tm0IycG%mPS1U}po@OWgWa=8n@Am7d?0HLC|NSAv4y&psXrYKQ;C-uWf zVHz)e40+^MX2VO+#IzOuEoH$GaZ$m6_vOK2;#Y)(Zy4)jPgMm$#q25UjAa6wfUFQKYjPg>_5vDi`JymZX|P|8cZiG>B6+(_RwzfP_8x} z)(_QhbJ}m7Yw}d(OZ@_u^wv}X@b$`?1j%bK9ONqo%GVOct`yRLSlogF!4*uj=`FF{ z7%cFGwYYo}P{POJM@jZN-FV}Aw3M~sQ?CFZR|`@AD<(fXkBmu-Vf9h0twRhFO0{@g z&(iF{%sUuMH0}p*$Yi2J7OW2@!>LoKkwA_}MDBW{daddG;!UIT;Y8=f7g#SBTUZVE z1He6r=*s$4Ytp-glFNF_3pgVb=uLHm*8oq3ZFved`l1CMD>-dXNE3vfLe#EA#Q>%& z4-EPG_ccOiY0<*=TkWKr?`J+m_yX1|rd|B<7~q*K=}0l;G#U>)VTK4o7GWJ(z#$OJ z^tP($2R=AD8gP})tO))RK>YF&60ZFD92>9V=BIX`Pi>Rjx0&wd?C^C8n^ipEsCN^$ zbFkCky(qv@cmVrkjd(bTXdAFD*FL5dKjP5K*HQ;HQ2;?@^yR&oJ3%kOe4c`Sv1(CK3ZVsPi$5gb@ zo}00ZT-yFW!rnSAt|nRh27-qG!Cis{2%ZFY2=49{AV6@3!9BqP0fM``ySofdaCaHp z~O-BsVJqSpyBY34}?WW!D1v5CZc7kN0Ip^#RjrqQRe zT!fXb;XES4L!NQ;$CAVzsD@sefvq zf1`MNj+ZBi=Z|M=khOPa=}g@JFp@~lwEDXYD7d!y-O4iCq}bf|{IduT;WIos4kU~E 
zyk_QFD`KLOY|eUBxbs5M{7H2ar;PFT8!H9r$NXI9mz|EKR?7oGYV*1C&M#&IA(a8@ z%=g#FynlgM-sP`GrzcZ;e>cnnoUKNsx!)t|x+Rb|uOm6L+<2$AJ7FKG-GG->8xU0q zl#3Yv0Eqsoe*XP9AYyPfcN;@3W4>E5_vr?mLzOIBvNc0DoI8YF6siSauVNJ76M^#d zQyyKUnMqi>1Uj!{zW6G1+Tf)njo=s~-ly5>NF=b9!s~q}aT*e~Y<=rnuT?Aop79P z8+F6B#ilMNqyVkh&&yM|;@S}j0)i)CULfB5!9?Q{b5=1D==U<`@bzB)mHK}7FYg;$JcaHQb)PMB#NtOv z8w(N`R#}1w&O9E@0J!CrIP14mfMvlO3n)XLnRy(4**Z5XtEJ;wC;vBv_z)_rXJZ&l z3o~-YYGjV~^&j4iHVdRS@07dxq|@~wh0_o^B)_*8KFcVz7gzgC2)lXQBG%z493tGW z-5mEz_k%{XSY;xxy<@i@xKyS@OY;U(+h~tX!st#X?n77|y4V{iVaH=K=|^s1gwx5H zmpTPm%zDHhqS+Lth(A%oeje~ot%82^*o1Af3nlc9qd*LO7=^~ibP?!%k75@{Mqu7i zwL0!BfW~XXYECLfWdrYEwJa6q28DokA5JSQDi>Q~i3fi52yj-nBx4akb&yG>uRBI7 zxD3@wJ$VeoQ+!4ioG6Mm$ZfN}iZqofyLcQ4=-E5p%AKBCo~qai+Jhi-4WV-nk$~Qj z2%Fg-bpJpo@dLhiGmDO4jR8ZaIf{zaeG5j1prF9xyQ36Y9 z>JesJWy?*9FAh+X%lM_CZL6rlCi=ua6i$qwNMtV%->=Rrs3343NZt7579JGO=6^y= z1;hI)zU{Ksdhw^=gXd@h;?_3oyUa)+H>%G5rUmcGkF*hoXKVQO+I$(f|4mUyHUD)9 z6@~H9tNN?Z(~jHZ&Vw@q8VG#)!sZ9-@hr?(UWY%|<01q{6-2~c>d-}F;zKdVykX@B ziV0K+a%h}pv4V!6%;r~>l5;|#-_z#{Do2T;6NeNqeVPwW5uW2j`RceAlr{cT2wxsR z?)jM1tr-MF#P>FV`h}%?X@Mdq`jKio-f@SN5|{v51w&%_T(fHiNwirW}ra(^20 z<^B$)U#YK3;wAZukT)486X0J3dCQZ&cC3LQ94<0^^F0bT5+1E;%bYuPfUlK%UWeHz`02CQaKW zm-oZ)fVo_ET1j%tVs*-(C!I7)mr6EB!sA)K9nk+>V#hL~$Q?HrTBY`xbLNl*K@MFm)Isb+oQ zt;Z^P<~+PEmpn(DsKDUN324*Lfpkj#(R|D0`UfU0YP#(X;Fa>{|e-gU;T2hRR@bWRSdx&3YEA&)8jN2PSEb#Nt zebGXTNALBXN~(2X-r*^>q(x^JkexB<-KF#SezGhSQ!H6D1nRRBXsO<@?i4++GHW?4 zrAC}y1U_Q}BV&FDpD27TY}gf`;4DmgVmKQXu#?^d9?X@i!JiZ7WVWHhMMO&ice7kJ z=Sa~trnS%r4X;TIG1X!Bi_V^GW~SP`b8X`t(Q|6a^aQwzq+ys|gTgIhW4{6=lx>>s zR>ZU;H+Ew(F(=(;Ix6e*Z33L21aAzIe;f38xux9f?F_0G(#K|jhjwc!_{{G3m(1cq zp?4PWD!zSm07o~ve>PH_HH%=G1#mc;GO*YJtaL#cQ&hS;`V$ePPp!?|(2?c$>rjvO z%U2%sn=Mj^^+H(=@*#-Xk(r$eeF|D{x=E?%M6)Pk`c%Rfgx*|5-KoFRygKn}0Uxj} zB_z)#CoIk;uis7na$dT-85-h-@bp6J^Lwf*Hn*mS6Wu!IzEJ!=OGw&=EUc_;6(>e( zV(ZepH}KZ+cNco`?X`KN_IuB^-!E39humjofo9(q&EM}tLIr(w2X`#p823PRHms>w zX#yBXl=pk7A~XY?^4>65Y2Ht8qi=AL42oFLnJ^3qPWvr)Ii~rUm(bNN7({%Jb2Xq0=gfNfCh) zBvV1Tj)q`MsqI_cj`#^NYE((PJM711qK;N&8$oDkbX8ba)FZynDKY{vH80FX?RE{+Fmp>CSXu|_i&8_3~qyU zV(6_);SIHnPT$b2oOb1Y@9}KoR}QAd$C2PFZ06UsjAfX zSO{0As*ByPeNOG%HbcBz$Ze4L>7W7v%|NnDcp4QxY@LelkBQmiL3LsNKCaR<!K}ODew0V7e%@xU36+EM{LP5 zw7tg2*6>s1;tB5GN%yNc`G{^9ObddwNfs4krxP2?9GLTCn8X^o+Ty|v?8-{!cG~a1 zzd8G*QBI<;5=g?nIeFnYkhOcO>Fz9@*QQR7u!5p@7ACoOXTCsDy{A00T->MGN$vQ8 zIcO{YiC&~z~a~%fUZoP?s)eR+o@7~tiuFoUVh!u z!Mh4HJ$*MnOW>;Bxvqf|c<>qEVH}rg^2>^-(+zQ3W0A3%7PThdoo&2cuW>wBP-TW< ze;o3l)Ge*pw0!l2XBKTHT_6o<2vVtF;;ch2Me%hL&Bw`e)q(GT1zooE;H7U`(0+MlaOcV8&33574ZB}0UPZH3#rT~ zL&Z>A<S8LC>DYvx>V}c+9_~-r{_27+XF;}Dh_lX-u$V1d?h}CwZs-~)Yllfd zyGH6(CJpk;Patzrbi!DgYw}=Q9w7#U$X!mh?f#cKbGz-RQLS|#>Bn5zn&wt!D+y+5 zwVO`{1LxKz1M9`Vq^7^tBFu_B+Jw2(iDD+4ZaTXW42~JWnhsq;7p?YQT~i* z#x<5fO>iI=%EeTgFk>pO*%qEjl}lw$Dq~A_ex=)K-6miub;=s-)vKvM*?>H zLm<@e{RR41{mZ^Ht=<)Yx+_k9m=A}OWbI7Vy0k0m;4QIs2|ab2`lNW(^>#m4m`z#0 z@aj@~a%os!3Ow9hWES4})Lni*2`WNr=CuKbEgMJhjZHQLPat8bzqhGe622`m%aVoI zAuSNHUx}=om)J))M>1fg(Cph2KOvSGw(*xN(8IkI@JV|ufEZuG)RYd!Wxs5`TSir4 z);}g>b=zEce!)3mF5Yx zW@p9Rnng|hP$ZPl--6Y49cdQFx4IA4U1)Mxz8p07@dOwBcc&xsotxEaxBK01gCXo? 
zaf37Mu6J23M^Y`ey4Pc2Jejy*_Cl+A8~GEyKAXo0f zyUTle`|Wp;I0dZZ4)qa(2#M+2ebWq2;>*y@Fh`Xu2kV^W!~d!Ua46FY2n8TkvxR`-NBp$xQ6G zzOzxEP9tNF-CypyE+nu zg1B*TrPq{!NHRVA5bwodTExe?cDkftG15>>%u9ItMhD2-Y1^8w7J-kg$&EE?%d^{) z4+n_J8X#w{_tZjar$&K=cVwAs{q7TwB+j0%hh(pJD%+x7v;aGYxQb$T*{PCwtWvhHDF4sV~=N zK8cCVzdTKf%k1W8I879?zt7|Mdi0&YDCh0iBjfQcoV*2G_t{p&+6?364(nXFafJB^ zeVtSEY7USf3p&pe8I|DCwRBU*K@t>yi*d(ll;Z3p`d34CXFJ0;MqXyhKAa5?L5dv= zuxmMQ^hVNs%1{gnq(yz$Zvt^^?3Z%c?~b<{9{bpZ)RQ^wRas3(K1bvqfO#mlTT1gS zWI!k9KhEy&9lrur4g7kI|BfK_x8BXF=L3Z|gzOx?4EOPRn;E)whF+~?&;7^O*-EY? zi@dwKL1&>BC1NPQgMKgE3^*e+19Wo>WHq!xL7`mRQqNB+FdIV&6sp;crORqAPirJ2 zQ+$|iE$ubDJutzl69>EA6zpQScMH5d z$5ZFE3c6?8PuqAGr9$%b&S^a>%jK056A#^Bko8p1UR_#Lmbhc%5JI|txK(Hj#>JUb z^CR|;H3t_I8fYcdUXmBG%%EorNJa!FUq+~w{y`IzINe+Vvf~Wk5rh-e-ZvseeC_9R zrk#yK_tm=*Yq`E|9@RF81(AS`2uRH>cK@KSGf_SBH3-H`js3)=-t~Oz-jCNIvcK|- z0=Z-XTh%auDHev~Q=0n?#qt+&{BP-gNy}_5B;m0zNd4@{A&VX(+BbH(HZQN;7EzA0 zd->c$vscV{0_3l3;~@T^+@9%IDVzzFpY2>LR`*shI(rl9)SR7{SNE@~4j%~J4A4Eu zWfMOZqXuo6Yw3CD9DYCJ!3|!p7kuL^GRyvUap7Z2f(Ja!Tl5^5B0R(i+?3fI;8Yj6 z0P}VQoIo!YoeU4p!0UOCg|c&3!bfe0LqQZ?$};%l_WlRTH-rq}qXK|zo)7KMXF)?4 zdNIox&Vs2*>EO+G$CdUUZX1Y}ojxvMHoJTZAY5{lEzO;ik%%I2U0)2Or}GD} zY5WyUq$(H(RfNK0K9fu2tp3cfgO|dPFY91C1O>i8=pp9n zE1J1p9FTA{H}D`*Me)BF1P>{B!>qpZKDyp}DwEpbI<$HhP}Zp*Nc%red!FgF#>(0* z{Uyccq1X|=LvRScGMdjsX`KI0ii-8fh4ucO=3=5~kPJYp%zr%3(=QSzq-XMKN3@!y~LEL%hxpAV@A zF%N^`QgcOS8}+37K}ypJYRIrvQ1tLHx0|c|3jbVzzxnQ0A5m-TpMHh1l4Eygk|%RS zq-14!4D++uD1kpVI2Hf+#peZ$IFZ;grqaiVol2LYo!halNTvX*&Jy`HbOGt#cGCZv2l^0;nJ8VEYZ9EOf`OO%|e!+xv ziGKRx+my7r_3M4x4ufCVFIJD~*dqQQIdKyoi?Nx_g{beBm3=y_g&tfjA$>aAhlLw- zQKC3#3q`4MW(vx7IN;BEKeaKZ&l(*ce!jy$jpK`@?mUtxQTpbnz0c`pWbO2Mk~+6%%8wsa}E8|8}3|Ai7FpT>QK36!G-@ z^%NgxGT7=i=l3HADzjdzG zfH^4}afrqkj_1LaD`oG{Q-&Z{x!Sy5GDmrddPhZZOcHYavjdCm+gB>vE~~$k_qE6A z-78xGK@UJ6U&rL;@70%wP~*i4g?qGjVYZrJE<}KF%oRl@&R7pTEDrXC6q_1DAidf8QwxfFtG*{+ z6Pms|-`lBwlv$!ru*;1z&Eb!1qQQg}le3k|CgYGxc?0O#4wxu~T1w|jEb=5_ldwMw zq@PKDU{#U&DgPMd?u3gdX0ICY2(CfBmfh+EM|nv>~T+l=!#*{V$WH|EvPaaQ$=t>*SAf zfmz+>4|)Jr;twwjFf*8Eg9d%+d+hKxv-lsWJz}k_bUsdQw){a001~)X(7$CHq~bcQ zP&QK&Dk$KmMs|7AM)D%%tu68YGq?XKsWgFL;DSUZqAPCV_g}{Ob|nOu4v)eb*PrJC ztPA>U{ceHarpO`qzgv}m&8H0$*cUDtU5@{C&g5?*xkOIezy3?p{m0n*$;yn5$nO-S z*qIDdw_t%yUiZ8*3p0VHpZFQZP@y6)7v~OT}=KwQyATqp6bL3uUXr63MHFo`Elsdq)`30IsgA#t>^iXaR9ar zov0KO{71X^EX2sS&qfyO(~}igfNlqaa7h}Y=c)M?XgMV{-jvLRpC94=xByq%t5XjOyRY}GoZr;`k0pB+ zsjmi^BXcSbIQg%!!-%c)gb_xw8cRwFlWEDLVRqhk zQsKVMhtF+LH#l_#xl{Ypbj4rasEh>Eiu(Rsl*VeQi8fy`yP}kTvk*e5cBE}5S3Pj) zniQ-D1%X!1yT2X8$k(aOdGAsdJ|8Aee*RE3YrfcR$8{RYBj5V-{TF4iqe9z=c}o~$ zL5FkE8>3l*1b*kJ^W9}(WE?tlNdDncwHnUIlTIsG*E8F%^VZ!N5~sJ#5Y^qv7#p1p ziAG4AjCFgpp9dB_Ud+*H_0*x3Nuq0I(j`|g1xwU_Gv0{RWs)pk6D2wgr2+cyMWHPb zV_UQ&oB((I3sWrc31>GSyY;P>HTzcOYKYq+bMN`ZSR!cM9_UpkR~xi9cLwKi*zRu9 zL8CWV!!W;D674M6+2fdPkl7Foq;b~H!%x68Jp$-lG~oVJoS9D=&(aWft=E_M_oRQeQh{|V6REymm7B-unTk~|2Chdp zx!Kb%KCFE{-nexK%AB{iJOL%P%qJVvNUUdhMmz%7gshJcq#xs+S`>3+ZUmo8fdQQ; z<8Jpz${ziysVwKJOt4S7mRne9o;8UJIXbA7qq&brE&JIh_+-7VvsvYL%^G7nHBT)W zUkfpju$N~SqqUtOj;2-m#4cq7)@?NRJ9h^Va!$vh1&s`s18k073Lop-A!WFSN?87_ zsH1#U+L6Pahy+Tu)+Y%9c1yBNl~oqx;k$DJN{GjG&4S)t3rDR?Nq}03c5FnDR|Y-X z5T|ylXQy-nNW0Rmneko(4L=J(#US>BLF%I39?(&9Gs-DBB`SH|`Psu`w{gtk`WRqZ zTnOKN9+&Eo>9vs20Xd4M`h3gImMzsgv(c-UBYumrOMDusgbmNXx8`=zdCqyd9Pp(- zp4LzI>2#BRU_)4=+@YH8OCI$Hi$z@1)$&fk=$#Pu8`-Eg*@K``txAPaEn2yy1-rFt ziQOAJ{)ZNqatrqm9!JgAt1{WHV`zyO`X^%enUE3iRWuQ2h15K%++fdzSO0baGY3$V z>h!!aqL-_T2>@ick-YJjwg__%4y!*vRqUS867f3;aq-URsMKu5IT5G6K0ml~7>cX- zyvmBtZx(aA&}-#lvvsa9+qmsrAYIJhI%V3M^+psqq2JrpHnR3luPZ 
z$+OkB+W^zuA`~gzlz%knbYH2JV*U`X-C`%|d28E}#ED7O`3co7;e|CHP2CbG_ed?W zqyVyFBjvKZ!k5{l)4h3vJKqPc+>(0YgAC;Ag~qUMN_MXupWl9e$f%&xX>d@i6=bO2 zKjJnzq$e_QvFC@1*pyQ1pSNydiY-))jMLkF>t%f-wnTaxbueG4*Koc=nQ|NC$8aVN z*jF*YQa{>m>OPxb*jOlJdinTg{)#h06K!)iRjlH3KM_5e`eP~G%1{bL;znPB4wG2A zPRwn2pnnI?eWaKa=*KY;|HAi0g|77_SQ9NC!_?YIuj#Y)n>E+H$(qvg!8pc!!fn9S zQGoN&(E$x=pU)<|Pb~#h`|{OL3plkl0E-LPQMb0IT$PTjwKtq3PC9{2^73G|s4>); zfc>;rc`!etAiJuXxwBmp`*Ev3QL4hE+I5wu{8iED-JAIFWiLymfMl8MX!G6kIeOb# zRS|1wW#lx+IW0fkiP@q;H{_~9grxu%L65rfUF&%*Y7 z>|3H?9TIL5%5wyp*7zg&`lf?K3GE#%6`o9-)A&`3iTAb!s>2* z?`e_V9$IWj=IIqj)0yyWJ9)75yS;)xarztt3RpD(_iVA1O^JJE-d1%(j8NS!% z16hY2X+Cwc8(mZww-(0;S~r*Go7n0o4PoP6CcNwrNO|wRVs^`sLVf z#AHa1%>Lj*0S0Wq7InfES_d7RJD1CG8Do6KR_R`ItOFc6o%lqq+;*g>9-liZH)15s zv-Uh3uKpkZXiu{kS~W$tmXnHRMJVn8y*5?Euf(}OkAl0IMxYH`8 zoA7VD$#FDtjEi~XvF6e%3-t7nKrE)7yy+DN-IPp$dVNv~kK0E4;5TSFei}WGlVY-a ztRr%MkXPuehdS?C%jMFZ%0ZYa`p8+z9o^ugN6np%DT-W4E8@$LWz6eC?}0dguI$0T z?7zWFKV68sgZ4)Po$iVecZj{A#PgIRb6q2)^BfY}K|0WNQJw}hD47-=qFn&T=-<3g zLQFI5aTLJuaM_}V?fF8sf(u<$BV-Xk_s-|nxFB|hvs-?#W^OrOE4ara`(>+GxU1Sj@F3HV)_c{_j@x$X z3nI;@-nfLl50EiGIMUmzGAsoKv*#{U*1lzL^6&)6X9KGfH`;^ty604~=(0X#Lp4aX z9@}yEXSlI~jz`|!xd|`1D;-)VhueoYWgd@F-rESJTLI2}<6UbnMC8L%(4ImNX@QB( zJ_VhGJT26Bu8X6%fHTht+YW^~pC+x^zwHEPW#1YE*PUvqu-lz8Bi4LI zE*Q1%{Uy(7f($7-OU_k@KVF<9y>Rb5N9S@~i}BP;;&75Am?{s57la#!Qjx%YOP-7I z%bFui^nHnv%29rv25UYt>wS7$TR&zL7tTkw*Hxe#(IO#x(_VcS64%qJZ}_PbtI7)3 zKK_!%Y7D-_s;J{MtG}$}^cWYc)q5`m_qEQmfI?kG9C~U)e8bh*7P&Kr*pbDl?uuU9 z2vTr9OgEq8q$%HrayYUo%B|m3LcCLMxkbT<<6hEBqxx-GLoChe!>nw}iM@yBJedBq z!gBul5XViKmgJdk4yZav8TI)$(aow-v+YY>w~2C6xhgNz*NSD!ZEDTPklp>07slLr zM+hXIlb4PC8_=)Felp%ryzP|u#REIO-aS!EIQMyI^Y;!}`eiwkGff$z|7w- z&=H|xcy^^vkzi={XHM~X9%tM=O@R5*w-ZTVZ$l8Cloetz#l0+-a!A$gTO_|>F=V;3 zlMRg;Okv=!x@y1D{LaiEUfKj82?cc*01+_8Fm)k)frd;_(Tinhs4uL97qS_AkuQw7 zprM>%c9*pT`(nQ*ZmUod`n<)Zq?3)SNH8}pbpjkw7{5ww;5}SG2hT6$&oecr`g``% z1wotB&X_kd#r107K%|X$a4reUj_{%|Osv9c40`l_wpz+eEVSPp+61=UTUh(YbUYO5 zw_U<^R>Kj3F*1X-b*JU}kJb$sZF7ZvSS|+KSX(H>mp{WBpQZ_g9Ty(zF5{F3ft_>i zEA-Vw+cXKmI+fqK{D?fjht-z&@O<1}B9@_*WINDY#{5-zpkIQJHVQmx0j>7g;r{+j zx2ckY0aXu^9dN80E`lJDxLphG@?Sf8So?mf(c60GP%F^}zM@OYq}OurQ~K75kx>Et^fld&9^wuTDC4hw@5T-qo-JQzI^DzUu7ieM##~ zY*Z-{FK6WOy#9G^r-KW}`xO>m1CK@PGE38?#*vnkC4^Ov+mU&SbJIIo-1S4)L@}}? zNQY12=6r4(5=jDZ1(<<3U|E?9uzxqM~x;z>Gl;e3&)UvOV8Bd>q@1s z-Kx&a1+RDl2u$1k$5?5sm)J+(=uBMS@&sD6c(@*VraTlLNle@oA2kU2HhQE9L3vRd zj862Dm{WYmWt^H(VdGaP;T>l0sFJyPbfSVPAw|qSzl+8`-AYqafq<*pqc_#J%}Vm~ z#FZ4R92&5d4DgnnC5n=}hvNAhb5LKI9rZ30<1(HNUrcXY0T>wSRgOtZZp6-Rz^7NG zSNkRO9%Fr9DjQk_JKy++qr`oF-T{4HAczi#1~me&nDASg0k)?z^K%qK9NA}{3RA-N zJ;^^jSiYa2Y*S>QUm12#5CxlYTC=V$G)xWkQfSt#Auz7N)v7WWj3MU$5#MPQo|GQ4 z>R>kAd^=YUk}o%Utp>yz^@hc$RQ{VK4FMEeR*KZ?wrtdlUld8W@gf4Qers7bWg^6r zT-^j2+)#Y!&oJf!depS!EqA^>xVdU0*C7My_r^JJBeaD0Gf0kVj;}x@v6Ib^fWXLNVKty!8!*oR*qQ&946jA^8jR#CrSMZ53#BX{2~6 znqYigC@=G}VW~LAT}cW|)!_=wQJF18-YV3BtNITKJ}WGB9(g%7!?m@g+s_DqIxo1! zBHK;w!-=^)R&ZwT@{!J6(;OqZs;qd6?PDH~D1^d)p2dg_R;ZlT8eWhd25euGPdR=ZW()k639slv{Zze~-0s{(! 
zZVh~b9e5soWJ=|gwFLT8a0J4I)oCo4TGV_)W{0ufg9k=J%Mb(HZ^QDJGmSE}?Ssm- zHHPL~W@Jeu!9DVBI7auy*<1k>3AyZXPxdbXAOCw!nJMt8@KM#0aCcPR*Gs z2LgVlF>Yu-!bGF#e6Q9PAKh#^gricO;mr~t$s=#uT8Up;T8PNriwes#BLLhG4;^Pq zROSEZa!u244A$#?)$`B+x0ro(ILk!Q+p<3@$%A*czR#)0VM;F|XMq2mKHztXwjwr7 zdyL(7dBE#R&!uo8z8wBWJku_n4o9Q84ScsyH04_jr6|LIbESP#^X>3OH{Hj3pA+Hs z84v|#rDuHM+4w^Kv3*BfX_ju-7S~dXuHXZ3K~cj*KWaaquO~fO^o8J8v3@WM`t}sI zZwjyS8sjgOJiH&mqI!Gfsy6Dk*UfmiwpbOkC(2f)=kLM0;PrL6hJL3fA8^>{6$(0# zTF)1`IfkRK|~!2WQ8EM(H0X~BPHA6#=65zh!up5NaW7iy*Jp|5xDV!lMopHSV-HG93U?@A9FSmf%I zk=rI_?tCCB;C4vP@c8Ozhh-7N8Q%s4ToH_y%reupktD&hlQE1f;@QbawgPudVLxk4 z565C2gnBQK3K2$QpNx>JuBl$@j=13eSv*G0e;=Zk=64C@As8Xb7>_@y5Kn`+EY6k2TDxf05sdmj>lBtUNH+~>Fz7}rzB^! zr*^K}CiDWh7cdlFq-al*1|O94LrxIogA*eCNnY%zl-qXdJwrebofZdqA*|cIt|v9| zvVFFG2J_1}rRZ2rzx-`DzYyOzB+|e#0M&T!=O3N>BVyUNt{>eTModAy`T2C!$0R-a zOInj01pLFlm?}*lGlY!ZPm(vU&GP~6r@Fa&dIi1^fsRgYFCh=%eBB7jq9hpbYLCYc zEEw31vVcuW{P=YY4!L-~rV7l4`DHd3xrfyBzK9;?1fS$=4ECYEC!WYB5;V*XkJBR% zA5m-0s4>;HGyS>5j1-lyu#^UI@FM`Radl~xueIw1PnFs7ChKgVF-Y>`KhziS=wu+& z_QL!Q1x^91csYCm-Vd01Ew&9msAZ=h7f{?6l$^xpKldgqA{nJcz7Y9qvMs;5yCfOi zUODiybsJqrOhqe-xOydY%lkV^H#*7Qkf`eih})}{XCWhGWE(ie?||fP_PBYEFx`~k z?eWN6egce`-hNO1K8&uCa@tx7beJ@H4o3g1FxcS??TB>V5!AB^BrntiGF*>zkkzlo z{L1g)1zgILGXx9HC$r{ZjvSUF7D$K|WuYq9@Bvk?Vxo(L->(kxDopo~}*rIhh1CXx) z>8qcD)-9#$PKLzy$L*fdFAvwuZ48JD#8pBM6iyZ?0?|erE1ZIAjtCb^!KwBKzLr1N z16wZa865l2P&_#-dbD-Y$E@|4c+1KPS&*ga^)Ukx?4^hT3w|2 z4#c4rOp(AF2n*6+AiE}E8Wv_;XpqsIJ=zW#AAq>FS8F0HB>+fpKN233??zKi*MN~v ztDURw>vqYZioq1;A|b4+pwEz)(VK%8?vVMj(xK!^0ZpD+=~wrbFY*0U?XC|dOSE1j zc1bq7IV+s(SW5z~OE>%;GCV%FFGU^a>V(v*D0-)11Cm)Znja$^bMTUyN^wrq@1*!eL;P3|r34cesk=r@-zws;(VzJIG-)3k~rf z@wrbs!Z>oZfHJR!keUHx7~xX~o~?#xfFo|1h~U-CRAb!m%Rm0Dk--$q;zO#bv^A{u}3B2xif zF^(kpVEy{q?t!6z?klrrXIXmYM(l{kIiJGrA^=ofIB-0yA3n65W*5FkBQRM7|3VMH zupQ(TV)lB{IP2cc!sV#B2|dtq6C$naQ0k_%Xb+X}!r(D%L(YLId!oO5ARi`BzLB^4`Pfu$)J^LAlE zEOvdeK9GIEBuQNaHVWpT}H9t+z}w?E!mQ z)dv- z^Y1-tS$pxAnKDR8WH^)7iR(yFyLA4-V+4*7)5zY$nFrvgeJhk7xd|e?^i#dFrNVL= z{&f}h7`5G`Yo6=-W_xpqC!*AwuLPtKF_H72^bDF7RUS-eR4kz)`1&Sahnp=kqfj3& zAqSC$v}|EAP>#X8SH?ijTfkPVC-^+`17zfFtL7K#USX+N{Nwjc&mC=xPvQ?c>qoBe z#`~LFzLObO>PyeXO>Ux+A4uh)gETR%_UpG+EPhoqT_5CYMIrKPw2WwK`}VqDp_S9> z8zJI-7wj?oLFL$s;Afd$iLGHg2al03fw*qRecgwDoXUK?8)2eFemiDBV6gGi=j_2Ch!Yh(wc3`;I*bNg|3sj3iIiU-i94Xq``@bGeSPWq9;)vs7@|BS3;W4&$ z2B(Ygjzz=fpX0>RQVr4`VdOy+D^9+c=m=N)ved9NERv72Wdrk8%AovMH|Yll|JOP@et&0b3%A?9QKjHgEuW z*|oy(@cl6+>?4BC0nk1kOtgwQWpXr^UB!g88^85TG2ClUjv)7A-%poMq}+HnN&yn+ z#Htfp`z63JSHIl;Gk%3g=i{Z88tr959Fs&d6~^HZp9R z=;2a&_J;3D2#99r>f5UP$ErPt&i)DnkHAzmmP}Pv;PjEbf>(9WC8`3)efk=eM-fM< zv-N)5!DT9K7(tGw2H*k=6Kzoc^|k*SHDB>-TYZ&le3vBbHrPmr8|fh>*B4gEesT5* zqI_u(Q6r-o`kOiqtS5Es3F7l{`PgK0pP{X?9rVpdUMkl)`G>EHeUN5N2OmEW%f73H zg8tvTnnhnwe8kJwlNV5Up`4ny<1}!g+yjrj# zv!61kpl;rSSlw1A{NC()t6q_lE-lEKv3B1M8ZKH25KKQ6c>RxZM#A}hazT-|*~IV6{hMWG8Z$$4``YL#(NTe=O~gk4@YD;Cwb1d<{7J%XMm{o1mwS%ug?P+x zm+JM(6CKfSRFESg+OdDgw%u~YTIqHj`ta(24^cZ@py}Z|uDeaA|GFT8Q@B zm%VYAGrbfmWI_K*?jz5#VsXn(t&LH`q|TCC+PLau1$+69i<+iJba#Nz?8t=bgZmv= zY-4EY>=_(8Qd`4_1CRlm{*J$b4Sy$Tfi&cIBy}38nh7|fRHi4=e5ocIC1-q=?uPrE zH+PK>Zu3s|1lLs&`tx6wE7mLQCY?Oyid#yOKj*e?-n}@Tsr|T*vtH21d(_kKZaiP) z7fr20=W25M!fIw6CK1?4*Rs!x`M=infBt#TXv1*Zpxp@SJhWxqlH7JmXvG;itk<>D zZX~Bl#EOPd5o+9g(wjj0(t|}yHPZfLy`sf!Ra5)B5kq5v?iuk}_|A<6a#q~SUdH(u ziNy9ZpDf#jDvdSLHiCTs4W#u1ZYW*WT_67b=|DVL1pWGQ*Vv8e85bKjm7)OHvvIJN zr-~FWwMVzWo6odgoXSNnYy{ftY5ZttOmaSgtlY!)(Td)Rv8O0_MJZ-@diJaFi5Zdv z-kmCvbu+{^Ed&-m(?Nh1`3KF7a3`Uxd=&ZzZ6gblLnP}+Wbg;_@Sk!c(|$%s7)#%u z{a5Nk9$4s=dEnuHl=oZoPtW{FCT^m}U-9%m(IHvX(7=lM*x)=PSsbRRuDO4^f;Qxl 
zOkTRU_QIw(^zcI4)t_YnL6gD)L>A-)68qnyUx>EQff&dqxbd@@fBT#N9J@N4I*b0) z%KJZToWB;7tPA)M`Wh2+rP!Y)@@zQDkO3`Q&~Empm-bm;0>OYPChOAEH;MRf;?u_l zES-wq#D9@608HUN15>SSLnQxg@NIN$-=GWObN}%6|KlAYXN=_PI_o|7GuWHvO$w(P zK4-P;UaV^jSU5y&=*$1gOa0@n&X`jFhI@+ygNGF^2zW((*B#e1{-_?3y=-4gHjTrj)JF!&0LbvB8uE|jgKk!d zDJQCXw|7T84zRzC$Q*Q~w*JICNif=+rrJM4S@oVbpQ1_Ur=Y>4jaguXi2m3WpVlmK znp#n++4AT&{5+q=pDnvtk}KAKU1tGA<`{W_CKMhdz-JKydwDd=EVuwm=h(r z;25;vs%A@TTYIZCA^vjvi7_m3dS>E2=x;Ygo3l^YkE?-@;_Y+PU#O*fci|nMr2vk+ z|K;#kK$wrX0#t1oH&TbO(9;x#2_<-C%E<3-W3gYTQpo-tKUht5aO%c@~5&qB=bs z0k871u?v}VEKIX|OL4@86*rTtU*R9tyNv=GmG9s+rrNVkTG4H*daT7c$_#3|X8F;D zb)Q%~+_1$-QarD*ZU6VgB`{oc{Rn8UJiF(d-rtC6wNTO3Hl)}vd3ZLmC0iP`_PVLy zKkWJyG+^l0!m)P%kk%p8Mj(`DJ8{bejyLVW{4!aU;-L*a;n#-TDG6N|*cV2~PH8+4 z6Ah3jy>Jru2V295d%?7m^^_1-O52(TH;$$nuaf0=t&bHVK*A~QE0Uu1ne`j{ZBy{4 z{`d&$zMNND=LXNYd^+w;WY3us?!N)V;Wdyya&V_cr%KDTsCW1-02y}_asw0&X;>_= z0OM|?n8z+MqIJQ|lL zKHp>s3M0P)iCqui+@GA`}gSA;WDvE`ihn`Nb#!I(SbAJ^w+?XE`^ zXf>P4c&_dz6!(1ytmHnho`$|lsukG+>%7Bf&6;ujn$@CJVG7=IN|e_wv*;zk=XZ)e zxHgU37)Vz7_1@ww;4jW^B^6?6rIu^a)*7l=J;p1V@$ql@XbKkT$4!-qsxEw*Y&4(D zN_nsBMRi1%GOB}ce!1jHGLpnsm<%+D%59AxRsIx4Qa}Uu61!~Mem9!`GvNKa{RPq+ zcF*$cLXtjxJ-J* zb7$76_?ZM~*EDWASTf@-TATgzS{blA* zYDVpAITEw;(5AWE!YY>xjf$=NP1I6JL6tAu92t5QDPnm<_w%%Dn|Eda40alIRE~8G zZ$z>T@(8~5srh#{Ndb=iFE%MT)%1xD$o8cBV6O6mk(aB#R>?CG0AM2V`8}ur?jS}? zr)hbnc=j#O?I?=J)xL4Tlni`-T50|H*8l~w))AwIP>vUyQiB6X#`E6zTb5eD;)vN* zN%Oc|LdSNt8dsj+LfnAG;@u$NtZ~?EJ29fkny_C~S(WuPn>oRME~x?H+LC%$cfHS2 zfY$P)f1wk5bIhrZt7vkvRW!1#NfQMQir3@Wbw6 z&k>*f)?VZCjt+n=U5YM*9pOtblT#s7oOYk%YKT*QhV4H^gH^KSe^r@n?3$9IO?}-5 zxw=~-nW5wk!RIFk*Y}>T?&^q>ta!3HqIy=khG>>j>#~RuV(b~uC-&HgUaT8|LcY3X zMlCM^SLpW2LP~egLmMDWwU5?DTZ1;eNaZ3g_Lk6)AZiv3t(P8(73VtB8g^^HB|L~- z*7A?tAY5d}K!Qo+7vKye{$BDZ7`Rlj`*`f-pyhWhc3)(=TG1iPlb3yYRCHG-SBr&l zBc3XlM3F6W#xi-*ZUKpAD7VvLcnYMgfafX@2P261s71eHN53~4|Lw@A*YNHc7ksC| zHrW0m=mO28lK9egZ_;eB0gq6-!GTt_P^-u<@k*!w&hFRyd^r9#1wq@3y$|gy3IeeV zTGM4;B}~L*{gLjkJ@?O@igha0kLR%dWK zo%>_Gjc1zfAHF}Lc@FqDuR#^_G`1WET467pCg3pAcK{kA>*c_A9C{)PFCU&4spRM3 zZTk+8Csn%KHMAT zHU%`z{#Tuv>5ZK1Hn$V5Bmr!GJT{TS?E+zR>Ydt>-He=47&=43399iXQ|nWH-DT;I z&w|RIo|es(Hd8*V*Sjd`5{3&q^mZ?rVYYqR{&vfA|MtwzF`tkE~;%gni01lB3rZ(fdpFE+nr~&NgH}PzCdN8i-4}AEai4 zI6k`e;PLkY`<~JA*5ts}zCk)d-S(kf%?l+a%^$M?RgJ957XaGxXoFJj-NIA*48kCh zRL$M^pi8?(wwU|1g-9>4Z*@3%%ZB@98HnxgVrPbrPmIZ;B0^pWs_M>mv3)PPx>C@UzpGT+|WphK5PBaY*3aawV;b#x;r^yuP- zrreAqe|$?-rv;X#RP_ccZrS&@!Pax9Lgc-AABdXWC*sy@)}=8^fd_$FxHbPOi9*om z&s}$s3u3PVe-7v5tk#U*21ayLYW;zBPVK(R658n3?g5kB%a^sEFL~=?+I|)lXxU_I z-X?{PR+A$371E9rh>eVBf4S8r)|$H;J~k>@+{{Pum%R3;#vrV}2ctpFPfWJKMRPH% zXO=jC{c00fV7Am0*eF5#yx$_V43b#Q_4);m#_J1)^W6p3`s)m?hEbPhe5qXl-jo}~ z|8Uiix|&LN+NV0}jGF6P&q9yqjTvgjaw`+{rPFy3#v@pZl?A}-yvfIvz`LzFgqVIa z7=E&JXi!nYc-BTWiIAl{m*AqLnJgG`WJRIqK`;=Z=DAfl75MDYOP3jjf=)9k*$*!( z?>3*6R(_P!i_Eah&-_S^_$r<&8Wq>o%Wez4P40KvZ+6LaNzpP2*nwys%T)*jRIskW(wxtku)Q@eWFk_5&T+5AT5}Vh7%ISp2i`NtzZ@*F z{5$9Ri#6}d*hg(JYP`>P2=0wpy(?V`dC~XmrRB*QH&t40Cx-_yfpPVda-&On8S14V zB3$*Zv|H~o%X_RX;j0f!ML!d>7F95wB-EEbP;j~5dv~`rj(&~MG=CK>QsCsEB*`Cv>Zg{IBwUbE;s<3FP$8M-*+%*Y6!t25KMdhr zF2?vU#-}|#gIPv8Vh$q&x$DiTHBN!_20Q41=MSz3$OCq<-=cF!i;-T?-t|OQI;&BzECH)m`#K zl~xs->-q^G<1e$3Jwc}KCpx3H1%Cj zvP#Tj7JE%LN{*JS*5W!x$}L=Q;`;b~KlVWDT;$Q!y$0_aKnS|nmwX*?ge+|>_p)gf zR>~hp@}CtDeu$q}2k+7lY+uYq#C>oVcu}qT(S@Ds*IIUz;Oiz5eC^=Mg?&jAp;p!! 
z4ES}iMfqw446p4GA}@9qHJwDs`GC4h8+{93CFbd9-=fna?8Y zs=u`!wLob%r;W4qIdZhN;sAl;D2EPINFC~5j5VMHv<~O7W)h-=AhOVVaX01399Nm} z;d>t0R=ui)GVgEt7iZAuE9LhkkpQ;rNPgR?W@d~eI9V$eGe|4hbfMFBx*^ca%Lyc( z{sC2E%)K(P#I-U;xY^8zZq_G_#@Np@p>aKp{3cCcX7SBH-qphp>`m`s7oV{={ARiILWib`?wTTmO9t7Sk&0p!vpUzy#e+TqSVySAOuSV=k)z9B6 z3{!}<Tav)?)0jclS8 zjg0l*xnTu3Nd{f=d?jZSY{?QN=-F*!akFJ%Kcqty03{WQSFcaI`QMiPzMbxIc;&MO zLD@dIeAAf*aZ)Ji*&dDwLc8_g6t9TJt_dHIZn=a@(}!4B+Yl!L0FrCp1_>ben+Ubj zl%dkk1$>zPtDrT=-3I8(L%0+oSaaZ1sCP12bwbrw6&F|>1U~a?RrV`$(IKgieZa6v z?-bAHJAnUB?p&LlYT2H&{H`CYi^K#HG|R89nfJ4MvdjbHVcJF-O|QdT%o^Mcnx7q; zw54nHV+Ku+54k-y^WaBh6q4{$!4Z`9K~J45h)NN$9u?{xhq`T>0`j7yxazoz4>w{K z3Vij??%Zd>>NrGx)ouVgJIR3KCTwAHzA@#ui{AuqUFSSs0zJW&vo*k*%JcggE1xuw z-*IuGoftDCCpxf^)B}$e`yJEoWzJUkC3rfLUO}=T9FJrh@<3k>ny*}Ci+5ouxH~6F zD*<2pnoXae`6FHl`bJ1NAMsAyzDzdTmEEExd|ef6Vb^5ocl9DtYf%xx`nYwhGxcT! z+!lEB>KLwi8I!Z~F44Ex7elB98z#?1*0#@`6^re@GiK%+R)h&L2;QW3me)v-@Dgl3 z41w1io8EIL_DUG1!C}v~9R}mOPSUJboWGIj@bg@dAK+w@EJO``lcS(l!)w0Sf9exQ zwY=vIN}dPtZj^6A$o*Xt6SAXr(+g!{kEDKbdn+zd2L%Psdyz2)yYagDm2K|MrIOLa zEa4U!`in{~EwMRIu_LxfUx4g=UO+3T5U_qyy+*$FMoF5wZ}l*^Dql}@l6ke%e7m@ahm#EB+u$}OF^d-M{O^BZKXmTni+6# z=!@$>4FeD~$>;Nc-z#0$tN@^$A`@|IM0{XS!@tL(;`zEm5_-frF8SbVTL}BE%)$Yx zk(Vfv?fjzzFvyCt+yWku^#h7b^RI0%F6^Vpk1N1&1y@BBTWpL+oDRxtD-K`+NQK_j zVZpI8CDP=72(`4AXBT`YRKk~@{ zNCT?(sbCh+KI;qYC#|N5HNxRxEw}hJr2yqtcMHTyG$)Ux<7L*pGay&Mda9@~0R@P@ zJj2YYVeB)p?m*bD+OGmaq)1YQ0lgH5&*&(6?ktCEM-kUGgg7)wR$Vi)Z|P&M?t5&5 zzUAmRWG}WF{4a|na9il)eW9x*5rQ)#ttnD}@pGzX1STfj1!VwM;~=0u3y;^&8+*Xx z*g`Qj%4~^QXnH(umjdKQOIn@fkIACt1wj~{YjOSkq7EN?D4Q?rT=nj;=cLR`Y^Bc#{XTj7jWG`~apN#F z{-pWFdzN#t)Yz#ah0|q49VK4k!3E>8_Jt)OC!nvn*GE*{bR@BDkIHc)&NxV;VrKN zQkDZ>1dBg@`VN}5a^`-YIS!sA0}}^a^Dl&y!j-j=xT;Fz0?s==1%g^&vz49p;xZ!jUCOG&`J@?xX%gM4SM_ z-s9#2Dvbg1Z>>Ws#ht@8nb3FRS+)E@xOlnf{r#t21gRXNS7eZSa%ctrcP%`FI&KcXCFc3j#Je}Cvd7UkhZH`YgViswi zkflh9h+|mGIE@(sm=LE*k@0U&@uKvjPsGuS;gXsTL&A;MYK>jTaBMPSeq_DD%VmJ8&z{t)oXjtG;$Mv2gf6v-O9Ds$m>PR{K%Vx zvxCwdk; zd*qjfx4TCLYz_?#HFyvAD#ZcC;PKMB($a=L#Ve*%G>nJgCh}M!WND!m6tS6CdlLDa zgi9*3Yc67TvI9H%qW%E{4 z-YC&eEVpRi=tVVty$|j^W*~(`*|}V(hQrgs-P4*ZE)}-ks2}N38WGe_dc^o7;tl*V zfJ{BHc)AP(X0%PAOu}~|jP4qYHkq}LV8Y{W=w+K_oja03{4oV91B6sW@%Qn4zXYHP zGlsL=EsDPdIIX{^VdlJ)(r%j0{nljMJS_h=GDx6>hElsv3EX+cNHnBwbEwcD*BkaH z(!dxpyQb?soH^?z#bL|Y=i6*sY*d6Y+xc%n6zmJUWdq>;9tJn8H=fe8i_CBQp5JK1 z&g_e~B16A}$z=+p-%CTun-Y8IwVH2y)>U%6lJ$lgEyuQ{J%JQ7j4TU)cPQpZ$CR9p z38u?v`IsR~asC$9-0b92p%RBzl2w3|M>5^cRXRcZ#+Yro&%~WlL1xlhSniQ>_q zvr{Ka$y7kJB~+b0v_FkHIsO2#3Z>BTU)MHRO2Fa%0feDcEdWe(s~i9Z$!k5$f5NK% zZbBKWA;s*10N=Vx>6vmK@Zt8zLWRwIz1KQD^_Jn^xEdL(no&`k$Ojw-#*U6w31NNR!q)LF|e@F4Be|k(o znp<0@)&6bxLRl0r-$b6l_{l#^)?aY^73H|JS*ZqBE6PY!8s3+$Le--rx7SwbaBi*#3BqmLl_yo=oMGfwtDD aV@=MYj=5B#ST+^#G0?kpwM@q${C@yek+qos literal 54215 zcmd?RcT|&0`!x#zmC`x+wd7~JMO zbm|Zr8yly#)(s;zHg*6T+ivay`&lJBrpA)2Uwa)i^)=bp%3=<0+3sV#KM2+`_h4h= zYW(rD>-Ytor)+G!j@mag@A=xyZ4=y%4U8}u3yd}HqHT5;$HmB8uL@{om0Njkm2}{8 zLUr)Ng&PNM9!foAd^a}Ju~Y1{Mzi~Fk>=6I0z4g`f?b}6e0zFuFZ)B$lR^!bKJIO~ zdg z*uTE}9?CT8zkYY&U*Bi%zJUF^`mvig-yHym|EuHg|_Ryrr+8kYav_r5LO(J4#In8(apV%fd6KeFRJHMt8+)7du?JHdDAR`&UyTcO@nvEc-k+1^

zk-@oM@9T(AgJDC{<`~S*M&f*QW9P384zyZWMA+n`C9;l#SHqLQ0M@&o?vEtYdTmYu zR>YbhV*Z9yM`3~oZ(+25&MQgLpSKT<)hBp4)2)Dhv~-T@$f-OSPn$TLiEa8CmTHLP zXL!N;R;Sw9mAD^9mwuD@#eMgR_UP^ni-BQmmLNL#O!R=DL(HHmV9wb>bJWkRJ+GtE zH;M0Bs;`p2@N|-r1*=d$C9jLt_fRrQG4(0LY1igN0wxmxIGvF+3fR{nG+l8Ra2K1m z&~Za3j%WU2C*WA#yg$T8J_g(_oZgIohri5T2hdhj8G!6W!xW4QpxBIebd!2GHf+A& zhq)?3=RUqug0dl!hIKFxHU|D&+rd-2-rI|r*0li_Tg)C4*)k+_czkluk};XFy(So& z&q6snSWa*X0Mi|yUktZ&V|>4uSNYf?rt^1~Mv1*9f*Z5eoFho4Ky>{bAGHB+1!}g= z=Y94vtEJLcn;HSo(8(21kL+skSI<^*2B>-_yx+{1o!?*cxz<=~C_8W_q5ljT>O+4QxTAlYdQtt5|}Y|0Euh#-u8-k!)iKw(G`sk;#Cy_c1SZ6 zONP9%*ieSoTbwBHjPA(8O!r2C+o%dHr-ZBEKGq>wrepbHI*|w5!8YYJK2{-U!MFjY zzPt^({KXRsy;U^G4boUd3=sr2$^$c%&9vLhDVUE534N$H4n3t=}9lL4pA{DTl z*TTHE9PrX%B}{LUv@ViuiQ?GQX&a5TO>99#IRP?%xOjm;zI<+a9Sv<(lQHp~lZgBE zp-%D5utPadgD#>2J!M{*YV0_&y=9z(9DUs9alS^3JpI?!;PMS698K!;P#b&V8}RL% z2PC}QI%Pj@uZdAIBQe{{%Qf?z9s}NDM(6|3_LUxeIRy>S6alG+4!+ydtm zl>=1mJ7dodyUD2gC4Y_;UnE%<&PjAe!;tXrt;vR42Fsw!U)4qH;` zSH0KG#Re(nJz(2q1l6!J=evG4ATgV2(|*);9~+PNR$WYdhmm z)(aF6M+y(%F~Db#;sE>(@F3H@fb4n5ZVG9R1cLqI%~KNe7B*4gVX@yqok?@y@IRA% zYDe&;;Rx?_EO%ZH@Wxx%Ebb}>N{=eq;8Y?}W;oT7qNy#HGP6}%Gf7SEe`@x2?Ta_Y z<5D;w009fB)52SBa-@=*F#81-%6dy|fG6zwt!=FOJ$~G&KWQ7+t;!|{qqVx0={-jVDeV|U zvDsFmEgob6UG|@PYTI}6yurF6mtNvBXC_%Z7m&mRN-SD8-!8aHn|9dL; qzcR-EzwqDx>jepmKrwt=orl+Mt41<;UTv)j*RL4;QFh7k-v0qVrrY=c diff --git a/docs/_static/images/IG_model_endpoints_overview.png b/docs/_static/images/IG_model_endpoints_overview.png index fc4a3288fc237ab4addf3d09d57824829f634464..7c8505387760c5de84d03bd6c75ff4d60d589617 100644 GIT binary patch literal 134770 zcmeFZWmsIxvOkQw26qdPU;_kqmk zK6}4k-cNU)HP7^{)m7bHUH$8>?yB`wUQQAfi4X|_0s>W9N=y*~0&WWe0%{2X4t$49 zd4?7O0#e&lR8(GCRFqua&f3V-!Vm)DO|)ZFi;Rc@eh-;or@(6>LPEk5Y;090EDMzf zn6II@o@;je$#9?eHLVRa%$r~=WyihOUSVIM!-$A5qM}-&c4)nqI~_kT8|QB3Yr8xX z2ui@Sd2k#-1$@!#XN{$2F?2Wnbl%b)?u9}Q5jjG~S7eH4)=_^$`asH57;bv^$$-l< zMxZS%L?xj-W7=rb+Iy5>XooKB%8zcS!4$F#qteO5w!gPKhXz$#GdA_~EO+ZeQ27Es zQ9su?$vFc3p85vuhECq#sI%=jmk#h<-~LnMgi(k?m>F@Xnuu?iFSfE+!A?cUwKwa! z6HNpwSA>opnJc!A0?&K2ARpp$!uL9f!cl7C!E_AHPwqhx#x^?Zm^K;Qr0%c>92gi8 zoC>)h1{L=w2`bV1RXey009|f3IPL7L4v=8;4cIObW9)wJoxt&_$!tP_1japtxV|O z(ojpkG72k+N=t)(mGtcl4Xx}=tQ~@2puE7WW=)mV9Moh#@aS7xGU*yv>lre+Slav| zf#7rD0VgdD9dyZEEG?|;d0hA@|CWOXoc?tiKuP|$EDq-Ulxni_M*g;^1Jz0{}QXJ2N@IW3sk02C#5*a|4)J0j#Wy;2ezh zu2v4ZE{s<8RDWmk8y_)4dwn}o8wXQsEAn4_b@i+r9r!6Je--qP&)@55=wkZMlC13i zr&-_z0)9OKurM(L{=pke%J=IokG!dip@q7bsU=uE;4%a_-?8%jE&qQ!`Dcm$LaFvo zN)}EIwtpr4mq))RRk1g;6ScMkm+2tz&#w6&;(vYkKZJaMUrqlPQT$!bf4d77v;Yzx z;2)hPfV6wA5C;Jv1R*UZtn31Llm?%QEspgwG}Uh_mD*U|ZUiY&iY_?hmAHVI0tSw~ z5DNu40wvO$5Tuys)YrSO73}0B1O3XiSFe|jRZ#C9u3-hXddTHeW-&BLAbyC-GH-!+s1gx1HEXh9H2`dL_)% zekmPhgVCq^E0)at)jqZHNNNm^;{kpK{AnpZl>xNvmoIIn$e_0hQTCH-e27sHe{c!; zQbTm;^hS`x(0(L+E1&u0wRFtuj&%BuVK9`G82I>a@;_#9fW#{Ge(Jp*I9~|~=9ulp zgL-ZTG>;}x(6@ZGN%~_ABo}`5#hAFep+vn}F-#`vQz9)64iv_BTfka+dkZGkW(y9M z+Y$#8=Gm_2x~DewR`Z{66G|NTki<&eXTK>OE#!ds?`&S3_5M{hBV2)nAEFZCX>p*T z8ff*BKSqO8QJ5e&!((9{ZGkQs_^{m8l$tbu)E)nJU+`5X`L1N5MaUn^;a>)6GkCmQ z(sUaBabN!n^D`<)Bs(wv{=7dOZe#jU{v$rzf=Meg?yEp%jpPKptv{8sPnxB6)2tBlW! 
zFrPQ(-|ZT-X=ADNxU)*NdNg*eu5pxw?>8>@h;EnZeHGHt$$YFo{bI@wR<4*fp%t|S ztdwvjNP)>5nTW;cKhVu?cqnD&jV_goO_t|si(1WQn{m@hkDshD+%h!A^ZeBp8MUu$ z1lr%yDU~UI6DKs*FHdSG@?pIyR*>5d%4j7p6t=vLI^8~)HqQ%R@obVQTjEZ=*%(BD zDfx-~i7P0Ddys#kMx7tb%Koe!Q@uf_QRy3nzMJVvc2+gTiRl{3 z&U|{ekr#{IriwLc6nBrs_0;NB1`3vx9aBO0KTNyg<#%HlTv_{LsH?X|VwKLcBP{)N z_>PLljX1wz(Z)v#lHsp2ah5DH(0)t}sYoxC&(}ts|6v&Pcz0;6wxug~(yciD&s z-{CQ7BRP(=I;q#XQMV!5wJt=KR;6%S4*;<85_jq?m){nDvJ9Sa{gB2gX;ol@+T|Eg zPMQgSL-s9cawqu`;Skt}l)GJ7$4ISG^i5Z#k!l-xmoW~yLpvjy`^!VIO3a-%Af7&< zJaHC@1mfJL7@n%yv}Cr?WBa}TEQNX9(8P6sF|}u4=I-diG||>o;k%u=ouNd$L*G;Y zf^A%WxxOL6AD|@!qQB%9?yb%od%O~je3CK*vVF@#RqHFJjSEXkX3!~h)(uRwahcQA zNxQmn7oPHP|3qg!50gEkEiW7m%hPCQK;!Af)UT$!l`-z?}fDuT*$uB zX;5mXVexIidEVBh^0_V$JDJ2{Wy%KL?7!qYvIYB37f@Z3fEPt+jU)XcNxRLcWq( zgW01IXa5X!QP!&TsO@FNvm)~`^?qVRAlT<{QP-M`vPT?mCB1L` zE}KF#RhTP__k%$>m+SM{v*goI!p2b2NwnD4oOmLZi1ZjB8uDz7#%QK|0Yb+QgLS)J zXv^I;_pQEleOsDtD!GPD`$%MwlFu5g4soQHf<~At1|J1M{#u54E@ZQ1pS0*!fh#U- zG#}HK^gT5{yJUebd#EsX>m@P(T$~6mXD1q5&R@^f#L`1;x4KyE9C8NO~>_mp`ivVyp7=3tJ6qBUWqH%-f9!(XXS9(b`T}n zb_AYIs|+ty`_uU~3a((+E%D+UCDF*HYtR-yv`Ia|IeJ?CFyFPJ`W#oj_JWOw$D|oW z9U9-iKPemywA&e?pni{&K+J9R(d?@%QGFsdrt9U-x$6l7N=AFgwz$i^W z?pd98ve?-u1QS7Rx-jPJiM|OWUyig$^gwHoK|DYAW<_6DD;kC%E5_YY`QlJcK9w(; zj|eQiQz=r3AK_1T!uvVEho4FI28^b14?r&tO-EmTR)cO@IRfLyk6(OKUy^#7aHHu* z82i!`PZ72}L;C707OYx5GO0+?8v49dvlO{a#~Pdpf)=2T*TjPWof#BBQgngymKHX(pUwyOTQ2!vO>YeZn z@xI{}x5wUyv@w9VdfKF~6kc@NIC_B~JN(iB&fyl&(}j{uxpE^Q*Bq4p-6k4}G=+@c zPE&x0O;b|el5^W|Fz(w_p_1#AEkFn(mESz8}q-r{Dit27C$&Jw>DCN2yGtsscdAH2V|zj%b`!yElv7 zg>oa!PLYDILzPV``Q8^8kI4X>?dd5-mrW=C3BJs0@Yvp^^}vFzqXh4Pg(4#(pP ztS~`$qAf4i9!hdByQHE_Et<)aN7D(bv3~dI{-_@Lkzx4HvGYKkHQ5${yIkuwQpE}# zfxD;reb#->QDS$kjKgB!MpZu^uuZ?N*VcGne3ewun!A9t*OP4~=zKz4SZ}kg^d;FWg~KAQFxEKf?iF=tGN9F^Rqoa_ z<}JNa-FM9vC)t_u<=3-U>Y*T$O{44K1mz~O4>@*>%Zs`zc%$6!b_WmQ4rNs|^*vPA z5<9T6JT&;0ln3Gg+v8KVo_QPZOdjIB!`0AQe_ne|ZA2TzP>KiJgaGd6cH4O5w__r( zfBrb)u1W%>J=jm=*fRZYZM)G_!7LJ-zF1=@J@mojV8m7KOGzWJ-o?2Nmu zuai}#o2qwLT@)8~ZFqVPPf^*3(6NHg9+@9U_VPvBKAiAz%X+&;y&GhTg@b|aUld&K z*>zHQ+uc&>J}cmJ3m$U>k+PTjYkP~jd-m?7ql>I=OBlzT039dw2B{7;tUc>q?w<*s ze>1-5a3iot!d(h7CvniB9OAoF%Qbh8hG%S%e2t&SQ=BFMN_-)hCl+-(1fQGCTCQA= zVU4@nPqAKDNwVr#TPq&9O@C-zC2siH&CTY7=Z9(e$c>XMIt|dCdQ)q;3sAu#Z`T)# z$fA?+(5-shrAczvtv1`HSS&Y-x)t{MqMbFEAXp>V!g84RK)D|5&lr)W+YTm_vRt0< z zC0gi0zWKQpZDjUhsn({Ft|F1sgA(6-XP2ZGY`ffah2>FL4L6P|vJHi!+4^J^wKYig zIxwrOB_2awr|HTMH|?pSY_!cR?}iL!Dt%cyVKqODL$8FNsfqNVgFSE^x@i_Uedt^q zlHp8%fMMS(q9wgWmk6gh2gi;ERcW9nZCS;>OPd zLr(Dkllt}aASr)<4K&V(F&~mn zZ4WY`(qr4g9+nYWKt&A*q5GHPY_EFSd;>1HeTi?v6%<`Y=Qjg0VVgYes+0oH-dJ@t zhTT~;^Q~mXNwzljY0EfkIDyAbtefE6CgCB@0XAqZa`lC><-y50{Tkok;x1D+FvAjW zH+bAB<*ItU7P2Lb%T+6*HpBBVoIpqKaQ&U#i#NRCRz>AS!H!0$>ZU+bh|=$^kl-i? 
zKHalL3{QntIJV7tynBcQ*Zg3 zb(a=(N!vi=5)C={S-_pC!b!Wx5HtKlBvHW+de?;SlUQ@rd)nv;Jz<}y@rk{z_E?nK zT)yg6S(uKsf6Ui=ZFHu=CLPmFI8AM^cAG1c;DE6MYxr$G6&lVfdzC&wB2PLGx~?>f zlrJNKgqP_y{8P<=NMDB$ZPH+?S{Q0=6WVRySeqax}^YodRCwTEmTc{?({8PDkhP>J2XsR~+ z#HJ_oNrP1bKq>G2x~5Ei$6C?`4>JLDX=#h#*}F;bEb5MTCLm(~=f8Np_tIobHO7T1 zw&T_16vx)7n9@sDC(AW?xbP7U{Ubu}%~Q}bVsqPh>z6cc`f%T9u96-)nHO~sY8Xxu}vca=<(WkGQ&rz)l zu7or26StKVY0GC(LXsSv}yZn-2U3h*63#QE~{1PSkoB3qu@6t8ZtBv9n4%D~Nyw$-rh48IV zu*C}8c`sv+;mjfYM=1oUidw>S-9jW`cC+Qv7Srzu4{i58uO~sti))pjt7^+-aU;@Q z)w@J^*j3j;E&#Sw=qaky+2jBb}c7NXr;H`zan$nUptnyITI}#w8 zZmf3()AqDIwx!t3=Z#QX2UNb;!^m0w`{t@Z5N_+#hUd-n8s^9YN{0E}6ct9g7nik% ztz-dyu;sh_QO9mB&7jZs&CA)@Cmh#Be(UhU(+6=~kB^lPJKui4^AVL}S%cOpM$3-U zCQis!M1|@HFOzfBfMjXidgF7cL6UGK;#N%ICGYA z5u|{X`jo{)S+mB8;)U13zovh=SY;22aKeh*k2roY%a|hcF}W7H%rjbUZbYtW$&?a& z^G3Pgi*mWTtkH)QQ?DGs@z&DI2a|yerD5w5r;j!93yx+ymH;3wyRg;sQb12=sY6nX zVyg(Zp}opOpHZO>2AdSYlaET#GVR^P&h#4vCARnPB}`W+_c#S%ey)0_ScoXb7t53W<)r!cRy;4yw zY5HcWnr1lYSdiecUw-p!;$=92vUzps_&ym}SomH0^dUWrKyd1+F>~aq&1f(o11+J@ z-YdGBPEBy4q=ow?j%N;^;cUreqi}|l)FZf??bT@1+BIs*Ox(zFdOT!#t1(_xitXmhxj9BX zScMtoTydq=JSSW1>Bgv<4a^QNAmN)l>34W&ap>Y@8r42KGqcn3rIIfbA)r0NF>4wT>9!Y9Z5IRY$gXyHi{!(hVBizQEq@OmY z_+*W}*IB0ZFkh#d-WJ&KPa(m?%HEng2c}>uNM5(x8Z4gVF~L-7HQn%J$Gk<^TpcWT ziZvp1!<^aQPxyc!HzbO0`7))isL)0H1C~2Dmf#v0=BWL-%P7R#a*_YLE+|B*QZK+@ z2!tiu$P2dHSR#hn-4UrmbY?WhND0MW5s8d5qBXXC=y1o)MsCF~2i8lAXRzLjH#C?+ z=~zHXn3Ws~kAGo>)ZfW|zWOqS$umcZD2!P8G_Gxr25yXp)!_=g_?v zS~%9ccO(f#H1*`6QC4)Az4^#wMLS+-90_K|Pa7jhz-{!d z2Az6XarpHG81pqJQMogk&z6!VX$2PfjWQB3K@GK|83`iaoC@wey8KMosuy$_CI(1E zdfuszdA7c9>7?juT%|Xue_#84N1GVbRc?pXr7%s-lczi?NBH}VA%97TC;dHrQL78E zrIZZ0QSE@CQ=1IprdVgbB~F{g{}$HKVa~QO<$#*C`riAn>(XHEQ_hF(1lkNULq0Vc zvx1h&9B&qihh|XtQ^zD)W4_|Sm!BEz8%S`I2@EP9m?2(02$Q7%MBhjT+wM=~#^#~3 zai~12w@w7xV$>qxGGw-OaQ3D1KTL_z)aNa~DS49AaL@rk_GbuYxDlq!3dEXAR9sGb z84O09qEb*JUz2d@4IS@-*k6ASK79G%P&>xc&^=%GL z+ZH_avH%z~x>&l9u!pSkxOTsMAqP+>VLSpN)Ag6#YGqU0uTx9O>n~R`TkAKdLkP=- zgKhVY%0IV+xY4JL@9EGhZ_Q=;<%_~dJ2$NlcU|tEa$gP$6i*kJ!G)G-*Q)M}YP^Z8 zMLHJ7(bhoJCAsLQ3g~gS_B5Ix6;=ye+dtvOFY^@Jqmnp4K|y+UpMeTr;sEkXS0_GG z4njhIjb4CylG{5K`bI6LeDE?OLrQ8@*P2E;`GAPVfDwSHbuu@;xg#L5Dl&ZlN^&QJ zE{a}(g2KFS#27~Oe80YIUFZ1$vl8ue6+;Glz?8y3YmfVTu-oNwYYg!8m}ND5vOrOP z_!9aFmtJ|w?+d&@X{{hWbgX5@(jwVatf7qRK<}w1gP3%ew1B*{g2w)p|LtaKsh|3Pn7=%`+9r{GJY@E|o$Ad^Duv`{b_$jnW*v&rK zswRv_BdJD-?lO^vAn2QxUOb&WO6%d;?~a&;roFXmZA2~W@fz<(7L}yuOY$J- z=5&DRTEp=PVvMok_=HXcqzd+^wap)3{=D9g7D*_K)(Zh>jG*gohem-D9*T%zh3$B$ zYk(=urW+Cp^V7XjsXd4z7Dmrj?CPoYtw&0pQi8;Q!`xhPM-F_o#bGsvtE}+SrRBC4 z#bR_f)&EYeaX54bTg(=YENq3;u92;^9#B0?lEuqnEy_!2&@PRdwN@8AGORTlm@K9~ zT@<%etZc$nOx8?6oGc(eia=2CjY?7#3$D|Rl5#5d`r^W*+nl1U6;K*C1ERIt=r{}D zr4-c`qSw3GTNrHytYOaQn#0o?7>QXf^c*T%^f6=|DfRa|x^+Yqp54yh?5w58VF(dM76C&;Mnez=zB| zx+Ok(KBJt6OWS-d180i~h_q4X>^z3RIMuNmjAM4pk*IeD)vB|qQB$#58+E)}HZ0!+_ z<{LyFR6~!CF{i-$!&0!zo6rdi#VpK=i)Xy}p7eY4!nfTKX{4NR@X7bW-#1CqEU;D_VuQ9iGc-S zWu@A+5;w;WA0OV@n*&wlfmFnBW2;I9Hp0D=sBNq9OzKvha=46Moh&Fq3bXu8Er+#s;o~4F zYmpDxtJOxw2Md62T__9w%<#HUzIqNQ&Et9J&w~lGY_^ZLh{u~tr~X)Ba!u!sLE{Xw*~8>Xi{%qt^(MERp-Cxe{C3OIP>J{U zL3Uz^?=?4rQ#r^N)(ufD=iKYFB~C?vIARV6c0@I!I8MO05F^5wCnMD#&*8U5VQbrU zurD7R0fD@$g);7is|9t1lSRj(RW2QN^F@@D5!TPEp|W@MPe8U&xGUH8P}KDLGr}6^ zm+=fg)q+L($MAh`A1fy_zEN4ik! 
zJ=GtFr%L2pL`-suj{_SG3FL`NVX(H5VmLyZ$WUB&a)mdCliXo63%I zfN*FJyRvpYrGXEWJLX>F|6#MDcqvjA$i$z{0{m?0o*MJJ<4Pel7`uWwIP`D*2jPAu z#5c}N*%@k=&&S$g@bC4^O%}qP;XL#% zrghio;TD<#5UML@1rby#LU0CoJrVJlBR0k4N-}zbj>xW=O!lOl}N_s>tORZ4ysM)R#TOw0zJkmAg}FX)38MoYK7Q{EEY8@1c^^-pIr zo!DDjQA&0*XtX7+(3WfUO8BveW=_g|sxzo;p-ChBl-0KjFDm%v!hP8i4n8y{nF(%R zU`@zZ;4tkVI4jL&+8xsTv2`^{Muq3cs@8 zlSwe5($_s5#CPX+jVpaXUU+jOZIHfd!JPSP4D=wJ)x zr8Z_4P}b_|G0eF~eT0#g!Lud06NC?RM6DjRZ>l)j5&A~t1)Hhp{R4$lR}u{OzE#jV z;X1(<|1IIVbg8^6Zv<7*!&xzTy@hArPa86+>kDBjEHq+%09c8&PPIn1=csK|1pBh| zG=!}#zVDCV!4%(inYS-4t(cw3+qrwJb@hLCljYl&gE_p3uXbEgIz>;IFch(Jac;3- zibQbc-ihYAxUPRlRFLcos8SG5u zr!7_zPunA(CLSe_PW5L3yO+Y&H;(H1$5tLGFd?lXp z{vI9Z!3=d42^5t;Q<(%1QnL}mw_3P+<1ZdJ=X+|q3+u-O`I~p&Gdr-J%32>*38qJ$ zKzr?WdC3LKJG*|^!8Ge2jNqi3ov_+HH8qocV}T0nWT6~NShq*bKD%5_XYG%|9Khyg z#T0=H94*fxWXfAjY4NliVg3CcU#_RYMI-CsTvXuDlTwDcSYY(SqFnTn4{%rSm`=sR52^Ze zw6S2G)5*j$kTR~TQF!Tw2tW|YO+0YP?US4Pdy1`o!W!<~sWTi@HTjl+d_wsy(k9XI zaF&CDm7@80xq4Pi#b+!CBc1uk{=73cZ?#(Uj&;HM+q}!+9g>eJNAOz4Fj0_Arc-;> zGE5fFDUIK|POosUz+f(nq_G!2ya^glLAzAMpTUP{mnUsLmtX7ENVJ?9R$L<21EL~0 z!f0Sbu2d*2s3(fZe-Jf~kgteON0}k6SX3JJ{lwF^7w)Ns^34G0_~M^Z!J)F5X!Zu% zacJR`K@V(HxeAQy+kD5^j1Qvd+aAXJJ|>si$Uej`{|y;Swxm?Z?t_DTHZ;-tV2Mot z$qKMGdmf-{WbLQ;8;*NaghwvCGhLy_Pi8M;7`O)okjgjJOe%XN6m zCbURyDlyw37dX`t&6(y%ubF1IdaGqNK`&DCMMYB6WzD*-2ewOYE2QOn&wO+uNkJSO?4gW{IV4Ye`F`ZFRK`wNZ~&Q^52WiLCNl&{ zm+uX*+>ac7AT+)Zv#ByOZRA}*T%q>}tFN$a0$%iTLK@}cYp+Sh#CoacbpDUY^&ah8 zG4I+g+3HmSLMZegc{VKR;yMG6O%q?(X+Xpa7r31k57B7?k5b5!xc;^5#k22o2XJaX zvh;1D`Av24L~Q)@gWE3Cs+Mb{j!xzF^Cvt4fyr6jcUJQv*E?{BMZO}S^UZbVy!4V5 zYhZL`j$9vdBv&M=D?3yluHhgQtK{cx66_VDPuC&ZVjTj2trmPJ3SOHM!Aj6#Gn)t0 zL9Luy?bSPF)`PF^u|3TYi|~j|MY%+mP)BI;OCFGc!~o$#T$L=U4g=u=rT68~bDDxzmb; zYRlIGEqGU5cbOmQiIJ69P1}5jN(szVfSLtw6tsGVPk_&ly zPwX{OX1d)SE4`Z`*!x`8Trb-}CjH*WgOVz;vhXY9D}32hl&*){bB<*g?t{EX?nOjP zM9dn^yO@zWmNa2^j8D2i&*wDd8O(R6-=3DZLuCo_@ZT!`aIlR8MBRk?6k-RxY=4Tb z&I}1@01FwKJ8+I3y}ljiaUItiaer?b%B{$gAHB{eS2$8;{n z7Y3Dm@p?&(nI-L}ItBqb{d9A`M5~5??l*6)LOVBm-zmJ4k~PoixHd z+HD$#@w3?F_A&Ye+xzq(3#A_)q`SPYi)!-#2o{{y3uc`*byg1rd7o}Swy*BAXw>L@ zz4;jzv*c<=5Ix!wX1`qFl@k)c&NgL8NM?*vA37!o!9ukE+QwVwjH7ucN)S^O=-swE z5<9u1zZcr$p6||0c}UXyt)Spd`w{>8w{YZA6KWc-d*=RvJqLg-WIw z^<)kPd9-y zAI=M2%-Tq^Bs5%=gEvHSXle~7$|r|Z2d55E0SKiS^7+r#^>4|p#>eDKZ}u12mDM#F zUG~P|w?c)xhSr6b)JwBRJP*1%V&6v&^dQ8KU7b5Kvq73a4s7V8|F zkkDLfyh}r*PtP)tBxP_J-$5zOSg7;!O_Oh|dmt6hVW+#ENb{$%fe0zl2rfU*H%LR@ z=}%y9R?Zd;QxO_fMGs{(3Bjk5u>u}BnAD5>F@=r% zuiZ#ke2JTBmIr%lJ-6z4y_v|hk?hvS!CN>1nMoAHd;&;cmV$Wk8${Gsp1`@6^LpM6zG?{9sqv~T|21cenP=$_eo0Nl<`yP5lA#_`F79 zT$=d9%KQFK<6`EEuw0C+hspesW5I6gjP>4iK_>bVW|x=4K@M8;t@hq^3D(6#OL*NE zI)UNp^ijGzjO)&g&6YUNTgmGs3tog>#OPrf64-j5)0^m+vI?}1{L7zXZ+GGW(;CBW zWxDP*doL0I5^B|M4r7VkIav2rQkD`emnCb+rO+8t&m+iirB>jjnq`%^(Za(vQ_q7* zYTzvIA=0}LOn$8&j!_RMpp4Mw-iVw=ozd0V9f^ur(B=q9)9?Y^BErX~xf9iA$hbhg58CbV8AGJiPu?XG$@<8&dgt9zjS!QW*U@3O(F4Ayz`WMj=0 z#P0O6XDisXG`lj)V4EEU5>o~n)1sZt?rlbC51GyM@PMX|KRq@ti-MZXL3b` z)RC;po`b;pq}l>a8BX*dp9gZ{bczp4`Rf)ry!E3n#P9;vYr4UsIERoyb{3P#K!$S$aQS6Hmm(#%M#DtOZjZn7FcyN^d+4NR@PnV0S;#`Gd>!DHA*5+LD=FE? 
zJ|$h$xMZQmtr5W?f68j7aVVq&T61_zC%xL{tt+|<4_i8#Ez^3ZZFj=Ah2cg1Y6HIn zhe=WD%x+N>ozofczI1{8yHV)@jU+K&=~7#xJZ3gtiAg2KD2vkC{p`tCjd3`=#&eV^bkv&gYAiQU$vAuuu2Jl2-NsxrpEB>>;t%WIy2 zp_r~Y0}33rHaX0U#0OZ5y5-idxT}nq)er;I)85#xYOE|aB}9+Ft5zy<=g-al4GVm9 zn$|K3SALQXh9^(qG>_%Op7NePjJmsv(Ooe4%?0Bv2z_Hj@U(J&iG|Fw7KoET^=rv> zeX|_M5fu5nbd|7?+-}-!skUo5xeJTxaKp4~hu|ZYY%u{SgXRd*Ov$4rLFYPUWq4rq z-i<`&@>Z}9HU!nu$Nz+a3~Oi)l8!G)66GgeYU&C?+nCCxT7IH#*XreuL}&MC$YE+G z&Nk!9o6H|chK~rhC7I^f(D&RT;w@-g##rP@bK(jzrWwi=fE-gNA!u?qui7sd$pzu@ zENoVZJ6MUf5;J-~3UsA*>X(vzI|1;OW?{-DZk1!6Lj>vbY`k$kNu|S}4OKz8-9W9x z=*-}>@3t0n+iE~mPiFG$95xRC%;L`n0%TW!LJS^LKD)%}qPH+j!%x!T0V*A(^HH$)OPXDQd!}FtTGJD7srhZcZGOi3V&isUV zYfU0POx$wQ4MOw%z^6CnB3wP+TN-{a-iy*avr1pOEE{(^_3YU8T&)D}-&DF4sRDSvs|j~=e(t1RI|W@^9xH(A&*E(W_K!T(S|mq_cIx?EvuW$Fd{1C3tx6tIEz2G15+Cub3$`N&^O z7~D1wM8CVp_=H*8d;PoB#_K3~1dF8#^_gzHojX&lYD3)4qZmUz87J;Sab+Jxn&C zF=b!X7UA?}k=aYNn!TGY($Jzaxz8_|rzAadZ;JT(@Kq4z5qFl}I^s$|oL$4puGjRL z%Nn_Osfjc}`nM>o6E;X^#d=;(h9$w@ALd83AJcpZRvyRMd~_}8BXHsjwtY6}se2TK z7%zF#a~HI|JQuRcw-t7lcehDd#YeX^KNcquhTLV&laSL@)g0%)s+jg)^7Y6gBeW-MOYK`tM)E*qo!Ugk4 z)2A{kAJ>X)z0tZPDRt>PET{yV5#|fE%KrBp&498)_SmiZSagc8Sy{0>fbc@@2b`g zgqg_y2ml9A#)Bgu?~A4DM4`^pAx;pNh*(%wG+45FEjKA4y_fJ-c5&X#FE+6=#r~@( zewP*^rjB}NEXVC~mYK?B+vzg!bG&OyH@e-D0s@_U3AusR#=5)P#rmf|X??g1%%dwf zjPXTTLDY?V!|V`ewV+8lK(Z>c+D6JMSb5m z(<0)(BEWA$bqVH8U!$qb-$vH|BH-V${r%D72XN#|hEEmu|JK9*q5U>k;IPfS4%few z`Rv`VKuoxSojMD7cMx1aDIQ(olos1Xsk0FcloU_2&Z` zQt($B|4-Qe6#xHk!Y&LBBl+!J0RJrh-}Ut$_y14Jf3MA)KHpDZM`#(KX3Ih~o4jPo z4z6ZO{T;3Nh+awc!NgF@zMi!{%aiS!P283E_i)I6MI9Sw%uMnGp>oddhA%SpZ@Q_+ z4hJXS&%u!Q81^T=(am^0`d?A#gc6KZNkRY)zRf+H1R(&^$Iw3mEplyeiFoBqpSI9( zb?Hq13M4wGgM(~&y;1eH5%x&%=+)-m{Kul?AMpHJLM#bP^M+~@?ynT^8p9Rr`cF9; z!v2@=hm3fe=TBuiKeew zPxh~1-K$_$Ff=Mi^q~G@T>ZBM`NgkhrsJ6Zx5dbB@{SVv_UknltgKf5s$PGbj;4)b zyHM#WcU{|a&F?&)Oc?U0`%pN{240qq@Ql2+fb#M2u{$~9wvPcB{E&P_{oY{5dZqcT z@n|ZlN_vIq+0lQi`QJfAsN33isXIxdYfHV&`{xsw&l$q4cvLm-7yMnu2H_}P@AV=N zjvX>3OPK|1#Ud$Hw;^>CL>ITa3&T6tBd-0ameA4aW|wtehE!hQ^J4WUaBK^GT=5E^ z@nVa^(fFBNRxX2&I<8nHx|UwGKWQ>Qx6?B&S7bYc)%*a3+hOg?60jxt+w3;@jTVWP z5oEkf#V_gYM~!yxbBtuEMSD;LK*J(`u3imVFpqS70ghuDPHD`=`VW0?H%nc-MYa?AL)r}|?@?joP8YOW3zCC#SG=`W3jqxtDdo}LHG zNA4Rx$8*cz8WS-%yPOcX7EI^ZU7caN=1Ky55KJsDx}`6M6Qg5R%vb+&FZh4yH5*=p z*R3SH(&gH=9FfFgTtlMYkO8Ml$6QP&R8ELBhjS}OYm4Qr(J4MYq`ZqQ_6g_O#o5|c zn=~&prKHy8GhO;4mTE8kQGo9*g|G6sOsl@Ly4=oA-1j>fxgHNp8{I#hMN5PgPiCfj za2%Oq(P3>6!wCO}bK>s;Fu{M9q@n7?qeSs#>N!(!wxit9T(?aSV}(K`NnB+6iB;EMU0QaPEGFFK~e%;jxD5ONKPLrcVi>x$OE{>-b(c2Xa8v7S|=%!af zN|Ph;;VCeBWZ{1{DSd%CdF`{v)Ytg4K~cO4sZ(`h8_m>}*2iY`nRaZwG2d|d!kZYt zag-%hnQbI#{;4cx9f%g4i;SDJU2Qy)XsZ0eKXuPSz-w1S4qUDh58Xb9E510rA(#G1BA9q&67}|{E zH>;lS_)OAf8p}W92zxcO84t1QX)Q~j8uWZCgaXuu;xPr9b#a)Bsw^04oOc9i3=Fpm zC9&ydTYM(_7Fj0e=cWh_J=O>S$2Iyi1-Gw@N^ad&#_oijYtm`e`Ga9#i;2<(ly)#P zytutkd$`j*nfsA8AQU?ogb_vM_)wQ-w>74~?RM1wc7c~h!X5zJUF=4Gd)tM6O?>(> zo%Rm^A2CemZ*~?7KdnoHY-*#*0vjX2d*ughJ>&5Wx$pnczh>2Is2Mj>9?sC?cA_y);+Ngu<+bt|58-?Z$~0f zSgl3sY1pJ<8Qfj7?|y$7t%VG2AAWGV`nOd5h=ZAka1X{nv)=fNqmo6!cu85j=Gxva zg#~*iZnu4vv*pRcJDKi4rDUS}Hhnx$Q2Xfz7rW51sTuL^qaOZ1uNAhwZ1 zGagYFyJqs$GFW^xXVPV;FjL%1&--Kzl*DD|-q&ck17Xeaxbxd^e~CM=<65$Ixd7DH zc>ltzJ<3!Kwi^`uI>IH!G33_UNcpzD$A_7f zsS)zgqiMvO&rVYd^Rx|lI^apO)>-E4+Gr526)g8yc`Bq3w_KvdTK zrCSt3=n7)W-+2p5f|$+TJ&cc;pMyy_`xI&itZ+n@OI}A*Jr-sLJnq0#ISfc0S1h$=yeHm6|H4dWb+H_pZcU86Y-?+nU33OrAZlT!$?O#w=f+Tef zclrZU@Q$u)S&rR7f_GDpkh9$A?^e;JYzrvpAqg&>BiZr-Gd|H?B)RSn=PWiNH2rgl zf4KtEvn&`RrM=ik=KE-vMC0nU24*%OI5E`vQY+gGSq-W5TGlSO%@@KY3rHzY9n1;qHkv@HXf1wZ(Sh9WdUER$5!9Kqam=jxJ3t=;1r3Sw{+U}J?thT} 
zCG?Z)-bC$<2LTmQIV!zif?kVfIjlFE>rO+I4kF>LZy^?zm2pICB8)Jr)73Ve@-4NGtVRSs2h7N1XgJN5$la*dxMDE|obtM=Q!GIY(q;V@*zK8LcmzaVyRoMNCp+`9Z6>iL2AISHQRQ!bY@w5u?Hy7s1I37XgPMN@g6v@Y-nIkt2oXH^3?>C5Y6_b$Z{R$Se=S8ED^2W1o!P2q6EQJG#d`q9nn^^PZD;wcsQUu8!R?TYi zvyQgS;Ft3W8vw@cx(OX&^izeQ+ySjn7b^WJS?BkaE@A-Ugh*a`QC<&R7e z3o-r)%+z7yFi_XYv4*h%!XZNPSUmllT~u%6nucca9G-f`6bOW%TN~rW)JxngsQz2rWcrAWo7Ycx zhAxD!Pu+?*Qqk*nNSmxD!7(3a?SiGQy9yql5G0uq3;6nAi{Q`)_@u3g(SqbFG}R$C z2c2Kv#A&xbYwI5pyL|;_M zl5V}T>s#(MDN7PWXpsj|aL%qk2N*t0$V~oCKYGmx|I?><|CA-hu0`KPH+;0P%`Qb} z4dJj-A{*UPapvRb<6Wi9M{>z&@+i@e=?6N0jeGv}%4x=XV~LeZe^=X>Kb`Z1DDppQ zJwI;EKy>V_)Yv`@A5dh(+;BK8eVuhAYWnaM-AyWRHpn1YW4mAH!bLOT;ErsvoROM) zA?ohk7DmeydbPm#_LqKM0-KKd@fO0eF=4Bg%m;dXY}y;k=abBM_q9hzNW5#P&Ij=Q zHt|Pb+UL(c1XUlme<$3Zp}DCFAG-Ibe}NIL+}d^)DQqDNnz{9kJo`S#u#p2zd^yGh znpq5%XZGuLtu5k{TT?;rrns-NU-|R}bEd69-96J?I{Q{vGDt6!XI!l!lJSN`Y_bA) z2R;l}_wTU%Az-WC{iDyAq5Mu<{*6F)VbIT{@}_BhhUCGtswpPp1>jJ6ciM--S$sHm zytfd@GQj#gyI{uMt%J=o&hKho8#h$GyK3bNsj&_R0Zx1XVxwsqYXPUxMR+;@AA=q7Q| z(Tq%zJP;%>d(twRQdsRfsFCRZZ+p04|HyHFMG>fpjeIET+VeaMk~Wlq^0p*=4CpvHqlP%Y zJk=V#2fALDhg4a*dTeC`&CWSxcqRS{gvji$2*8wyj3|A|qdzJ`UVSn&_MG5gQHz^e z@eO_wC#w3C&A1{We$(y-8tfkb3T7$h!5ySnX{MN`d+>q|t7p%YRpn~#DOvsY^w4%> z%Lm>SQJe7{#`xtyL;M4MK|7Dn4C&(Vyr$(hM}Qwr7c`rywV6@BlafD3^MHrMoGgLC zgwY^ZfWUWmr!R&lWvtW~CL1H>c9+iLWTxnKsy+O?T>gsEc;jt{@V!STw2e?eaL96_ z1-`WwYf3`iz}Q#{r8+C@-L~j&#GNR}^fS11pUXrWnB149{!Tc81XVvcbotT>uTIju z!)m;i?#MiWfkOPhzco`xR$1{6@4*sCFM!7tC?Jg@j>SJLRW#q8B#Q;B&=jf{@S{1y zjLiF!j^Ca9Y-O?#@%p?%w@P5FtI#>Q#2nu1Vkp{^QyR1n$yPs3pH`Uz%&~|FY3p^l zlx3<7soFAYq-&dd{_P<~R0Xya=hjp$EGMtWKt7t-74xL!U{2JV(&euLr7ifFVNM$P zDzVP1eTwlcGT0)1nx=OMJE$?kT0Kc?ZKvbD8I-&}c4)u1a@ikW`YX@cVknD&c+Gc>h&HMyuN)l|32>z0%tYJ4Q8J3}-%a55c! zw}FPx2=lc?j`tJYVuQRO6aH9%6;_vUJ>eaIXyG|l;Wh_*2yqq;dWZw^_%4TD1~0PC z!R5v2am?U}v}#&g1=88RL%bG1Shh*v{KvJ&McJz$gUm*Vc!%&9e8ZZuf1a4W7!gFy zZ8Nalgcr>{b*06gQg|hFcCsEsDgKjzL>opae)`A;kVQNCPVIOd)a|T7!SH!e^x0$m z3g5_wBfbGUyUDe7>@)Wv;I9lN%==hv3-@n<6bS0ZZRS9L>|9)`TM^BhVSD`oBe!Vo zYy$Kr9YBSm#$T>b3tWHi$(mQwR_%4_3zVogx*>uN^jA=V!5N1k1Sld7y&H+MgM)d2 zX!nWzwwqCh^?Ke?;icIac$MN`kbws#re|xOa+Vqv{|(iCoo=vByncl2S&hwP?j~Q8 z`8?vPR}wFLJXd1!ig9L&+rJ9^B~U8F_xpx_;!W(tuHO-|c9xJ9kC;P@H-v33cfLwI{Sxe_Dt+_Xk zDtI-J5!oB3PkX*UONK9ue-VeEV`9mQOBU;2rXJ^T7}qOZXNnjna_U;?q^zq9PKN%B z2OVuMzNVXahPZ^#%8-0Bl5W9OOd5i@kigyjYQapm+cg|F=E)@*$7i?a>zrhb{P&1G zVynF_-^4R<+8mdRZcitPPfX*kOwb&`CRxhqhirP=R_WT9oY#ogF3KjT-U{)&QQs6S zVGc)u;&u6uqTTPkAq9XfEad_i3w zi^(9S1kEQPXS*L1f2Jo|45cxGMqCWD;z;MIw$iu3^ARKIX2JXeW{=_Gu#M9 zgkH(&>5+l}szK6f&z*#Z{@Tz>-3fg{a~Z1kt(mWEQP|yP5-oyd^Cgf@`=Vs>gk?Z< zO25+n=!GSdr){2Myxve2kz?PZR41QsIZdIW&4c?rIgFG#X@d);A7effl)fX*zCV}9 z>R^BJHu(U+s&)y$Q$P{0*AH{k=yS5C@vJX*!a~S$w~{1mjc;H=k?)U(E64Y*^|M6b z*Agy)m*~}k2^N| zyQZW{Cq~0w)$2_dFRL^P)Y6tXsCjE{^g&cS-d8iK`;p!)(&SGA_HZ{=9=HmZ&j4TB!INk+vsgtyEET+s83?v;6qc@PNudPsA+#LjBSNXY2adsscTXWXKj+WY%H8*fs0k z%WIfIX+Wz_69wOpGA5{`iT9f}8GirjHBs^vOU5SsRv2xjy^Z$G9$eFs_SVxmV}v~g zYkr;*WfB=g!>k9zv@&>N^d*5!qX$q?H5=_<8*+N4G7t;XsCL!%9!wZ>pTkS+@eBPC z(Nhs>;EHX0)3YM$vmJ?efkDIL8o?AHbMMk=3EY)d`Z~UzH(=;dw?4^RpkS4Zj^F96 zjWmylW?q6g|IqFh%#9l$-PvkO=!Y$YNC9o7h{m z%x9E!n=}(=hZjqy?0g^nnTBk^91jHXK%91ne7&>5tW)Ftrv?`vS-r{4&J^d%T5ls+ zR*QYAB3^FppL>}u)Zr^CfP{1e!Mpay&&+&sb$#x~RSGi-_zB?d*(G{&>XsM2ZVyP9 z4>;@BtU2r7avyfLcd%UUs0PCH7cD^qx7r~>r5DNgEl3JwH6?PLciZ{{ zWd}^$i{5P<>;^g?zTmXuMQ#8lvrnuBZB^FoG&f;HZX3RvB&cKT4l3^&mZExU`XVZc zRjgKUk&dshV481OifumnVW?{4FHJTv8tYpQ-SYZi0<9gcC*RTJV3}S0D(5D)whNe@ zAm~l0=(sozgfcTg&U&)gzERq8I2na_zQd_KMG#AtyY0m^JX?{&igQLF1fW{4o2?@+ z#1pR|WWbsqzN-4K`kntby&bqqh|{@#tC?i--)YD9vKWkj;uH~KC;^Y{&elf@za+Y0 
z;UjMB3X6lTQaW?@MF7Bd7u18PJ-w|r(j$n$!zLcQcHrEcNvFlo;T7&weu!SbLo#X_ zELM#{T9tIsWYUN+7ixn0N`K<8HAMYo`D@J*kNF*z^a3B6<+*%1`w52HiPx{mj^BF+ z%5+qPoV1+M2gEm9a{iD*&fNBFh^{gy>1fK1qMZd95IC*-0|3V8@hACoySCIo`~@-k zqojnwXW|Io@xeHODnj~oBYego{fKt&mD1q*oJJi4!4@(%iw{_DBn_|A`3LW?R+LTl z8#4QyyXNbGGl#ur$%}Q$_`cD#8R!K|PcEjoyYp!0zix`Ea3-_;i;UR6J@6J*Ah9c1 z@#D`}YR3rcQwOYl!bMT#K_99?Vioh<)>_t}bH=%V3PPBG#lBbzoa*XR5#P|$ONpfI z`)SDL^k_Jm&BJhI;QaVvSfY*Ytv-QVdGl3e1C`>+t)#GrMpBpJD($q|yWfL9R<0`d z_EZ>Klhm*biJsIxIYcf?vN!~Czkbl2^@qz2WH98|HR&u>{`*h`d1%^02gF5?!sm3q z@}@nh<-t{(iywWJX1%$nt1sg7r8#@Nu9LxJp$7KzQD@HpOKqyn2u3tK4RhPst6mWG zqi zvd|{<)i^_xfW7uSTWGS+>CZduWu^t)tAOgvsW{$;kd8fx(IQBdozW_`U?mTs=XxrflmqN8L{Q}Q1?}nM; z+$|&`YjoSKr^Q3F8)xd>*`~oVu)n{wRNP6i%x?uFFp3o*Fppg&Mb`jfOR~=MZP}i_ zFL0?z*7`|mxbARESkz?M%Phu>1-AmuqRBlqdeF&lrU0DQ0+@sWuGoCmrxY360CnLb z_vZhdx&XK^CMRW0yp~;9F8PtEiG0K#ni_*QRcQ6P1?vO{S7)xpT@52>r4~)=Fe$U17Lk*sP_!qtQ zzk~AsU{qbIEWoo(&ASf0=t>ixc$M>8>FMZ+k4@#I6OFK9bp7I{Z)bY`k%`{V1^AOn z6D5dW;>S+_#pE#Uc7)xF;#kigciJ>Rdv~=C`2MsFI(NVo*<`dVsGZHURp}J2mX#SA zUHRP^^*{Gv&li|lvy%iQ)ZV>o@#~7Wflp)r;k~uh&mTxT$+!=YhMAh;J|L~I+Dbt@ zNN5kQ)WGxO*97X}=y0v57m8mm_KSH#17Tv8B&{Dt-THZI$Ks{_uN8B zhKo1hkI?^1%J*%cbkn&&uKm?U{%ThD4I5K4Eh_*dJL5n~^{>Bp1%LqIooMB={k)YC zZBhjwr=fDy9vanxnN_m>4+Lb2CQBX*jh9-1iRneP z-O)4b!{U=h$YJA00mG*{Pj%QpHweDWug{PtA! zjcGu-ZMldK(ziF6C%F2ZTk}H+Zx#QWBZqhqaL1{6+zS@Il4qTj$8}yVl*Vsgmw3K$ znjDinzFc8v1AJDqfrM7?`vS|5gEG80K1+9>NR513k}&QhK@oLo&b0PTaNbM$l7;MU06ZJ9(;~5L=hDvZp zx4-PT>z=Hi&jwKTcV{)(2U|ziY`s0PTCmI~Ot-S?dXoc9_@(q+{t~~ZgRmKXb#8v1 zRb_VF$h^O?X=s*2xTBhS+VEJJ#ptK)Xor>H9~0{-?v=LdPaBM*M-mxjMcH0)my5&C zR2$0tQp~aRAkL_mJ2@BAc30;lu{=4fGv-&{T3bk7*2V$CHyd`aj}kt|UXv4>?{@8X zn#0ZF);B9myX}6w21im!DCS=Z4wM<&b2NY=I*+u_Jl#0Q+Nmn-W#!_`o0ATeC;b_$ z1rY5ppuNFsQXV`XKsWmS{@31w&9RC!;2GxZCqa5~`yb0_^Ln?zFRYx5hI}SVG|%b@ zhD2HXC^T3AS;cOWACp^$V>W`=>N>6^Wmb*?jQA@PM5Wz=ciysex+5HymlY>?VZE7rkh?;aQ(rOCS+IDD%>=Gsu*m3 z7`+(yJ#aoE3>dZh1(@L1QvT-ABPO#x^zJp2Wm*MA#ABR#b>b7IBkvbij6?dSvhZg% zuOv)|m`^6WR*byQ*cz@77iq~YNSgiSj%k~DOCE>AD);a?sp!+NqDwQ z8o#yk!(|ck5pj=|NblZqzS8_Ke(z`}yWMnu?KWNPJ+?TX6l*;a7;b>Qjbr>=S+_Me zhv0Um4~_ETBi!f8FRVorh(+hDk;6(B>YLXmX3B;03om+;q@o9e%xP=#RASqRo?1hC z$XV6;02GSZ3Y-LFQD7Zo2kqN+nE=s3e+ywM_>AM>1oTM1}+r#68jAn%=LHE z?hgv^FWUq4@+`?U({$d-S2T}-VkvjstCO8M`m?(wtj?HN&YvzZ`NV&_;XkndEc0_I z)GGMw8qgfgS@7K@-U@D`BxVi!nm*)#OvNjY`}ko>J>H@5Dm{Zpt^f6ga`He>hs=mJ6%D9xFd!qG7X;pf!Ij{r=9TtlPJiP1%w49n!C>-y+t zs9N`uKcj{Ch`=B}3|U_B9Qx9Aa?&Jy##j;dKF_8=Eu%6#RpNoIYQLtE=cD|oRTG+) z`-eKH@PiD#A;UI17jw?J*es!i+%gg7#xsR@HI6j{9XGkkx?k$j`q$ z5)}rpyi^5sM!tQ_)kvxxU0t8dGoDAQ^!BCYN(cJ7PXX40Kab>MdO&DU6K;o;gx&t} zN$Q4$n&2WAmFC=q6NT_8NaBnZHa)jx(-dx&@Tg!r^E{D=+^S4W9Mj&Zk7FU{xO4&g zQ3e^-8sgdi?7nHT?blYz6duG|qqFjlHqe1q`yw{=`Lbr!z0NvMAKm|SBakGXf6u&|Rlhe2mSL)Q&!`OVz$2P>OgUL)=Tf9(rD#@I-ekF@I|gWf-3vtAk8ATd}<^8jrSWG2 zywAIC4#REdq5XKv)epZrxcGgp8}!&-szgumTBv}~{=0F+K^u*p#X82-P|p_ttkGW^lzG>21cNZwVGmDl8pwm}kpH^g^YHnl z;f1pfqQ(V$Sw09mjzJmkJTmY5lBm_M)1PE~`-cqWV8Z5w8wh<@KAJ8*1MZ2TP>9cLfZ+{lXj(nQI|9$yHw`aY&;ju_)5D1Bble0aTP8nz($wD8)~3?8 z#}coeff%D2TD>A0S(u4!&;BOmdkB)PMxV<*R~Fg`DAR*`Y$END_+KF9ioFvQUcNwc zp1qwM)C#visv&G_yUFKRsr4D|qXeVd0;Xc7U5P_xB!G;U0ST?><*3Z%Cn8X`3UY{)XZ13i;FFjkCWzLBk@zck^Nbd z8E;ul=3KHft0lwRL!){&LCtpExqiOatCo+83(fs{4h{Lb_{@hMEM6?_X162puk-S$ z?~O0>r5$=4DQAe*oS84b^|LL@5yba{Vo~hHG7Wl@1^- zo>Mt7bzGLe0huHBlpRctc=Bdb^M23)L?lB-OcMvs{c`Z%}=v9-`_ z)#GJ8)hc{444qQsP3@J=^4gahDJciE?t5l#CZ-S z2p!aydUP^7KUUTN5{0HixH7_AE^296dJdzuo5hZ!8mRlpY2h<1RN;cAh3`fhumk1+O)Cb!4u=ENf_~9s?-zXL;e6Bm(#GbRWK;K{?Qt^0askVx z8sB!cm6Xj`5m!d$_B-ropx!wV)7ampJO4t!bL#XMZ~t9!sQG51-6*8}pk5bBJ=UFA 
zG)6x(lL*phvr7eo^Ek5J9p%m)cQuqNjBjY!V!X!o*jSxuAEb|w*O%m={=;u;WY^d~ zy>N(ejmE$gwHx(!zOOX*t+j{tuW^@MIRqCMcn_)#xw!KK-q9zSq$6HRIYe+hD7X;F z;e^htcHN!yu^32LjMMnW2VH3qw*9%E1W*fx>#Y<=@Apqm4HwgUu0<7-baiIaJf!|8 zZGdgdrK~m#Ln7vmpV;34B(T~W?{E#?!dk>iWve}X=GAeRr6s@gWx2oLgQTuYl&|TW zsC!zXma7bRL#ywINjjb@|60zC6EQ9C=*}o))__|r0OOsbQq`a@Jo)wV3aWy9JHVdW z43?jwIad#5?jA7ycq#~lHC>5<5-%>YTWPnm5FS%8cc7v0)jXha+E&VqH)t?Z%=%hM z4)xPzmAwW%!*V6A)fMs3GHH&I(qQ!325rMwDgO;pUiVQ2Q2)?2 z>?ApJ5S8U`73@SEVe5Kf*CnKKeP)yBE9LoNFlcMQXC#)5CXJG74%_jqd z25E*hNAp3*!Xx4Xjkf1~``n&PX)(iQKS zP@3*~)TGGTjN3c%@_>)aJDp;GADJ?c(O_m+mUpw3cOrk{xJ3|mxz!8LsRwPO(~B;r zO(Po~!t(s0j|`*M#j%`u2rF(rJcx!&}lT|iBF}?^eVUrPrVxpgb*WgKJ-bwZtbcy zcG7AWrGaddV{PWErQEC3HCBSURTp~G=7TY?TE9&aD;q51s|$ZqzBSh};lZyBt!7yg z6J>cBmcx+gQ~lTv}gbeQ_xEurYMl z*PY+yVEP>#z(A(Uz=bI=?^3=3j4CvU<3ej8&we;XbK)&R{0-mrr9QvDDjXl}%W+d` zWP=KCxPazYT6bEDh5lhH79;L;d%m6}j8aR?Z6jeW{i$2gclT~9efV*yTtDx*AZ5(K zTz-6VJ%u4#XO(3ecC21pg-I*cjw2B8wyos9IBdzjTAgHrnKkxCz%bH>dW9^H#n1f5 zz;~iN*s0%-)w-a#(wqtcRqetlICz+a+m!>LXNY3=G;~sOrfPUMT`UmqOl|RV^3}h& zdT~}AnsTvo>Lk`|96gMvx*Cg+p96O!u=^nzCDAv9XuG#fTR{%q&&<+-J~HWbo_~87 z_IGyT&(~T6?wOE4Y{D|e?DJMB50VFJL+K#%Bz`;TXYOUy%PgkK!A>(XZk{UqE%Dtr z8B3!EKNPVwQknCZ>A)!}ypyB3igr~!u7-RNfJMV-qC9{G}LAt`8=%6Y1KBjb)}1n}k_5A4us$eEzSj4k%J=cdlvJ(RntnNy6-X1r51${#w6^0<^(wjD06h02 zPC$F0zD&So?`w@$d3PEMyt0UMeEoQDM=EeW|C$Zx0((4<3%UE7jU3Z~`&NNZX-V|< zJO~SLOEmo-N(?3DkKGNS_ur4oaIo!1mX3mT0yJu!O9=Drz$c?|krh@)0|sw`q1Qu0 z4fzL9D?)inTNjj&!?+4`N9ffr(4tSe_E5`Eiya^w}DdI{jq8eB8OY#67o}OvHWj)kp zS~-K23n=>$JU!M1pX9DfcqkOt4h|aleUYo0Vu{LnYrON&&%u=U@(~z7XN-=gzt0V2 zG}z4>tzCXj39By2xKS_e>sPRup zA_~n8&z?QA?Vag=*v0i5h>GC}PEQQc=k9yy8Lsq$Hec5kk)$&+B?izwiz10>NYgzo z^&Y%p{s%gJI4;#zyGp^%2>gqkN0L+~-syND^-V`{#GdQth4BTAHf!~@VmsCxoO)7k z`Kk?Gsi4Fza!*1jdB}%-}ek|!ama8877$jjw##(jZ#fR==8s;^>6&h zU*9BiDnki9&n!1*YX~?m6Dml@TOKRbN<#wp2ODG^Nuwh8hNeMPS4IbMS9_z>2I*@}MxCHCm_tyS`taS$P{B#c;xz`q8p`v{P7?_p6{1~>H zkaDr*{xLyMvHm|=0DH8=d3h(UiEVo@>^KRIjB0jF3lCAwj>UyD$==>X8 zzZ2F?G%wnpdClHyHjF1?{LMiccu#?Xn%Y}3hBhqPCzMWv1@N(>qLuLn1)?(vqROlt z<*=WRd~uE#e8TVfML>VzN$|TJUx^o0J?aH&+k%}J7mDPM#of1(&4x2M)lNq5#(UcX zf>Jr+=!I6gAP?R(PFyu!P2k&w7<3Os z`+z4RYD)EM;b_jQYZwxWpXs-+rQt3JcJ(U_HycW4YisVz5OaseRq8Yvfa z)nUwsw;m8n_x3G#X@9CL(DUE5PD}KNDC|TgCzsusyb_JhfCBv^-{qID>A7NZOBnH8 zIIl#^$W~1*jRFT7HCdy9@o0Xe;R1-u4=S}MES zRMYlIyRFeZ^9Day7pJpBJO!_rnN&QZ-YY@7sYm6_mr&xRbxb*x^>I>3FT>f(Xr|EX zCpjUJOGc+{k)>kYb+gx`R>1=hc^opPbl3;2CpQ-?JFmJ&yFltPK@*W zPq5|tu1M!OgF}vLx+SigDfX|{$}c5Zxc2OO1?`i;z9{Ggk`_ zn5@sVsQ+p}Vt(T6qlQ656dW-{TE#5YWg}USAAg+>K5tSWA_3s~*1hyKX5eFFWPu#{ zC0n&CaPzf=*FZ@emTt3s{I>j%fylS*Ve5Fzg3|7Wrbg4K5W1>Q+AA zsoyN?5bJp^$O~i-wgPz{0t42Pg>_S^3wj9-P_&+VHD}gQ6Z1^&R?k<=cC)ietq}y! zzEi_}-j7t^i)Nv^oUj^7SOw_c*?6i2^9Gxd`W}_A<5yts3o^RLOERlta%vQ&0I?SgEvM&5a4Nt6cALC2qGO<+ zp@h5SeXILV56}QR5I~b>8eF|=n^uIFS!yzfh^o*~XTpW=_VLT|D%ClhkUq%&=@ zv*nphyKe6T-G(b&_ikC;nAx!D7S&Z-!&T_&s%kE6k$DHiJsDB~d#O9rjep}_63R(q zGe%MiC_S0WrYB&CX~Dguh)JvQOdAj^XSc#VFuUtF@@Gl`0A}<0R_#JVh^aV*=DmY2 z#cTpEk5DQNoC7#W!2IRDS_~32RSUNL#Vv%Z5cZWfKYWyRyvEK+=hKt;E&UV*sx(2o zr8;lN_*JqKK_z0OmL(m7osBCsiC29p7@BGjRU3+W@FK5s2zoIPQ4||Hl36 zc-`WFDdI6d!vg25Z50^IhcRSAyN@4w(EkfrUwX-qLRf7DGnq2orUU8+)oSyY!|!nd zDfVK{fR|aE7sqrlKs|m>#tDC1CsC3jio|er6YUKz0fX>-Ip_m^>U;Yi<~2oSo(zSS zD*)P0DR`yDre89X%VyrHmxa=&k%(b<)2kz>3IQHe6jCep|>1_#Tm3?GC zZOL#4=t|PN4V1L_2-r~=f$@Nw62;Mr<%n8yD4cebkNTjL$N^X(h0R-kgw z&=_cBG(Po+LQBkZA;wH_@-`vF6;SM?1s6Q`u-BY4rTNXh#gO4%W-=B8y9NphnGla! 
z!4}eN?9=p7w7+6{hQ(X&CiGleg|})P`Y-&zllf`1|Q< zoRDZ_vrs2|G#G8KeCww7MRU=D?wC`Dvp}|xHkJ_rx1Z3_?YPj znhf*k@xc$OCx5@59}@<~Jmyit`}Ch{=RY6iU+YEsJq|$+E-Ck+7=**_w_~Raj0Rx+qde>i59f}6PqPu&8 zXL1TG3uy)%{weOaQ^G0NDBC|B$EUeamxKILKu>)#$^0GST&7SnugyIA)8-IRz7p;qVD*a4Z9~&ZLPF-@ zO{XQcSwGUQ7Q_h`=eW;qNLhY0V{%QrGX-Tg+TqzP&aQOnw)ffgsBeC11vQ!{G_?PDO3?tI6Q>PQ=+2t|FcSZ^IDy6Y z85lfjg3_aZotR{-fmvN@M?p^13mdy;?7fK+#g~_WJL=fVJJMp1q*mS!4wj6+$3{LB zT?nuf`gTXI%z%a&>Yc;7hE7>sdH4%)Q-&$s;iI{BvXm-zeNGt)w z@cGd{B>vFOT>GgsaSd)wC>}shMs!{q-{zgJS(K>*3ON-jYUDV29W-vL(KxF?t70Gg z|NBghamBei#G0YlogVw^Ek!+(v7SA|=>^atdWbWR#=fyIM115Jce?IPxGlV1L%ZCF z73h8(d#631vU}dgm>i&Ru{4MuWO>!Dpz@4ox#TkMyO-o=kyg9emy{UwvZl5l2p5yU z^Os{>No*R=GIvI`5o^b>O~Z{4<=g|lXt>|G`C?Gsem9!W9N5X6*yzMt3p|~6 zIqaY2vjiB{x)z9PbLZWKH@z{E8TU9}Kj#N}@G1}JX1NR;F@b?wPtdm~x;MaFGbHtr|p&0EC!NgHpZKIo(0$<*T>c?I}^wx92a7)GE4ND;$~lgEpYTb zh5>wWecm$WLk3aRjlHP&TvwXL&Zhsot_~;uOtuEi9X>e|71Ipg*8E-#PX_i!s zG#l!$pJ(gWsXXw6mM!!CRe)Y;5JfAtlTq9aP!xdr?AfQRDj29{0h(ODPzu3UJT?=) z=sG|d4xBW;kSdt*a;RoKU?|RGeq&vGy8%z$LPhB03JNBkse@WGyvBA;C0r}Z>$du8 z4=`O3R{$-^;ln2L@qOZmK^{aIt|^7u66?j*i|A8DYQJ`h zX{MmAnLJa1_n6JPxNWBjH-}+;H3G^MDm!Oq8}jACe=M?K@Y2P8NARQ!9!)F3VyRL% z3T74pXNG|bPOf_;eWJh(lAJy6WHo^K{6%1q+ITvSi+(}V+iv) zIlR!;cOzFZ;`XR`_g*{_)l-7?X4EJ0{0q$q4D80J$nap7zs(KBG#pmQ3(ytph-i z&la%zPm(9;a;DvvRYeBAr)Fk}d8k3g&F))?0c_2|GT5pvizhPGVnJ@4e-|0#l-fJq zVa5(w$QTkhkt=t246sKLzMkv@$<Lbef=DEWL{K z;7Ohgi(M)eD#&Sl;Gizso-=Lk`vt$UQ>}B30F8liLZT0Vdx+)cB8Hy#Yn6TLi&JB z3u=ulzWSrD&3RE=lfG5U*2(V;K5q6;x09x*hz+iU7p)tFg9YlREePwcTu>Z8!UIxF zDiAtwVxjSbP`ai`b{fbS&U-g#{dYa$cw&G>Dp$kVuv^wYZleuZsxT^zMa&Q0FX*4) z$3Q7**4v6mbPa+i{qGt7)q@OdxfR1m`Twx@)=^P*YuvB`B8a3)BPF0nNjC^0N@LKS zBHcrSlr%`UfPf-hLk`{D-7)k40}L?4d*f4{b9|n2{&?5=*80}F)~s3F!=8Kg-uJby z_+6#qv2&#9X&DlxW$N%giE zW>?*174iJ(amiTbvyOC|b_4kt4@Z#dOYD4;@LsSBZS+_rzPtd>q zuGdnWK4CWr;CHkqGS3y>m_s^rZ1T*jjRv;{T(&l7aqUavJ=G>(slR=FVKr&#k(dkA zxf0&0={`g170QS;R1Db^|F_Y6B3{bt$52Gqtv1CkVUsI&o}=3*HT5mm1;JYFM=< zrzbQ4@UDjLdZbn{v*?d736Cw>qzFisEV-frA>O=zEBv9Zlu1GsywhZr{1;~Wi-%X( zMrY(_0BZ9TCBW-9;~XoU)>&Rvmt#xC^f58jDfZA1mGd@)T=$tG9Vu(e)o_0@zpoJs z-r}BPRR?eFYL>;@14lr>Zl|J@9wXiGF&D@&GG1F6^}*!9#Xwo6w31V`-)&N6ARMe@ zQ;qO-qLz3`wH=E9cKiCN2oXEPuFhy=ojVy}5%kFAYhG3S+~XoHY`t|?X9%+7~)H5fGANpx*W*aOF!x{1nr+Axa*Qa$mgi=+HSj z=S^WOvpGs@!g{vhV`!hc*#{R#XA;jH2oQ=~90Y;vxPYVLkqW4392duu&l4+&5BVY6 zNr3YxC+m_>Du&Sr`fbP~`!ra?M7~FFB)6e4Yk4a=_)x0P_I#Kiiqs5lo^O^3OS0Y^ zvnZx}>=vd^@}IUH#uva+6Wy@21hibL08Bvjn;w5Sg!SkyvgNHUNQj|ftRUU+VRjtf zhDNo9sKw=l_YPZP8I&DS6Jqq#+x6{tzndy{E|;uzCDlQRd<9bRTbYj{FT$UqHkIVs zNrtFJZ(lSYWXi;U8myY3Rm6D-Rk7`HoG>2}jWZf|8_g&nx;rEq-m70en~jzuAS^r! zq!W@Z6e}JLbwB>vPz2R-9z?@t2tXe%H4!yk)wVUbsKd&H!zpSAa`9Q-DH)wcJsC0M zo!gIg&P7MzQtyv03U+mWv#EyjrpxF3P<@Kgu5Ut7h0{hlf1CMtt?xNkgwqc5uuE)- zE~yzT(pq=Yy4n2!9YEXsY_IdrnLF25(kH`Ceigdl1=##V81_3?M*0IquVIr{Q=UFX zQ}3RSPhN75%aE`1S1z=kv2{cO396t%SsqUA2?=l6e&89#^JBZQkL@pan4p?CZ$aJW zIN**eLlRS=iNzlAGJH*O30m@~Hj|t$vShm?H_9?hTVx1Fim|+6%jxHb4|dRJ)12o3 zPcZ0ua}r-km)7enMbx0ljhRZtWQr~qbC}EwcTkMW{fM&V-e^Rq7-WDnQ_e2uN{PIc#v^^V>quP zUhW~uo;sY9KfS$>DKTRmA+27u{VP!4ivZ)2^O1rh6!JqMcquo(VC3kczjHQh%6^fp zZIJco&dGidERn!AaZ9)-@UaWExE#-L9h-8}jMIX&cPo+fOQiy*qV6RIq0^2}EsqV` zj%M~(h`cjr^!qxBmWZHv`?Y?sV)ScVo4w%mGy7Om2!i5?S<)GwIS_hWqD?QK1XS4F zeqgFpfRHOtI(N2IC9=p-mu8HwtSU9jG?kDcspq7B7aygzUc3+=&TrJSItaUtJqfrg z%#funlzfKleW3}S(WYvliX~K+XXlpSj@~Y)(yL4v6C$o4S&lVMS|;hypcQs5b@T6! z82F|P(kkck1$e3ufd581lzd{#qUQ~n!21tdvt?uGUUlo2$+u68eB46$`_$_CZcdH? 
z7TlMlN50OILw>V{xW@)>oXL6`T_ZbI6Ji2gcOzVAYB<@lWKqZY>$J=gk4Ct9hKDPU zI8A%Y+Ub~P9Nfn0GmhrK(VW)}5a&+6JtGmb4yo7Wue@ylxvQj|zb@PpY76hG{Ppv> z`!V)*WnMK)GEqBru}+GNMTFy(rS0|Lp;ez-h#`;)!)~#4SABI4o-+4Lq5rc=fiO&} z6U*z~OnhGy!!T8HOyqEu;bZBwHv9tUmw`~9u-@c-B(_1FIUAl!q`dS$g__#2Qklfy_mRWDO8TdJSoUjE4*Wj z3pA*WUh4v{3!1*o(PVwiAX{QvW+H06c&ts+J*7nVN?|lvq{{$OIa=6}mku90DK9e} zonm@A!+qI^2!vg%#RSDNz4!h^3PK)x_p$Ps#bWkrDyMbRP0jd`W|?I^uKhOkKCTi^ zv-$_u+JT(r+qSINSSL`gcfNW*A;oNX<%lb#C5Aq3W&aPT?g85MI-_xbz}tw9CijqZtJ` zq!H`IF%w%8NOM?!C%wY*WDn1nO^{8=Bh7WSkc0r^iwe9uh)>-KmG^b~KV8vQD+*4qlm zMPwn9amYm9JdhCoH>41_BuEe~?(gr1Uu;C7FLFLaC@k5;YPuG8-dUh#OdM^c0rHSX}=MKLixApXKm`jb( zbh5DRb9Uq22P_Z2jExmX6!Fa=)FNWb%(9MNdrLoh^-R-D6q%FB!$nL~dH4PDW(%S! z=DQ%NbGN(pDLRFIRY3~;$W&-cmz2%?{FqiT#pVY`N#8;v`-Po=#)5a4{X%(SeXaAA zA5k~~D;rVj)5`DUDA~Wcj%mGy>5o4Rg~78-cIg1aZ0sun-}25|Bkg8dmFgr<;p$vP z1Ia2|&bX-aMhy!Kbvqa-Mi4LU=kaG(0T`{x|iO85kpZ@U( z^F!*tz0Ni~E%m5Lj0Ot&{qpJJRoHlW-dHZ}LR-l{Yp|yO<8Ci! zbPaDuTV$p}%w?Sy8x39WdRz5^8@W@hmGi&9~gBLUKz?{B;x`BGF`(n`-$ zI9T>i-ySAK{wGcT%Y-pdyg*&ySRaapBz}mr7bpg@5-7U&{$Zfpmsno@sYYzS{ z{{6SnRQ)K_UOU$if9vY}eb|#Lq`g}{w&2KZIa@cV!M?tk7H z@L2;My(c1?t$shx#pLV4ssSxwTk5# zH|%zVxMbu@6_q!Vn$_mi^k#8%O-_%Jcw`u3Fe1Myy&U?{zbU&`#R$m!}etEfFf|(O$hyGtq%~v#HnHCT)X1;oqAE@uNjJ9C5djh?#Ip{C$>ag5lxcIepO{aP2b^J6I-L+NWg6x1N3NkvC0IrLlK6(1X{k-oc|mI zHE-$Fyw{Oa_qO8xm7dXj14y)8_K#RvnNx)uE}YS&JikK!H2q^}2s}?e!Q>383zJ{^R?Eosq~4x|8ZGRio`RbsVQ)yj})mFchTLenGo z7)Nd#rd8=$=E`IB#qM-Yn%$|<@`+}{j(qZ}o&vL*-@z=Oem7-GrGv{5`aS?%Pf6iv zACObYS~%51jM)vJ{4}avUVp@DmrW$2A6oUhEQ{B6?vwa2>oXR% zb*Q%C@UHby6`$=&xLjXZEU>x`5s#h-%I9YWV2@)5`4j6wM*;#H*^B%pnEvvIzbK}G zdRMfhI@8s+7%0B4M8aj3OD^IrChS}X{vwwwwZT(!QtbUuEB)hbGI$xeu$>AJZ1gaXB%S} z5g}7e22Do~FQ&I|-K;nHF_VjXaqN4Jun&}nuIhn=tY%{c1sYnT`KqiyBb!Jd+_U4< zo-EVg+oHo9Q2oYml9dA}xYJT>eNJyHhd)p`VUeRewKG=@RF$bxa9BGMu#CK_M`Xrf z*{1lKzgTZ&^UKVo+RLnNl3FAXz#yF^kDM}CWRB{mO?y>rhv@`hIL}1h43q;wv8?Yv zKz*Z?*he1ir)6ZBNMp;F7bs4KS(x`Pg!DsmwN6swu^was7U`>mvWmH#5~BIP9HycQ z7*Wj1Qh>{L;OLMWjx>bdOeySgb9Udpu`G=ITCg?e*neiB)_p-e5fxB3L9i(cGhsSM z;*_`Xn7IZ4E_Y2oH=b7XL%WO%Ipk>n z7O{CW3QuofH?Oo=DTQ5iT^*~2P^Q&n_BQQ}rE;tkIfPBdUeJ}Gca&B-R_j5`CMzU> zV(JVtB5p$#lR)yl;KmVOm>R0XUjoEy{!ahg(oR)Dz?q;!;JK!C+fni-Ru8TCOBfAO zAO{BXJ;7SOlgu-2{@9p(xwOh#-7#bA(18Bw4kf%z17dzLE%Avdr<-riTji%%LMj}T zJvCFAl7&)@OruU9QZ`uplK1YFSeqxkWj0qnRITpy;RJx~KhrUBy_VCCFEJ=iM0z}F z$7+51&7ro`DZTPU=Rw#E&vD|yqKm9!GLVl<1td#H0vPe6tgTG>G*An`;RHZ>K@Ge> zdmrjZ5@_h=p|uE~%TaE1Fd`p$6*tTvpS=IUx+?!E1bN?VgM!E50L?P~uj~n6>A;!# zI<^kv9)r3dSf|zRfFL4Qo{1Hpu40$KjP_yqCak>b;^Vh3yA}wG^t!Gp03j~pt}FOi zWbej$YI7_D4%CF+d)e{U{FX^Tl66+*W6jFW_O>cG=!eaKkyP(>ezP{w7^tJAzSDoC zI#Fzr2+qe!V0ILrQOn|7HmlZz&iCIgzx(J_93Q__hjgEhaSgLt;cyJnnqdw(pF9XO zi{HwbwFJO6poUACp!-?HJBQ?Qb+X~;*ed&?u1B&H9lGM~g&rs5eD&E+1$Yqt=!dhL zdkI3>Ityp7zFJ)eYPKf@pHmCsW!r}RRCMNpGL>{pw-(g#`0Ppi)bVz|uvR=gx=Xpb z=ST?f0KWojQ6Ss2)EcNwN4p(nC^#*5#9+8R@8=XWZI-)t9QOXQ(LL+mVjVM6WnbsU zt}&^XEmfB&A#_4k&(jw=#8s7{?uR0LNnseAV zAmLDMyFMTagcx{dj5JYf?ZMd_=A7x7+%ec^Rb>9Ak|L1MC|PAyv*|JAFLD`0GK#wNdHhn zxp(l!Ryx1G`485)$Exu4#dR3HI=6N=J#s#hXE_)%?wo%`{-gn|T0_9~_@jC$%)*^- zE+hig34)|yMY`9gLnZ8YIwG@aQdjdRfyz7b2`5mE@IsxmGk_mjpDcmvYV=k9;^Y`w zUS&oWI%9TPygw^*4K!D?To#>t9!Zx?Vnr#KJcvHaxwgsa?79{}m4HUf^VjVICu0PjdQbnlF2)rT4nS+l5C2sT>5SFlna%kjML=%l7a z#+#MxUXJx83oz*F>V9Hz%{n+Yuy^n3H4m@Qrs3Q&|B-kH2f-e@tqI8TmU`xozc@gBt; zv;9#*;^W>PqU_Dd=_>Sv!1!)H?3zk_E}iTi3RC6d4_BHxo`M&>w*4L`Yo+`s1wQpT^LelL|;q?PoqV@8~Q*Vn+zTEFKv5rbJWEg@#8N30FG0*acbj zt|$S%$(^3{w5O#&_JUN;;0i0I3Hggaq`?Y=wJCFe&%w=57g4huff|F}rP-TWbZViLqYu_$tX@IOnyd z%}Vcrx&HOPbbPPx_`CX|0Pq(E2<4iSMY{~FP7}6XAP%AJorA`*er-_U4@m=L=AegI 
z7Ll0GxpNK21fT6D_pz*B<>h=)DQa{;VigE7hY9U}(M;JLdNUAfsa|d5-SZwkLpW31 zdwuNW)!xs+#<`)Yg34#`Uh~)Ztvf-)@4Q1Tl+An#f`ne7&<+F+ z)EKo@am=|aN~w2#2{BnruX4j@GN&MvBicD*Ig2h|dy+Oq3bTRMBJ7RYtPEI{$Fbei z!d)E?TwRs~EF$j<%@HJD_HZ%3e1nJ~Gng+F=JxK@=-&?9)yf6oJ8hoFHicCdO13F< zcGG6dLcdJaUFtMnJahYoWeop|H(2%DNIyQ8@yIqCDUUxO*^QXCR&IJ&{bgm$EzuHP zle13v^gatvz>5J;9=6U|;sq==4=;$WQ`pw^+>zstxGX1UvS*1105?zx0LA zYKH#sRQ*nFdUttkS98hPRwfS-bL~$d#PX{@V{+;EyU0|Rkplb+A`>!Mr_S~s^C3q! z37lyO;S~ET?b@pz>m448{Ot4-pu$kLv)PUy-#J0>(B?Y1z1n>amIhuS)&~!okGyq~ z-v1$JZ4+E0!)7%`mdKSCQ;mR+beS` za8u+SCE!C>!(JQBWcO?*a`SZO!~Yox1lT8Ic;g)E5L1ORGN~x@p{7w%wST@Pu_Je< z+Ya%61&w7;-21|9c_X~7Evg~f7O61nU-{wxG7JDVw}rAt7>?yYeEK6I>?fV`-{1;Q z5g?o3L_EUuC*$;2qwzuj^Y@+3_}`@c{PElWpWV(AyWJPfT7@MJdB2B4mjmGRzgop_ zpNY~C6sA=$k@Vi&4*yUIdm1Fy_6OJcH3jG~1DJv@+^^--$HzhE3hGcDr2Ge30p#x` z*0`re3&HP5$ECH+519Pdx4$|eKJ>s(oY?xk1YjWowK=`5up?EZ+4n8zryT zm`RWMwy!2r+pN7~$HcN))cA-=X4GM&bqk7J;+7#}xSZX}|Id3bcCX?ARW`+lC(+Xa zpVRg=qJJLFhWYHFDv1n5cEPXK^3!K%-T;B>fTRxa|8|qF{z3Kb3aH-B$XNR6yT727 z1}Xr@Ax`(Ks;WZ7W(Wj~|*XF~g+1YrFA!uBs+eV1Fnv zw*id*!@{Eh$$un5ug^euJW**_Xy~8l%P$xGe|hVODI25ys0Hx<%WV|ZyQjkI5x}Hf z&RB%Q`Z8p5y3&??sV~0RyzA%z*N)Ept%(w2AfaT1eaAnb*s%|ybN`mxzj=XPh@w0q zP|#lckyI?|c>|~eO}^(FEOVag#xA3Yu^CI@0*-g4UifNCMrB{PjuD;@m;zg<@WX0*>v)=8v9mo zndv91cc0cRpRj0@^aC)%OHOO>^?(( z)lKC}_ssvU&^kpq)VM0EZa6x3**Qq}m=1j6sF0y_@0)IZDvd<5cHV`0tsG&mHlsS& zEE2Gr8C#?YL)Hf!E&;ei`YOi-0I7=jg{!PvH_`SrT#D47o)s3(ouzUakGiUsEs8iTL1ee}1;td1I>8SrvI zGOqkW7U3hatC%-+v9Dhu&l?_ypssI27(s zJq0@ICaHFBbhX+_NXgEtIN4PdF(W_gv8vAZ`UIdmIc>P@dWP+r6+3~cwB2g81ue_g+DH3(RI3Ws*CXf{aFj_ENrrU z@4FA<`p`F46OrqPO~CjJd8;6Y`d0hsLTNBl{_!CImjI~bLA)LUOYyo98jSjG)G45i^lyo$?pP2!+JB|2kQ-ePH9F1>2&V+je6flkrF{C>n08sy%&c0 z-?cK-LJb3qf{BmP#UJ=B$k9*I5Do>nNFkPLsG{B_8^8(5I8-Ot>eR||FcIGHSh!nB z52J57*)~}bn~{6RguIgsOO=iFDIWy}pw*Rj_YnbS^kGCtm)wrjkf2d(smnbdC?2x=Ok;NM$}H z<~32i9sOOYQfSF%Tz1jtLP2YeM&T1C6IU8zjXH#HN7HfVhEVbLj*p}8M`p|7vHKG- z=R_i%F&!+OKIJ=wOAZ#1vh1f+NoaWI2X`# zs-|=-^%CD}E%}7J2sYu+4;sxsXW-T(Pbn#CeJ_UgO!UX=vjXOe@W*dzax|_kr6|3; z#7|G;y!l2++_vw5tJ?10UljU?!&e&O|CE6%85wQ%<(mvzI@+35>e;%oMHqeciDC;= zQ`dePk`e(|H?$AsxP;YRaxbhp(MOUt%F(z$dji}Y`ht4dA__iC8PC;D=kd&LLy`o! 
z{Hgr?m0JSuX|6DC)_U*iJYC+LJO_b?dJo++!LwFiqBkF?5RsU}A4x#FCaWQS4~g#aFJQ*vB;y<!&o;tI>ELCFyY#mYwpf%a)MvBr1n(e8M4l(c znUnU-*>fLzp57rzuJd40i89Bh!A&=-aWiYAS;VIJKH!DrG4SKpc(b8GA5kt#*V^KYy43R$5gVp z($7+*W1aAMa#KG(wFn!zy=m7{KItsYy4>Wn=<_l^#;N?EswP{zwaED(tzqXJld=+L z)vc@k`c|hYYk4n1D&Kxe>Egsc+q8x%_N%$OA+r9pUjqh%y#ROjAQpA;+VFe4}mBi5ibekvxscL7$_ZExx7cX}-EDbMSYl zkj)aEd9f@{~eJd3F6NLhk|t*rfDhA#$8@{q@)p!_eq8QcF74VA|>qWN%Q~?Xj z8PhhiQb#chXm!7^}kK= zprCf&$SxxFVz9;tFA5Lnx$N<1YTthI;xO4(7fm1yR(OlP_Xh3oaz8^V#{%mRJQN4i z%uQ*7_qLsh@iTr-QMcM=Z5O+kxh{nO*fyvqG3WgVBVY5)8kbp8o>>@2vTrr4C`jPq z3vCbFf}?H+WwW_@Z$m)O{PSyk{3jcEQSU|YKcFYPW3$|`gb0@pY#-dwSQ2tX)V*d- zHK*n-njzg@tUDQ>$--Y=Ykn5qG!)(|CFVt=Hs4c^v8%(>=1>%I*)T3SS@Lc`k8%fw zyZNS>_I44P{)5wU!qJG`%7%xRWk(x$t0d4AkgA+T?^Y-MNV;vIZEzAXFMA zJ?QOm+3}$Va6oSOmcOMGllDZ@*V<-5?I$ifB?jlwCbD z20V)p!8My==N6bLdQnFHB*N6X*czktWMpaUwO*UZ$Z63gCqb3@Q10%xB%X%BCm1Wo zp(08jr-tgzdx@OBTt;dwSDz&Wxwe7c5YMDtI9wcecXVR4S85qclfC=2^0K|w%cZrT z?&AJTQXSD3{;(L;aitxnk2f?@C8%ZIl~eTN;*VpXRp8QyEeOefR11a*+&9rxEmak) zBQdy>FOTjuGYaMR2$Oi&aOVN(JtPQrYzn_fsRdqeA^#}4;Dkl#mKRxb*Q@Gfph zTRMOigb!4-$?Bs{1jBN(I#6glKX-Yit?7K#bhKQXa|7`hh$;{BYK_gf#;3MaCXh)V zB=z(-GyWj-EAm&U!88E0f?4F}(I8LYIAh#ZM4`SMc!MP^Io_2Ap6K+i3tHW0OqW$I z;YPX6pHp}&_l{pECgG+GTTP!Zk32sbyG>{d8HFRQf8CnUw!@TL?N@@%vS|-()(_b3 zu5ks%{q@FY>2A!%M9=+&;dt?Qg3d`ZBh;bToQfZNYlLL_#jb&R`C%Q84$o{^ku`zT zH5B&UiLsA9%(%mAckq!=lon#(E1o-+->jic56SDT+hP_a&NiS5mi6rJ{I7dOw zAN%8FfM7y;)KB*A*SiGcg)Zlkof;U)rA%AhKBPZg1>ra!m?)>YI`zftD`uIIAUd)1 zOYZrTC7UPXSjL*Wr&E$EA3b7RQ^dg)+z+*8!lPI(L{8e^(Jb2WZsAGBmFQsPeH@Za zFbWLtL0>t0nhR;HDdc7CcO)+Cf8i26`Q zDmmpvEMus_`IWigO*c1f$DuOEN>z~_obQ|JXi{`uy{g{mTHwN>hFMpkA>>1M(FQTH zW$O_-9+xMf*ZDvo+ifk6k!^Q8T!cn5?=$`#aes>0*|{Lhr(|3^%(rzK)T52JmyXV$ zDM^$|LUgyyj+<@b^+Ya@gX;X13$&OG^9@t{pWUi&ti9G}zdAC*2#?<)+Q_KKoOh~~ z7=^RGxi2^rtNP*qyX;xt1a{u*&EKf{im-7}2$loetY1Br0Q8^FDg;!e3dFTC*@D#Y zQ<2Fw7v_6i6t6O{qU-BaE3vu~s0hl~S0CNwZ`>uP?hEW&F`ym4LxMu$7yFu~Ng4~N z$)(p^!g@T=`>`2RtFxS??}DKkLJHdu8Vlvk$q7`;LhVHmIZHX0rj zb0gYz`EBcTC)E3ka2IN;qU2=KQG|W4(}`@IaB@0pu=7Y?&?@Tk@$jqZA^t0@cax=>m0Q@hK%DgrgGyAV0PsYuCNfHHS$| zOmX+;n-AWwYH9hCPx7xG-irbAz4KtrwPETN1Sh7RPcZOF4wRVpN{dn0M+BjlWgr%^DpYDV&sqWnQQdKc?;i#4%*$;6VSgC5PGkXwDGauykB8p z_98cZKK{<#F#hr|V=SoTZuop0d+M~!bjCxX8dI@~K0EWK*Tv6!HnehuR(+bz&#!sy z1W5X~))=@o9Z;iW24CyTm_dw>G*Wo12^H;yw&~wi%-c%|DIO3#)-E>qR#&IlzsW@A z+CLU81CbMB%%lfS9HQ_*@@dr1gua9^N<9v@;pA}PzyrhxTsOw3@xZ?lL;AscKU zD`S(6g6#!T$Ld?&0hA{&IeHA6u$P8g41|Igc$i4+ilLDlO^%WGYUN&NAQ2r;x#7kl zy$L_Hbm*4f=(ocw0{-y|yy<1rk`ezt^NMqS6t#XJ+QyztOw1Q%KocG)ilP3I$FRm% zE__VeZOT8adfvmhjcBjVSSE43HdJoQIT(@fv@`(xgtb;l&!nHiSvQ+(*{gA7MSA~5)T!69u?_m z*l^|>ddAPzbZ5&5e$XKHuHju=i@i04dZ}$>E>5Q^p&UU|$wOkkezs@sX4p12sdnz7%6bjn9Pr_w` zE(Ds>7nUKUF=p$$&~kvld@d|baall-=uaPN3Vb87_U|tMX-Y6-eAm5?6uQU@u#glm z#=rcKDL}X$L+fCpMR5Nho6WT{)i3^7ImJhD?RgAO&Toa-l-f*fa<; z^2w5*(^%+@n)C&I{TrMBv3g$98UQuQB8~iT6Kf1-_%CJ%bl-d zt56myTg5p2uuqFE^Ead=&Sznr{W%e!1GOPJ5D zrahj$cFtR+G2|=mX^i7!m_bA(Qt{tF55ktevgJo$M#9UHh^OLgMH z0>-H2lDAzNSmnx7dzhQ)M%!y{mCv}7iBJe{s9%7?BZ1n!oi;NxomAKGxv^rFN4~$U z_A6ktQ$eSWjt7}jx1XDwEN@u6ZF6}9=xdp@@?^8B%e86Z>^`It3V|ZCprsprATga` zMTpqZn39;AuG7kza_1?P!SFf#K9wI;Kh4g<%t3D=?1TWMONDoFu*Br3o$7xs*qjHT zEGKgyk;M@o>U?SpFjql4mEh@(m%>Vkl5bk15?fyiN^Q69IP*xGzIo58hPnmU?th9IFw%c$FjUClLYx;Y$AZZ3{HZh zG~~vcUp}aIc%SGfS?6*edjx4^bN0d^xAHk3VOxrok4WFcF3V?20QOk*P1{}lU0aSr zazZ{rbL`kucYD%<$$!ydCsch0exJY8d&#W@Fs;l`y zvsbu%8CtQ3gZmrH{48d)Tl)>$>Mm(`up?EGK8G3s&O&1&#->OfP%BsaKEM21t*%?4 zqK0G`P4$gmeXS;$rtqYXl4ipB%kqD)#w+f)M;%^ktdBY=1VtEz)_pB|uf}@H!A+sf zty(#LD*^91KJYr0$M6{d`yk_wwr_(amuAi!)?c)2z@lcwn*mRUp@&f%(SXWjq@YSe 
zx|nucwk5u&C?5JQI;ImkX6%b>7v@d8w#0Q$Zsy_!nlM5XOls3O5_K9Zq}&)d_tBH2O&UyE&D z8=vtBajw`4$TSm!AQZB?;z#UH)dKO=Ft&TAH`GI`n|gkz{x3b)D|5`BrGfgm+#1KE>+LBR{G*vd}1bBlzmsst}HVy$Mtd zr#^lz4_A7$pWlwBuhW-kqrU6IQcxRticdqkiC;k!-5uh=S^Hpu~;K#(_94Ti%gZnXz7yI$Kdh!4Oh|W2jsME@%$MvJ1Sq)<>dH% z9`{;?iX?9-N`u`ic8awf;>RdZO!(2A_>=}5d`*Dbt3>t(AC=FxVj>DFcg=O$B1LwH zKn zz4!8yS8%EhIB5O89BxUUc~7T9asTud9fvmATpJ%OU$F-L}`%ckge9g00x zA(2)W^9cQ%fU*elPhB|pHx;dlo$f`%;!vaN3r$`uetqKZj%O-%^MZPmP)q{{q!VsQ zbYc$R8Vx~jAt*>4pu8weipNJ13_Fmvu78XDGTo#+8b{4hMSdc;=~gXx7iZ0WqEZ-$ z-dpjabE;kKPBEpX5P?rIJTS%^N>E+vQD!!v(nVchckC5pRT~VrSF?t<4=G*Z8+*oW z^ZqT{(+F~-h9fY!BM7TLz|{o6M`m4&Z^$tt`UDB1R1Ns5@wE1CJv*-6D}StM!Ru#E zcRJfDxVWZ)TsxPqC8DB{;#HbPip34CRcX8Jm0=o{l_UpY_ymKXDhsl4?HEBFUr#r< za@4Z-1t2c~Ap68J6mACh@s#w#bOf#6Xp)Yw-K2m=Ehd>8jiEr9KCt-3#U?u3A1}{z z>9avuH1Q`QP3dD5zD(#lcT}iGb<-rb%=Zn~?(OS7ec{M7$jktnnX;^b+SXZz_7gT1 z7%_G2s=ojWQ?>YM1#{AYy_eYe{Vtp-p8_V*|Hkgo?x_#?UhQ1K0ySp&`5c zoAUCV_1LV!jUDd#)O)cmK-Dl5_HeGo-Y)sMh zsZ@1IVbmDDx1F1C_#eO_WOY5pDWoQd)hEKK%rNxnx%}#rVwIw5i*0DCH!AuJ85z|b z^{)XawDm3+ec#f&Q{V^#8wkM;xiu#-jKxO)7uC7(?9JS_l#*|wL+n?K`s(HNN&K@Y zTID>+*r4Dd-u!V$d#hs&6;c06rkRT5+;8BXul#%|hkQ{)>b;r5v<%m;Oyw`cPT(xU zLym-S0lRh%3QIfAJCinbF+FbYIMFKcPWR_LF863O*>N+Es}8CU@TtN`#W2#g*`Dz? zR{HLKeoHB~DAE%bv^QZn&Do9llc>}jNh3Lpi{H6$?W5!tO zQy2I0W|KwcM+aC*cQ*xuCQv0&Sy0%z64fQ)HskUSKk~-&2F#*D^^9bUrOho0AKf_1! z^_UWDL9YuKw0Gi(+ZP#85?KISX^wBMb+Y!tdB;*LJOUD?BApD|f4f53cP`kqv-^zux z$Flog3Qy z&0Rx$dT%1qSw-T?+b2PjEv>|gq41rdFb1S84*Ige(pf#c&?xWlet_=GvBO||$VZ8D z3~62MjQegF18f1frZ1D%LvN&{d<(rtGry(1+S2G{VT!_!@&*GlCf~i6d7qS~mmrH^ zs@rFtqi;J#Az0r@5Op<^Ux}%&Dk`qqNTW+TalX8VTm8hA{5tb{j=IZ^=5_1U29+tt zS(+4oeM)Actfi^v6Ba^pR+jM(LqvlbvXSu&C;9}VLtu<3LX-w9Ay=N+k_cR`0RJZl zai(Q)Y?=1WQX%*vBx_siQp1OI?QD&64ag-}Ojpb4UrTJ8p~;PKBE%@U>iN z1CVya+>;4zg1kG)ZSI3kl2CUVX|Uh`{~mlEt?A6qdOxXKz)R4jgV$PE1y_ZsPQ#UK z&^nlv@{$aO5h`B1-4@#igJGL6B(+z@ZmFzh$tHo5B8Ma9++-b@UNJKSGa>PNB8cX8 z-fX@m9DwB7g9w*P9zn3l;5?Ps>t=nXv!fvsM&{~hfuz?PQ3{Hn+HbmVnLmK8cCUH9yi3oj zmJ*s^H-=C1hH*+^!0TLwZvKmUN{+d%Dx+MQOpayWqsP<6#Wvs2$VXqcJyEUIb=M87 z(X4ghDvOkT{^=&lUJb-_EsH~EgIm|hNFx%mK z9*sg2M)+ol-Y0ZwqiEJtP`K2mw}HcE0oIq>3onkkWw_%u>&%nO>Hl>rO69-X zI;Vkb+E6|2R3dC;2X!snAgWnk>>}i`dSCE|X8i*t>h z`4h)>FLt3@RvD~tY5$I5{JEjO{3DRo3k839+`1Y3^($d==m*^k!)yQsv(a@gF4nDt z{Aqyx^2@(}__>1MA=^`&YXqZ#nqE|f6&7}+(4lU?y91wkLJDpYCa@jlD7Ev2bl54bWJesjw18X zDmmxNRDQV3?|3yjk4Mq_O)CF1Z7*!v&yIuGOW9>VlV%{jSFuEi>ig(lpOT-a0Q?GtBhU_Wj#q`}5N4(^Jt88Xdf$lUV=DXc{SCG`!$mu&sGjB44E6?-O%L{|^l8vjJW>l`?y1#?g z^ke19mz1s>tu+UJNas#Y1Y+(xkDB^^@j^>b49J0mq|3H!(jVrr=$>?9o8Ore{>N%W zFEEEIfYC)Csvs#I{^VJJo=L|V`iyB znwe7fhGN19kLPQ{rc2~QY_7bJQ(3c00}=@=AJ7Qn`;T_i@0{;xSuCpesEug)H{n>j zO!5UVtJVx->AuZ*<52c~TYEDf?K-N?B{{aohfMZ=3*Bxz7T* z4YrSdY#HHDrmO{;*&fLaFzvT$D>Z!k$1%ATSU;{jXOkgWv-Q3DBIfaQD0X?D(%~mM z>6sNGO&FA78WOVUJh?X!b4esZTxiqAjHuFdoSV?J-fo%T<~!(sPP@R|=X-@=oMU>I zZpF;f2ecQOTo;J9v9mzZ=P?$~VrGLyS5?`aw`oo{Wafc(nnkv|Z~qT_?-|zAwzUmg zP(V5;pmb>#I?_VuP3aZ{q=N|3LJPe}2k9ccDX5@ykREz(0i?Ik2|W}cKnQ#*dvD$Q zyx()4bN;;lUaoaIMalO_N@T3h$ z_4MHU9x}7Xk=^d|Q3PcI)#G^PBwUFgS&;L?;xmo^p_IN#~ZW!0o)ex!`Aw; zFKrU*eXmPBXTA298EKbj-A4W*K+Ux7#_1nBF6(#jY&kxdi@p+mYRny2D~yjq(Q3BO zm3C!%FXQ9wNp_u}G>$sA249L)Vo9fJG^8>%8R>T3DJNYA zVqej;Qo@azALa;Qsmp{kR%e@Wyr!o5Niw^BWK4* zrjFHNv4&o#YK~yD(U0V^SA8jg3a@+eGhc}}1teejMIEeCzxiMRY9HT-jd;i zDEPj=SenQCGdqS%99CC*5`6A{adVlb{mV2-v!dhXSc-jGXOhPF6BvT>=0`r7UZW(l z!PpM^m6Ywe5Tj@_NwX8Os3`vRd2pVN!_!JrceqnZag0s$E(>C&=;!d!=Ww?n>842q*8%5SUgsQPyTUU^*h5R*u07k% z)07gE&x-5rUOswIrKqMc+>%L$04smE`>A)&B4es=ciW@6OR3uXaHtW_kqopCf{}M-pnodS$tmmcdEmwwp8wUT~pvuyWj_f}htt$U$P-cRG35TB!2Bund 
zk}KR&brY}RO9`DqDbZ+#xpvs8V!ll+sqGBAb{bN6*3?M~b&NE0Av^E{SAZo|*%_{r zA*2TCM6FcwdmT6#*5I|P#FbqfsjVyQsY#Y%ZQC{FM)?NAzKaFgy0jmI#hxGc1XlT= z!RtWi(Zyy00Nz~ccu;G?vtEK7d!E}KoprY7qGls`+D==bit@pUoLWa=frO~{XJ`^< zBQMD|-dU55t?3d@#(7eyZ`reF4hA)t<3)~$L(|CfkJ0)R%wSD`^V3iLw$v+VZ+oDH z)Wyh0a>P}#O}q72>1e1c)0~kk0X1q#GX^kaU?B|Y%5wM;$j*-D2ifVVsM zG53*-TMxh-g?5kKotSQyDdesAJguB@jwYKLFX>^~K1|&{*e7qYtJ}46oz&&5eLd@i zYRO4La=}LRQaRZ4%ABw;LvkDglVf?hHjVt(1@& zMV{UydHz#n)(*Zl>#Ilp3gZ;HC9g0qC(f`0prm)62k$&*ja1{Speui78<{Zl{g+J= zUz$&kL+Yv!ormxr5yjr+3a{7z_;^Ri&9wZBya0*SvGSlAzE!FxCl&MRXQy^w+*--_ zJvYRDp>9JHX%u?e$5m=UPSq!ky|Xy7tY39IHujIcMy(- zI+;?q3JoAz#!iO^Y#La`v$meXsn^Lh@^=^qN}R-{Lc_YrDKRq=jY!QE5zJ$Yo=LmW z{<4`NRz~QQol$?dn}~sv#in7M1}@ZnG!p&daBWMffH{^O{8!8>%)g5%JKKa&NvAG2{kpTN4eez zG&%a-1VDTO=i5vOm?Zq|5-DH2=-wIcACPZw9T;x?V3g8tKvmw>vlA4}7x|Ti1$(=< z?Lw-G??tEFc|#jwJg~S~ct*#+(UcF?bB@B}kD4aJ@%%>J$tMqoKbROsWIOpiZakF> z4@Hkw?S6dt19B!-KM1-`TOJ$Y;DUjLPOA2GM?ajraz<7uibTN^S&r4B}`0+)`An->V6qLG-1LyMqz1H32kHMySKmr1)pd?l%2~=F9h< z!;V9vmhQizG>yKGKJ3RboUR!9Zl#aEa<^!ydC5&gM15A$X#nl&Bjw+A^SsJrdl}Ms z4#jF!dit$z+#Q0^hCJ-RT*^=-@J1ipB>%QE3uEPr2}WxnxS1-8{dWz`2XDBMeNr2g zx(ZQ#^=Ur8LZH4R zZ(ZYRrtO-?EvLo4N7rA$Z4+ZS+|1l!PW=sxaI3T#`>19k9MC2j?dHo06AP{i;RBtc zrKaEWHaOtWnIOSv}Q+6;Z|n}wpS+5vbD;J#II>7R`PT>0j!cbI=NF{

  • vllsbi$DF&3sKTj_0#49Wyqc$J^jCg{mrm8s|F!DLKLG69CJ(4xf=V z3JFg}-W#{nRjfvMVMcaShf(_E%rtZr3d_Aw;rjUt;V<7-&6rh>O{R*lx{L2{EzUH#;Sop!9G!hMVYGP^7gZWn{?%u8jmzwu|BxnHCc-1b+vD?Vi6{ z$JmTAw{mOL(D^ncpq39zS5D|}3ii^^tYH?}HimP=111-4UN(iJpa~G+{+IHVs4u<{ zRIU~l!xUh(V`=3*aAucJs6DHio?uH;o8bZ@N$QGl;p@BlbB^LLAn3jN`^ESh6b0K= z%k?(U)^pla_9z3p%K?W{_EJ*PU(BY=whWFLq^j=Md)fIcmMBQ-ZO;0hK?9h4N0D4+ zv+Yq`*B3ozp#|1674f`+Jz3<;3ns9hQX>+VNtQ#zTb)p8(ZRhm1^{p2?J)kpndvP` zy?ZhCUBf#&f&}vg@R+}n-aQ9ldlO@tj3DBlZz};h2A^14kq{R1MngxYEZruogKBcDIVf= z<)oA5B8;d+fkSw?ZM8G$V4t4X1N*b3U-CWq#<^Mg)A5T1dTvD}xTKD6Bj2jNcnY~- zDiORKxzGxDOil*vwujN|nCC}8w)5*$f3wy~M@y`W@e=LvFz7PkXnCzz!q#s_b^oi})W}4;&#-eMt>YZ#DKZ?5MHl)+wB|m~w=GQ|jt`Ma^ zI)&Q;0_P{xe>ni>DHk$k+?rV;B7HI5yoNjQ&^zBbc$7AXiPRz~%2}39WUn{vC@;+X z-1s^)V=FSVl`VaAg;3I;^Ln^I+dGIE>MP+}&&M0@?)Udstlp$MrEE4VWp*o?b?OM% zp6?qCvA39LmfAZ+w19NX_ur5`zkeTW+BrmjHCjl3tj0LdnD`LunjA0?8tBknlDx3| zE#CLIEzp{{{ez(6a)PiK`6!4kkfuB)K=g#?h5_RjF^%2OB^dhyOBhQQWmZ}!ftjI4 zi;}>2o^ITc>B;Ztpr$M?x8fuI^ICRX0F6%hjjwz(_>hlDefcKQe5cfAV5g7e~> z$@XvY$@9kqkf(NW`5kI$9glUG`ac*K*bLhbQxeh)E~_ri1QtYYB<%`Pc`>(3NF0eV zmA)fSUGfi&dfp}&3cg8&zz(Pt`+nP9T10K*+Z_;2-V}?(XEG&3xVzEEV(fC4mk1bc z6AI_)z9ty%?S?3Z+%ox;rI>VAHq+&8sz!1&7ux_)0@arZNza-%b&1iD3iZ2=cvtgm zGwellkR2jM8m>?4kJ{_b5lUyMg&z39JU^kJ^Zu6hF93KCbKgRRmmnxLK}19_}2y&kk;LT9*N;!0{52zek&uk;Dip=jNq zcgkKq313z-6?^nViDNdVqPg;T*;iBcUf$MTCtNbOM@x-Q!q!s3_{L*K9bH7-bZvEkxJjM`g&g!gRtF1B6-iFZPF zhCj^L*Q+P&-$Sgwz{gj7=jQ0j{E*@r?@|Wq1LL|afgnOfe5x<_;T|#yPG(TaQ%xqt zP=SbGy8%AWkZb#kkppt$z$|$nCYDrRglgFa_+-0cb4Lt9jyA?P8gxrpv~Na0$!w^T zo3??+$Z-^3>eBqJokptB*!_A|XG#{oqwUup!TnZ1P!KPP=gc5^hB{0)Q&r;9{;Nb;wB9;*mR z9&J0S%(6($s0ihHBs|3Hp%PrX`SU_ub%Xnl-4NP#kQA1$2X=eR^Mk^Ms<7I)O1}S= z3f(>jNd?#}2u*L^@Z;t@*Xx!R7kCK@|5Z@R+d`#|+lP^nKgs6rP$cXg*;u;MmT+&3 z9?nX6ojdi4#|t$5C>MGUfN*~Xrfj$_ZA^ngI&RJPxce?*-_bXM@pE)UcXKIT(6gFe zW*OqosD))OJeIWEZ1F*K9oi|0uFa{fSWxc)Yz`PifFM-AW4vhJ$0^mOG%W9Vft4R5 zT}CDlwKVho6av$bcUlZ~wYWNw?kPhiRTYWWMvPE|$Q~WaldyV;vrMX#vgE#tPO@KL zB5(JpfW09|>jGcBDJl%)xSi3)4S}rEtVN7G6Vq$eu$XT~2|=3Yyz-A;zxXRis4R(# zyPnfpli_Tec^0pt=F)As`QvDflAMY)#Nxww565GG5+OCF7&x5 zu*pdmuf=yE#B-*kkQ>=iiBpcU$1!=NtoqCdpPY2(j9C;*3;q$LL6fQawtX+5!89v2 z(;2(%)9Tqpq4?8@2(zCE&1INe`#|qLlY5;%Qa=C^;XTF#flUFDZ5#X7OTCWwmJ^qJ!*HKep#MNRv=3&~PB6p()lb!zzP z{^T8*U9&h-mElUPw&49w&%Sx&=|<7*H4N~;XLF4iMi(cSPC2+ZkmBhUs1y;-V9tx} zt2T~9#J(r)z6Q7L>vv9R8zQ2eZAoN3Y_6#N6-aMXyxsVrnD4_qM7ZRGui_Z*YVH@CvnGU0qJU!}0?-@^tAjrIEC-EYono7$;t=O-`6+7oZ~M3!2@zowOSF8# zKsK1q1v*qFOzB;A!G}oAbvN?E`j17v+QtL)%`f@DC$etGL+a~m3gV7;;~>#6Ct@9y zm##CD{SB;Sw9b$p0%4=~?e<>)k>6{Z-j1sukf)eci_Dl1`!D@7bcaE!v{0KJssNIk zg~GTuMqWz z>$o`dHPTX-$GBU2a?BBNQ}_C5eg+D@q(p=I-MF zle>2I!9r7CrO{RkiJAa}qgk_}*w;C`T1XwBnA1@2NP3=CCt^J!RwWkG3MW#SCU2yN zaSmLLo?bqg9_=S$;xhaBb40{&A-rw0Yfq;o&T4moy`C$bRYv~hu+XIetud-^#jr3i z6%0w&tb`U+cT{3@N`fplHlb_93*;^papwbz@aKfH7c(3t zJEhWx(K24qmy%Om+C;V8!Ml64e0mh=A=LSl3V7djCR!6*3UM@AMM4_NxLp}$8>`42}_ zKz?Vei9s%Vk3d4f4hxs^sNrh}P-n_SfAJL@T}Ej_ut=uIAZ4qU%a_6-f|o`pshV{d zLBtC^J7hf27^BN%*|1xNwrmb$pRk{+y%K!h1qi z`5k90Y+ZXAZ}^_>6zH3W>ubq!5+IEUpf?*Rdk7eAX4bO5Co)}M;TXRWNMRbMem}6Z z`2j61RIOd@=&0_?W3VE|AlKH9UK3nOrJST-M;?nJ<%cL@bB3Bz?8pK4F!#!^{KX1;{ zuijjoaGyoi-J3Xn&~-Q!o&xZN!H(_*BD(1*yuZ*|O)p$ZqhyCNH{eCzVaI!0#;mPO z8td}(*KqI$%<-1%VzUmqzCJpdptvq*C}U?EA3;?NwEj`<4TXw%tE9iCU0|?$Z;x|) zjxLUF18bh^QaHeImj7w^KOs)}54q&bIsEfgMvM}|)L-^?vOzC&b{NHOi(dzFKN$W- z1T}Brz8r~?F6Iu9fR>NDcpjn>iSze=AAYx0j3+X0ax~k{JB{3!Q2b`{kIyR;yd?8C zP1uZGhH~G3eC#^md+xW9Pg4K#W`7|)|MTUCPq^{&Nz|u9xqqp% ze^dG)B~Y4;Ed16V>;1p~=`ZZ~U+!o=!M*ccr6(AcUk&y@mizOjCL7L|co(7&N%^~a 
z{{6|FyLfD!@$Xr`V*jqp|E2Uhp!AKdmMq2JF7975<1df*pO^2iSu%N?{Xf^)zt+Fvj}Z;E7d=4XCfXE z-6*<8^N%-sDEFZl;mY3m`C?HU?irdX09@bl#SShfAZh7EAu7x zH`0DYjxzQEBn#6O9NoJ#K7zcetkdZO4(M^m2I;@5;$KYXpSAIqdHCaqIwE&_1bLD> zgZW%1*0{lvfnu%V^3}F#@8kZPb`6z&#HRT{Y-#0{d&8CJs%MRkK+f68i~o*1(&i`a zcg?ZQBxFSKAN}_q4fB84;Sn$4^(1Dcqes9(!sZ3<*j9&16jV01N53f7|3AFyUoWgm za6ObcB)6UEl3pH{45=TF4*AAW&Ns5ap=V@XU> zy!ljn@SxdW!HIwW5`WT9u`trGSWn zmLhKWcuW7+T-ANO+U2Xv|NixkSv7fl^Op6)HRIiC+4lo`s)KvV-X!Ld_WgyL{)g|C zrTRfy?m6``gVxl4)AQtw&B*WYkyTqR?!IGnHor%d;mm^4|5o}}`)lqIr~-LKnB$xY zyP{WW)1yZjw}1~t6^3|^l1?5pYI9^I3ezyZLH>YaT6X65k>nMP?6l)oLjf8RGU0m>BIJ6`Sv+0&o5#C-C?5T172IY zJ-xZ`{+yb<(Uo@36&^MZpUUimCsapk9lBBbtBu0kPicmK(E4TB&j1_6j;oz*-q-%3 zs#|G>-O>*T7N2x(3T)%KhM@^42%}Uo-pqqk53(8_VxU-V?J5R3n@n4#L@wQRo`zKq zDb(Ddt^U|hRDZ>3!_n?yZ{`d`<+L>_SiJUrVV!6NpkJ$0?TYZ~2NLpnW+ss@(3%Y$ zGnG$sYV!)uGpF3g(oL6}<(-X^$3B>NpZiQ_X*p@vDVA%h&eHi^Ko3N7dT)R6#nb_L z=0rmk#v|QY+rVp8;Yb|n06wV=;6xLn^RT0zi@eh~An4pzecdHus?;vs#{B2+iRZ&h zsyhRW$qCdIj_|72cAv0USTps>k>vAPJ$K}CRg(W0-J}Ou>|a&(`Ux%x2 zD;%9H-&t%1#Hs=O@+g7C*W)|g(T?#LrqJ48qoUvxCT>^VUngV-%amM5Qm5nUWS_L7 zrUo?Lb^z^4UL)$2BZ;CUgM#M>b5i)U4K^hR0$iJ%V# zTEF2tEA8r7X;z@09caD>+CSijRNvFOMXH#Q4BsXVcn#J zgy_C+{6K!W^*LeOnI|lrZqd-QL+ZX;(Sm+;QU3+kp)GB(s5;eOi=7Y74;{QUVetNOUgbZg(p@6LNWv<5 zprwmZQ`PGuc^ohF@2w2p6|tNf+HSYrcIJ&_+@S6?TFpv#B);UfbJo$j;Q|QzCVtVrRFya9pkz6%e+%-aH!$gsZ%m^I?6Trr|*w&hPImm=_8v= z8GM^YKa1cA<-(JN>@}#<(?#Cxl8}(9Y9}A4Et%Lr_%~J7BtRmj!xu3sKZXE^DAU3R z6^^dc-T`u@I9}aoClZ|Xh(2>JG!rc-##CG;O(`mhpK@K zO;^juFR2w>^V6(*lUz#lCrgic@uO!K$dovj(T;q%J}Rg7>32z1|nYUj1HB zy;r?-Wu1kF{PUiNN;y}4(VzTHzbu(KgZ)6W@4m$VH>r8>W=!uYENQ<7tUU!%-(3o= zym9||&8%(f>lrS7Rv)njIfg*UMAqI)*ju)ACH$W0iH4e#a_g$@_{z}uZ1oC*uTR+b zU!L@E8=U4qS~58*Ox)-OFFq}|(i&U&@Z4@T!LzncWVY8+g2LtMRNiq0wMXt39P#Bm zHTvZg(UnzjPC!Mz`<$(9de*5~^qV-#+Ko}{hEK9khiyYk1wW)tQ{(mNnOW2A{#)}G z1D4+|-^ZeFi5n7pPGd4@6)>cUc*T+S+RKfz`+5HNui0fUzcL^#LzO^_I`!r=lZE=# z2So}!g3qgzQ?+_sxKbobd|pJN3~jv2}#4YEIkiZ?e{Da~YY~JfG-1pANg{7F19{I#{E} zPPX_Hpp^(xQb^HKoAB-D~_<9pt#jCQx)zLPbYO1*3Y(`Bl3 zCoVSdt#qI9$14S)7al6{u^v+x`!_kvG;Dyg`Jz$wYQ@phaU0`j3twp<`uHl;yd}$B z(Q*Q>=p4CTFjN)c(npR~USiU1YV`y3Gncz<7u<*QDxqAe_xt^iM@?SzTs2_3=1uP_ z!B&gXJU5F5FH5u*edmMfBgb*9w~%e$=}7NoZr78EoR`*+pGs9Id)pw-N6zjw8|?Mz z`E+%MqbYCR(3bqrc_3m!^|4GjsGItv9OfI`F)KxBpkgvx-Oux)s&zgxERhsx2kJkP zM>5!&H1HiVk^NAnh>mGwJ2TwWE{;C(Cf!;T9~5NC&p|C{yxEU;?eFL6W$HJI(2OM} zCF;rY?2>+`2Shykx+r~azJvlvA@rA3syLmBygRQd;7#SUA+}-R@EtTgxD+!n7YgiU zzpzLqwXZPVK7q_k*zy+5r^Bv*YF0}1PV=4ePyKB7nGlH+Ostwj2jz=mRU`GkI$}1)aAGv?1gw+AfJI24c%w@5|2&O@^;%X1pUus5Adg& znhEylNs(4bfAvye73ud*&DgvDX%|9cEB((!gd_pqiRit~!N-1Wu5Sidg0&ly5@$ng zgWxMFg`)kS0zrjxWTxi?asb#2ZoeICo!*Y4K6$PL2>__|Clp(li${q0Aj+A6D=-xX zsMyTFYaiIprfTaStdDqc9?o@S9*lU|Wpxjx*Hk|)BIb`y7{gIR_s*XURZ}*T#k@hAS+s88d)B^26x+0SSh1a%cIc3GU%E+`9}+ zI4Y8>CYhIIF_T0;$T`$m9E;~`vo>AVrdTENeLqOO+?aqdA_WpZ!qRz4hQN#$QQ=jP ztH*!?-?xzmk`}}C0xRT=n-1*|t?6Ft4n8T-&oGUSU}eVg!(b3qP|@u)W+Y_pP2ylO z(nW9=QWvUfpd$bmPjc1c14(x!JHjGK$y}z?&_{4K#|+rX0?-3v;OqN3WaOrPaCedKw4BQK`~;%@ti`y$h*%y$sdf5@zvgAreY*o&hg4O)X1zP(O&mZ)}}EA@szBG~A$iZfgx zG32#8xcgmPus3ERu#S_-bF?tEcgJ#P(WeGTWy4nY3pE|@_VhcQigg|PN~coSV~4vW z;$xaKz4y$=j8!YQMz8iFxKTe{aBWGyOD4O1tEsQy8#k#99bykb+lDo}0;$@e?f)7IqU>!|Mm z)~=RxzFSbSZviBR6SHF!*oRPr#AzcHLht=vS}KWaA*x7bd$4P}?Y&w>e^->+Y3&tg z=UWj!xJ$aDl*f!L_KHlDeH87Oz3A9+-A=Ruf+$YrIk=WqTBbE*gPXFscu-?pa7&!G zo#n@_m;Ces)_SD70oJL&Kx;~m!gCX$s@QSxT;XF5vX+Y7wW;2ItB0K{1SoK6TgMr> zUUtYWi_5L#O8`9_?)iH6OondVE`IHE=9I@XyG5$p7$N(Bb8i3Lb2XvxJB?*}za4i? 
z4+1uyY%U2*(I0G@ixdx~FEp*s9Tu2Yr1^$cNl6Dt4m?3s z&L$aOnp&)<&yIg&84ZkK!+(8d^f+q3e!NkPnRW9E(ab*#I#T0n>E}m+d7=8a-M1# zinNCk+;aAhbB)59FFC99P{mu;0`g{gi_xU}KdcC9m+o*uGOR*vof8Hfc7MjPPOFQf zRtxLap6tUWdx=O@7bSuSWGtqhkBUJNZ5HF8oRb*WXcF3Y++Iu!GfN*pJ&p|~fhnnw z`A`A{=R!h3A!{x4)R(6uki?1*`Z<7C(qI#E2~uKPImNN!7h)-|1oaYYy$f18v+#c) zMs`=p;y4Lrtd>d_JU+&yF1u+^ zJc-r}-c5Mx^nDY_(LS(SIfjVLE^|_4Jsfbj$#B4MZIDA&l*MO|r`0R)5%@dh#C_g~ z1K5s^^g|X49diD=(KLOmSbTR>S6TnYc94`}I$60B8|^ZMj95q%=#Y;$jo5pynwC!X z5sSR4&@N3j_hl|(E8YDIvTWvcw}igA4>CXIyy;Ja0UwtYHFL})#^s#6j&cRb1rjob zq|p&zaY+Y7PDq2Y5Rq^xB;|-&$bJq-yXk@2%SEr7KUaqe{oUo7=y$K6IK~wt_ZL?e zYmWrd#zKW^86ZR z<=$|{L;U&heJemAvKkGL6LxKsp}1MwKDwhqeQwJeM-~-?udO6@_}WYCaDC{#v7)O) zFfP(wzbIjSH+9|P(Fpu#YqJ{7xb1IfaXc~IQ)Lt^bNQo>F3*9 zm$PZdhxVHpC7JG_oTSE`gp+rDx|rI_6FT>WCThN}jO8XLi$^t4(V{+KuTHN?Y&|W= zF;|GbfO-m(@{(Fqk{S=Qpg&*S@ww&e%i~d?9sWBobM4(5@Y1z+U)FKKT|Y|MH=1VY zHQ!xP^+JQ^W#f$?HV)Xn-)t?9661PXP;s7f5{s}gnUq5`T2lNlkX1kNkn;uS>eP1g zdHNPPk`MN5HK{`&-zibulFQAEZ>9tEh(N4LyR3ctw2Y?`I=i3F!KLzQDK9I)ymo8D zy(oHA*fEo$f0u#FO%SjD#`;MIsB~Akg(%#7SF!T}-bF>FA(rdifD|=&`4Cb3c$zl7 z>C`0fH02c|e*C@XKN(VXr_9Aq<6jZEzxbBap=C?75!x93Mp5b3JewGA_k`ubwb6+i z%};`3o`edroa*8t2-PZu^|o7SjXP&zt@5vIx)wg`x@VH^0IF{-WrRhMHxtIJAw*JP z_Vhxs1%vL<7COa`I0oj^gK{1ZlZ))UYmO?WIEa%wU@L+S)y=+Pt3~38ls17 zMzXjelm%01*g%$}i-=7vs_Pl+z=sY@$Z_6gbl4Q}r)a*0p)P-xmc5MdMX`sL!xe^$Hr5~M$3VKdMn-Pq%hX33ybeCOh%znb4VM95EK_Fj~p;-0l0fimbRe{=Xk z6S>W#$gKO|>Vw)sa;6faSA7Rr&w~vTn|21w=+fkTQIrw;+budNIYjE+R)6G}bf^AKZ&W4+FE(J3m z5AzAppQAWP<5Xf~80o{Gjj7lb=}pQV4cbRfH^YWooWB=T*CnV|crvm)32?Aw6M#28 zLoyh4$KSUgWn-4VKc-yc$$y(TA<--@*EU5PM!1vKNM)BIA=1%+t@pK%W0p&M+iwmw zn?(Cq++;iSBXMbLZoCFdy{X?;cV3aYeHZ}u^21{q^b|vd!oBIj-C{meWwuF|>Pqz= znzCH3S-{_5)8idEWqJkptI4(xwVw36z3f+*lRmdyy)WZA>(wqI0^X;Z$qsKh%Z>aU zl09Z+mf`W8{{Bbe*n7)M&dzfNZ*u4jZ@4i~)KkiBHczq9iY@YiZr_O>lu^sU_aD#K zEt`QP^jyu(7NN~hZ`z?WNgcmpVYJ;Khcw%f-w}-S8mLZAC8Z90=U?nRg=q`C%H_N* z=v^B`L=;@i+Vkkmv}K@-XTM?-mEkqW#}&->4Wh^KP9#X53OKLqw=1!J0-odU!AYt} zz15?3!TaO+`LQd`quwd;$N>0D;`s-$kE5dEVl?mu7o9eGa{?p~viN|;HBSRYvVYKN zaG+`Hk1e_JN@nR2b!WQMjDsN?-tK9K9_;B)YT3h}*?Do1v|Z+Gsxqy#MB`l66Nc8R zRT$sQL}2nSB4D@~PFj(?N+$=fR znyZGC!QlLCA0li*5qmq(Nq(R?3-MY!g@(`e1dF~ROgoPqcb63duO3c7AiSMw$9vX# zkQ|*(^#tVTj%16|nuWLvq=6q1Lbv3)ngmQ!SqBR@LTHuTwBQDm0kW6oX7jowkG_1+NQ#)j~BaWrNz&zu{t?h|N- zI~XDES`P$gnlQPk3syyV(B=}4Q`j(!jo5h?6p`!KvLeeOjc&Zf9w`$Mew?XZvg zPx(JibHcEGMo_ebOP9ut7dfF6a|XqL+|syo1e2HOFoSJz7Z99A)=7lxJRPC`g|_&~ z;@D^8^WUs+ckaOPR^=srG}n>1n?l@rghT8 zZ2D4qNv|ig5nXu9!ai8jdq1Bv;na>SnmIzvO62*B()3*?5iU!QY*fbB zOjCZiVwQhDxvg9+@M;7&$gObeRYt~QyW}R4B7+v?u$Mp`STb{uS~+E4c)Hd-_^__7 zp?0G_b&sKbFI|N5XeSkhk21Yv#uo^Z&@-6u)OK2c1y~LhYrV=?S$lgb>hb`p` zmY^@tk=PyWPjmA&FEG8UJfqp~yd09GL|`n-4^Q1sr3tS#3UP8-aZNVqe>XxQ)sPUm zfUL=46?gj)w3GE-t%Iw+-v1;$bL)A*i?&SdY1SP+BI-i4^O&4y;UETnC_cOo-HW*W z-jEMQK}d4z;1x+vRTN=$7?QMlldpn-Yv!OC%RPeSAC8+s`PV1xIU)WW;g(#^wF-|Y z<}!krLt^iX#zMui0_Y@vx{DydsWbpRE|V&-x|J2W)FY?wwEx4W4g4%b3JdGkR$g5* z4irXcE*knAkI>Zca_j%(Wb(E{6q|7;zDDYl_k)Fg%(9va+T11x>Ged-ka<0{1?zM@ z3ZX>kECrAdV1NPzDX_?-Lo6m#lDFwB2a4GZIZ?S~_tb zTBjG=R3I{`8uzT1kXkTDInds~F63EH=7-TN5rohNqK^c^eB$nt&BrbVTu?e{5iw#y zg$H}uoQfspX7EL;>*6tQbK>h}$ij`*&pY=TDHw0W-dWo1?bON0gIU;;A!dmju_*Up z2CI42n>I2?lj2hWq75WkTxx3hgvE$4BRNgmO`FG{bhz&5)cS0qRR~_cyjd*q&v9sDh+9bPIp55IzqUe+OFKkx1eI`Hm5{)L z9jbe?_X0)xUN8i|xk&6*uSXD2HdaL=AAj#=O*Py^@(EZ~2=fR?5eMcpRu{Otk zC|Y5ys`mIW&)bkh{)*!1kCKxTsPa|0=?^WAJB$4%aF>N);)$wA{@CqR6d-C1&Ls&b3(CHSCRm*!J5Gw}; zmu&EJkk^@-E)e;|ISXWXt(W+K-kS}ri7sM88mR)!jAWKrA4m<~)P0FzY_@*2J#}0M z|LK1v!;#j#YiA+LNir5HTolr_G9n$SpcwO1ay=-NW;)1aI)s#QzTG_R=J&FY8BT&V 
znve8(l8?o8X;&66x(ouE!=7XT7x)7AaUqtSLgiP)iXVoqLuw3MrtUhFaOs%nT{rcG zmy+s$0>mKvt+d+;p`G)m;TcZ#0U1x z<_0Kdv1j>JZ9dgHfO0`N!EC0yNG;?D#gP=P87PDHB6p-ucGw!7^Y2lX9X7C|A1^xYa)Y}3yIz8 zG$S<=c(ru6#2dS1_pyl63(V1RTmEGAv=(y!f6M`5^)brs$r_T^ty*bUF(8O!H2AXC z%-XmwhNwEnZ3l1;4l`0T)IOuUiSqng$nJ+4j!6^@{g}Yj1Bh~Q+Vbdney#&_Z2++? zY90fmm6pnIyzWoJIjxq?Wx)U8wBcg8naE|hkQfpLJMxbZ{Y)x<8Kk~v8mb!x()lqW zqa(amryy?oqo8GP+J-FpI{3mGEV}Y&>vs3!=PO=77$c2bcVUaEC$YlxxSO8Ehd`wK zm^yWN4K`mTI0?RYQY^6&$4g3KhSNBQi^5EcF(=?nVB?!cWah5mwH?+ONqfn`-Skiu z%c}_CG)Pk(U$4ebHQI(d7R^U9K0S4XwS-Hd0$0o<){!2BCJI9%Dw6l`-alKe_Ms)z z>aFV%A4VYXqbF(k608}!&YYZ+n*ylJkR<-}8?ZumtkGgo{9D{Nq(Z=FW9M5X%b zT&Qdy>eMC?*#iLeXC4}@Y-b(>Qw!n~-XZ40T|&uI`@!USZi>Na?}sVM|1c26jjk7EpW=1tS_A9C6H#PA36&zu)`LQM)E;`37`$3e* zF?MF))8FL5-$nmEk*4+|*M2DdJD&HypIq|;ByVKje7pBA#MyskLjL^(W`Kf9^KkVS zL;UYQ{7)bFe~gWGzg-BP+=04T>&4c*c9&l-)^#r}=-ZJ&0L(0FSO1KkEa=eOs zy&UFmU5`WXs_7p-_)YNoXL}l(QQXNVjfpImn*6O@{0DheTG~Gie)*5d*^l;Oq+-;^ zfxv&5rZnXx%HDTx25o-D1^!?OKE$aeT@!u5h9eq(j(h{sR-U35JNy5C@zcz@{Wo!0 zmRW8N_DgA6j_U7XBjm#<@orp{xqkhM)Q!uR-V)GWxbaN>F~xOuWLh>}ESuOv_(Mt2 zW{P?r=vdEYsi&v@n*h%?i0dm4;O9)Rw({r3x7!yfng2Md=_5BPUQf%{eWfzmTKdclN*F&F;RA+66YEyGiYa5ohBPep-eH5#x; z%WM}KuRMDA2Jb%}3->Nwb96X>@7Fx~FG_D;14_GI5!8^q#3ucp4svZm&6w~!*Nic> z|5Oxsmc2HCW=!q(}kW?EZ(+ZWjpk>z$7)@<1L-RS^tAx*U<2 zK2!CWg1H7NPV@HR>QaZ61lo<08BwJOXa-5#?*@?OVvk`?I1HcpZ3F*o!q5P`SL&&p zRPCw`2JYLi>QC}tvz`>nzJ3%ZT%I>Q)tyU0^|I5o<}7cUXd)F9BkAuNcng{`l{k1kIk}r)5|s2g>%{aQbZi5kdVdp$W+J0b|@i1D4<(= zsuewOR_A$`x^b+yF5)HomYR3drMGjoz01E^<&$#x+aD{Rk<*9V1~6G4tox^zJsDb( zC!fNnB>N+G;Pbj@nd$Ze4O&4pW>pULcRI>a&-LB$@!T@U>V0QrlEFT~^kQDBPd=&g zeD1!UcrBhW?%CI;lA&&HYQGycmTeB3D9=W>X!85kznqZYxMb0v5jS(bP32Z@(BLE2 zR$T9WRk`C3)!7x7(_#y z>MW;~@7WPFjbhC1NW9hdytU-%=U1QG?Ji~O1z95QXx_;@ys;2tVTA0}&&BMm^xx>S z@Gedf_vLwb)mP$~dOFB9u(YLsW5;S5Diz*(YvMUWY?y<0py}uSn73xafcesh*l#!uXa$x;% zdbA;Z_ZeG50+YChzRUJF_oFzD*ev#IDsOf}#ewa^!~Z|Vz5=SMt!o>QE)SrBAaD*T zB@NPzGzMLQ64Kq>DAL`6bax&aX*e{}-QCjv=6dhd`@Z-4{%;J%8T%Y|t-a@3YtH9+ z=3KO6BErHNQ^|D3x3lxPA+=lMX22mc!h6xY6;p_VhiaNG9g%gO@oll*5uc+ zC*R#)&UcN~uT33>p4W}#T-Qw+j~KrmN2Q|bU6P4#?<4GXI(Cx>bm^3@CtOwf<2l}P z;@ljJr+(Ev{ftByJTT@N>t_YckD}L{r@e>bP6awcc02IqQeLllb^ft-bS221TcT7~a>o$*tU1@r zJD$a=&Eed-i4&38G~pEYM~%%^1dv%z>sp>3g54>6L0NSJR&vL zI0B&088wmq`;q2zwbDI|n%GGhCg&RUgk*WWO#?82E2^T#t7MG^7qKnKIO3*5zm=VF zB2g8N2WkS3e7}Vlq}L2)h|DvTE@?Y;V3`3Yd@ukZPxpVkA8#UTk@VbcD8MY$(SrV- zkD^+oL&kDf!W*LmcqqAlNOy?0`@?0CgYu{zr5?ql#~#f!b*4BqYp-_jM5p1ek$cwa zQMwnA+;i8w3=%p*ZnLy791mC6()6B<(`9tQi;(|eSOI*9-O-FWNfVW}{_0u4a=B?- z!z_9Y>|96>Nth!FW_M0&$G(}P2H3D0E01QF!s#h#m+IM}-7glyh|cLy&BIR~U4c#n zPD8su$hhyGK!>zkYNEQ>m4uOU_K-6+ zA3Ak7?UvImLdtmZMsxG?j#laId}TCBEo!2H{7;dX0BksB>;=BL+kM%S?F;8o?(^I| z?Wta328fKEdsUT|^C)4|vacP(2;D)M*4wzb@BW|RVpLvY-z0mD5htxbQ}8oTx4OT; zs(PsFw0gAGKR0$yraw>h(o&g=3V-nybtCADzl#;yFAty?aUEhHX(9Q?06#CT-MQ}> z4)(lELAzSRO|`>CM*egyPow`483ko4|7EX@mEeLM{a^+$yVxDAl`wLx{DN7O(uI6|j&W>&Z*E^-2|$NEBFdF$hRKQ+6?=B1$F z#k)~Px32Q$3}@Oo78_1{WYX`=-zBW8f2gG_2)@+= zZto~>dGc!iMW?FRGs8Vb)sjotD-bG@CjX`y$^>5_F+%d$eV!OSF3<`Z)q zbaWl6AV5UZcayMc9pCA~`5a}=82TDDp8~DaokNHRTvH$Z4M5dIwXzSrl8P z-mij!iOG|zA@|ND1e>vcP*6XUCFGQiAw6|cmWrmyOxE7F^FCv5E5BalBs=^F@zMWA zyHZ}*ScTjE2K#&$gaxjM^Ffp^=ufKc3EsKA;88eR>x)r$M`=3RozrxtFdloMpwiag ztw{HQSjMb{tMswi{(kQ3f;6Y`)_cq>cG`206Xf0`YX9S_Wmi*#UVj%(ndo>);eCtp zl`z9wGZOE)7e8h;*e>g^F0UqhnBO2ZGTe-x>$xk|r2OT{gM)D_NDYHh0|gxDZIEA$ z^;RL9yjxc=iT+~;Fftk#U-WX!Ja)i7BV*4a*@>zb_hzxo9El5$38NZ#JlXi@J~TW5 z@H+EJg+<147*EP=2CW{VfVffpm>l_>>|g1?Ex2eKD;13w3om72b7~2)YNYyaf9V?e zShnmh)VTfyWs_WC5Jex1=#75a3&s*QPuupwF#^*!0oIhm>S0SBKMQVG$bYSrDIZ5p 
z{n;Rq^V0Q)O0zBG3=Nt5Lj&6J`8o#8tK4jyndESWgvy=(o%aehp8HWfnESkj$c+s* zRsrQ7W@NUK_-(}vGNzm{Q}gZW3GW>3B5F?%o9+P9!@!sD3jyHX44JqV#oPVVm*%DO z#fgj`@D>uln5pvBDGvSEu@kdn@%&RSm5Maz2!42_0&^-q9ha4_-?Nt9{S^Nzvo@&q zOiB~xW(CJ(eP5@5+qS^t@3+o|-_r{pv(XiuSi0^XI+a=yvuZ0fvzL*wPViyf6Yq3H zNbQa%7Ck(+@jas!JwX=_E%SRB(-WN^X{>YZc(~e=0V{a8aLY3O5tcp^BE;)*!@SO6 zI%IOhrA3$XI!i$aD{TRg6O9~|B%ysoQ)QeP`l}W|tKg$u))t)#_lkdU6@kZac?EG+Ta4zB;V|COtQe6l;l|=_BXdc;{n#8S9 z{F_DcoZI8C@1n1Ryib`_Y0{MDd{(cTuU1ucco%Ry;gARWZdN@#Y7Cb+j_j)&$JYsP z_(BS*tY2iS1U$uIh_4)8lx~i({V2(H7O}tyNA-XUs}V>0lrzdY#oBf7O7B7)8Rq}+ zW71`f+~3}J6G?YdCY0M)A^!I+mO9%bR!|GQ$G`u*wf< zWgePQY1Igf4J*>OLrP3X6A2J|6fMAkIjT(1b3;W;;H7m?D%ExUju~7Hlk#J_XI>J+ z(*XV`%O5i6F7u#Q>Y1)pLK9EVaHd>h&bud8=?d8@;$sCf*|4kv|7q$ucUA>HL3P5x z`v@fZE$?6U#@TO|qP?9w=(D5!5lq4?1Vh_{Vi#2@niqsM2E|Z{JNOkN!YZ5RZqjOA z&wC;jK?U51=^C$o_@f)gp6{~xjJIe{Og3^>Bi5W`rWC){Uv(AjUFG$x_7`I6?d^cW zJ?!K%JuhvvL3Slvazve~b8gfIqai%98r}Y0DV-@7PdQ8l8-pzsCOIo$ww7u1xKuKs ze$?Q$Heh&NGEfx4~2KeMCnw)KUDypYPWj zj*Of>`8Lb0ecZfoeVc|VO|@j_t66HJrs-wufY&-|=VN4e?1p8}<GVCUQhJ-)yL`Jn`R9*w<3c61T--A_8eRVHEZ_sN{4QREvPa1}H2zsZ_ z3LP@h67ZpKbA4N-|J6@N^oOv@qb1D>%hpFzi6%2p5tRDLSOdIdo#JaM#Noq?Jc{)z zB;|U27{`#%^9N0JKq$z5Xmiy5EabU^LmI>PRoN+pWykISOH^Vs=gm0k6@PR8oNA3R zhodC+mcj}XeZhQdJ%{@W|2o^sPjL@h57& z)d4S)R9c3yQ{U=z`XiK}3qR*wWI7^Bp}`as@+do%%Bm~`MOE{oTB%oUzf8S9?!|#F zpw^_+XnJnd!9j@pz}q`;m2q?rr98w=dtXAd;nDXu@|-V=#PUU&j&wILqds!aVtP@u zDLW{UkJb5U*gB=5BDeu;l2;Q{Cz78;xxe~ zkUnlBO@R&>!41hfmhPUS3TF3|vV_L-DZQRmQ$n%D@Hh#dkNWFtS#OVv{cBd7eLo<< zIAzp?zT3*oS`r?1G&6A_rIFtB&*m&tu?cG}+GF!}4U+nJR8^Xpen^O=_h~HQNGzY( ztLfFgK;to@5^iq2Ev=Eetb<>P^||M*{Df*{;p{QAo5>KnF-m)^2eWbbUD?7JjN_tBUP%zQSXkSOTk!q*<-S%FvUUiTtrK_xa@Q&e#lLen zWXBJ1G(Ewt(}_#+OW(L$51*Vs%e>z`r@qi`-!V24W=H4-*exk2rKUM_{!O zn4~=Z++yo`aR4{dPK3O?T`?(~g_=2gZ}k$6ECU;}aE|_)^drlj5QC61X@gmAmtae* z;y2Xt8!T05_J&yF+I>XQm`)e0KKQn3g}f7rOq#}s8t7JXEE?2+v+G$Wt=-ikz`n)= ztQxP5)kxDwJR~n7_o9MY?9(IMW$DY@JuseIX>h{&qj%7ZEScDxEI|ofhYj`CIbl?* zvd`2HggMAB5O~*Xp`&k((pnOpAk4gx7s&AkFLRhoE~{D%Um5El7Uz!Ab#>w_u^Y90 z6~ow^b18Ums&syE$TGN`dD-r>(Y^=g@S|1E*}>FT@vKI$+K%TSJG09jsPm*Yd8IEL z`RP?AY3E01i$kCK(F4?1!IE^qioy~Ibutd!6*>{R+syNZag~O%Lb>C)X{l*AT6F+2 zYBNM^wWSk2vj57v^_p0F3aTtE0}@Scn)3^ecA^?WKEx$666Eq1MCi6 z_OvHI?H_E1)70CKh&FvIvpxCPQmB7?RJ)RXd$Pi~Ay>3|nkHTFfrrkQRXGMB?Jp!= zp|<_1Lu143Wqpg3q*0#U0(d^0E|ss;AHv_x)TpD#>>f|&ld=NFXXr}&$Dp$Byi^NGIQJ}=Fny4yY}{{#(R~4&`7-T1?AN)5HG2y{BCfPWRBM)E1ELv!kj8B+oMpc^+}58qajh zp*yp~c*iBMNqN&8J#f^fH;}5#WY_23E|c4RvV8uCWGnWwuiL^Ng;m`<7A&g~IW)X} zLe2ulqMJQRzK2w)5Cx8#hzm$(rdD<%UERmj2-TVA&q@xM}xImJqL2VpG)aUND#avQcg?Git? 
zSg*8xz^P?c$o!qo@@$-ww5HPX+EfJ5&5~_y8yVGH&{F$ zZX(Qfw&Ser*oCEu`d3p<)JO8LP#~n!$?j*;A;3x+lc;Dfid)?S7AR5RGi$p-;C1fJ zau#199o0?-tfRLKH8FkhNVMm8Fe}A+`8b+RUQW}g0V2A1WSY^y5Y9 zxjl`zde2)uL}bp1$)@Vfxt!z|Sl;o@0s}^;uhc4niL8SjO>WPDRb(jkoB`9hRpuCt z%7n}+7OX6%-STqbyK2W2H&oEs?s9rLiRKx>z#EL=p6w=LL{8KJ0#PeML_vofep69l zhe~#r+(MUu6+G2S-3)ZAANftJUvzWcQTa<*@jW#QnEh zaV&ZRnzw{|_qOx`kJMXF9N!zBIjei&^U@oSxlKQ$!cRsh=0O|YDlm=yX~LqxaBU#y z)kLrFl3CtRp@?yF51}yTGcEQRv)X9IT*Z+&?0w{;-nCC3$!CL0Q0`y5hH8%Lea+f5 zG<)MS)GkV}HkT&IdXRV9q(vCb@=B#zM5cD*yz3J8_y#FyJuTTdW!i`(;sDxm6FB2l z!Jr{h6~bLd3x43_a9#HBKz&r1@A$yoyfLk1AwxQ5Byg6*Ag2HQAj-%Kl}%#^M9UpN zLoP|G^;_fZLIUCD)M~bvb9MV*gx}^K2_5g`QCquS*U-1#wv2a#l`L@w$M$jxZF@od zu60{`Qpfdv2gq%Ke=`<>2sfyr31I(33(&+BnlVR5B9_j_rTw&@&!No?0rfHQ8Lz0b7F zbfhO6KN^G6DxL=pP#r>zTQ@Argv9_XkWr`TTgL<5kcCQjg#8iW=ba4t`ds?zS?sCg zoTjK)NkMUav-!}j6Mq*A0oU7v$RY*tRt6*@A@P>Y7|xohP*(k`bsDQ~hZjPET1KM$ ziCHgF)V)foDqW+R%S;Llt+zWRsg@TFPh5xu^gm%Yafu1QmYDH~y`De?_S(K;dW&!L zod_1or7+j)vg=faE@8K@i&7J`yw=$tWc9&C?hIXGXu~X5WIw)QZuX1p?f!MUeYp}B7+U$K|1emVzXzBQG)_QwO%pr z3#0b=iZh8icSTS~aIuP5Yeut>`22(=JyBxd-0pFUea+UOxv89JjG3L3jHCT#`VG|E z!g>|-$x!6@Q&ZCvkPHOUyBTqlq|31iWWj9(!&co>O;$JGT0MMM}~>f(V1e z8MW*lGj^&Q|Hf@ud;n>UFw10k+!Y* zjadMW#X{FJEG=zR8k=4RVaQwpyF;_BQ&|!PGpyN!JYDz! zzix|ag=hFMSv$kr&FH~;~{&e_YxBx z@j@9$oW;GF_i61+i#8X`Uy9M*MaWk`<6eK&MoIxK8@at^=O~E$gSk?3Uzw0kv8YOZ zL4y5?x=U#l0TD6VR<3e|9RCqSKr*{Xdbn2-1!g<54SwD5{2GYtFJXYkTZ98IFE6u;f|V?}H_!UQ0%3!4Zcw}>v z#(Qj$`Lwd7^==-Tp9MX4Jk3m(C-x0nsT(o%vG<7uMTN5N3yTntqTWxo>vBMXz!Dqd zTGvq!qt+8MoKCm7`gR^pfP9n3y-WkFs+H7wK|t}FT=Zm|BuTfG?uSP%nlbyEOOM$k z*A*lJ3$@;|oX>-4ot#&{ZawL@Yoh$INQsrA_l+gMdY?&TozB7MywqCUrq!T_WKES3 z2qNo)1N?@LdHGAHP#KUJ@43hg3Ivp(R(#w#;2^q{>%Ye+?84|v9XTuMoG<+sO!QBg zL9}4GZ@#EEXh|D12QGy?p!DG|f0wAlW}f}xCmm9Ki~8Ng>eYs$!#g7AC$e{ggZE)a z;9D^pqHopfRGYIThv6S?9-h5RQQx1$w4OdzrA~u=fgwY;J=SJs*vvITo%>ebViYmn zh|(m8JcDwiIg^Su&GKDH$5~NkID8~4H9C(U7)Yd|$TYY~=}aWT!@Sb8*PUEp{=0+z zV*|wCm53!^i1b|B_c=<5>GpO3ZD(rlFh>7sf(YLf^5$r zo=X#LvYamHg#6mq1MwZXXSzMtm4S(F6aUX?=$GC8bT9=F(_}-ve;}K461febTPQ^y ziM~7PO-(N1PVV@hTJ`%CfbCsQ1o*)TovjT&sO7#}nV&7pbi{Mwy@}?tzdf`6IN~2) z=Y_~Xj8C7g>)U9Bp18E)&YE*XQu=T@3^Z@^N2icBF{5Uh*D>35vo#zPL z{DbA)DxUV6@V}KN|Ggpj12K)kKXJ`4(JSHM%DWb#{v+u0|3yj!_WUjiZb#%=6MG$C zJv7h#zuxN4>k4X+StujcR3k=?OZ{6v134megd_!AHS_)kg#k}AO#=L8_46x}=F5c# z%TQ|_nt!`i6CW7#<{tQL{JXn-6(#=9*95PkGU@8|mgx2Tqo4nCl=}5A@Pp+apnox$e|i6yxL?5d z-WC&V|MpP-YyX#_ORWFXMgRFbvjiZO!feqS{&iRX+~J?U3kAde>tg@ffHp9o1!G2y zk^QBg`d51aov?3itX&K&T7Gr(fX?YhYAR<$RQuPs5?*s-CDHmx) zIhNr|A1-$-A8bBcB16z({q=~ZZSOi4w^j1L+DQ|>XWZ~=7eJ+H)LF0fQBq1hjIw$t zB^_U$qq#DxBpwtC7HJ@&s^{OMQp{Bvpk>YM54%Zm%iFpy5Uz?;vo&6{Wq0-Cpq`!l z)rNOjGZCj*cM#DyB^cb`_|1Cv)3K9H`Qs-pT}ZLpCmpGiDd|Vozyxj`TMrPyExT=+-mxL6Wzb| z`T$DA{$xJty4xA*eWN?ikB6m!rSCLOG67I% zi`0Em(d~zxN8xz(cZvN&y}MTQeSDuK)h87+gDAds)0y8Pnh>_p;qBCWZY{*}V=L8a zbHp&`b%9s9RFrV^Gc|~{!LwthE&$64Ls#sstv|%?rUR5nlAj)Q)swUe|JA`gt0e&| zn}+T|jsE)Sx3?WIiWpyIMBDlTD;lXMOX5;_sxz#$W4U0wR!X9ieTUNr57l7bZmP^V z1rbRzv?IvlaH;5C#>=oPHMi|`%ZZxzRTQ%EqMxFl(!;lt@Ak%(6>haVNk{}3Pg#F^ zOKRfk<1t-tT%>_o`ytTgdhwg~jNc1FM8Ygbp7xqsU_r%F<2AFYr2#kHDIbHG1(6pg zw!`_uP&}2IEAyFy{>!8hx?-i<$nX zrsQsvNI)bgWigwz*J8i3!fAU%=n6v3-ZL-aQ)z90XQ8lO{T$9?F4t(Q>iO3&0yYB) zs&qx0CO<-@OyD$AT*&h{5Hy-y7V{j4VstlqI8%GNPLFa*8VOK4)HBWv4EN*qBat<2b;QaMOo>N$_sz`N?5t3DoUHY<)+>!Ng-cj%v{sF zqwMovEu3!#B&xg3^;J-oKZeo=o-t~+hfW&d>5_T_A=l8|!2Xs5PV-HZRfmJck9`{p z4EPc@NqA+!0(%b$Yde(_ifoMNLZawaj5&M0hBX0XGusOpQKTB6$Dt-agr@bpOY2+T zB%1oE1OR(h%X~3852x)kovIZ9hpO=C9@;DBD7|9Q>;K}F$ly-VBXVd~HO}r1pfs~i zd+d@ikfg|`=h-epN74nz3x(PXSEryxX&C<%w@vydOAr!ik(T6YpBBZ^3;8f!eXGGc zj3s~LNOu96@wz}p+jPZ`2D{mZVl 
z?(C{i03Pag_XPLB9xlWVj%qfPsb?nR*3L0#G(NmUq;}_R6+P0Lc^OZX zxGJYlihh7SbT1s^@W&+0Ha*=qOEo*%u|weIqHA(3#cz>sDQ4uws39bi#xHsq#%a5$ zu%NiY|M#6gm+&C5vfR%+3cqvrz+{N~DJOqGxnKbl;dKg3%m+Nu4|LSMyuB$U!zsd_ zTaxt|db$w&F726JZT6{TO5kd8;NUaSXKW?fJzSx)>c=E#DvZDFCChq|tB}Kn-}zo& z+=eA2E*JI8yIAz8ytzJ)IwOs;03H?FwG_R7pX2K+*&X&AZr&2V8~#q;bfX&N+M8C! zw}#n-~?C=46Vhu<^}eFk$P-%gE-Zk zH)O5S9-%s=BRDy9BesK#{VNp{!#iO4_Fkp_iBsPZv)QIa9IuWa z9X_f)b1P_aACRV0PYOz;+;^&{130TNSN$=zJ84RbZy+snp1=pr>9&=##Oa0KmF>O8 zA42-D*gfvuKV^>D1OcA-KJf84(IJSCFc$r8?F&mm@z3W%BII}4#E_(V2QT!PLceO< zXYk9B34nGw4?8&uf3hNUspBRhRYd#)mwx!!?%>@?QE2W??}~CdVijOj zBOCmAwIJTEK-GmI$Ky`O@(M zu+n^n(i{0EeS!8Mp+ik{3Tn3#PvSMO9kA}P9|H*B%K9uegusCKuhg^$8?R;wDqh#L zTknjWZU2PK>qjZp>a9R5Xl#4JuOYYi@0bM~j0Xe~9i(tt{C5t@53+$uy%$xQ`wxVl zRRySxhs=g|bVuPj#Ehy=%RFLGq54Cd(5v!Awy6h=@jIZa(9Odc4JTO0b@|4!N$LMm zbc{%jj%yZ3eaKVSbhc4iKqMm8a*;-%177Zj7G=Lr{z&L6QTO?=u}E!Nmwmhks!sQ? z3Y&3XL@X3WGo4HD-<0P)V9-)|O>{t=gvK*b^Ts}+W9coQ+3NAY&G7{^K_Q2bc5!>E z>@8C$LMvDvCOKadk| zX-hXqHcr=U+k<>hD*q^f6L30EYUnH^5JRs~Xu1zffj@3he_V0cl#j6DUEbUnC&?|+ z8gv&d-01cu3ej@&lCSk^fF=9PJITZ_CXFZiNfxAQ#Uuq$TSoYmAc9zCYmQKMGJzH7#!!UoJCy6Ek_; z&$kFesBI%EOnAhOKqD*h^?xtjbl1FN^8G7@!NBx;HCwI|CW3Mr^NE8Qv78UjeS_ z=R5wfb&ffJ#q)_i>J)#!!$s@Yf+J_*pfk5`5>*p`?8*wjJyk2N%y+2MBzjZIRHl{^ zc;ICkoOjPNSyn?NsYGv>X5wHDdK`dYbQ~w%ssd+YKVb;n%DLD2Q{rI2;=B{`Ov1{e zUoj~v5*I(>&!}1p4V*|JOwVmP-XDNr)xVUu<>{GfIyu1A?`mJWdj)&Od!P-vO`8I@ z2N7_nfl6^9v_%>0VzODfTM)V{pieVLr$g8<%qw(qsO8GP`P2q963} zp4nt#C!ZU^pI>Q5kuoxRqRphd#nh){f|2Uyk=txuuoI2G$GxX3o8tI7u`JoO0%rBM zOBG8E$BfA6&SAY!J(XL&HK4X%>Id=;RFPSF3eJo95Bh2C%tD8DuP`nG#_d z{I`eXLeJP_YDIM6h$LH%CTkd8m)bD9LQE*hsGr}y^||#v-k8$#Rp~+33cPOI)%z=7 z;tzMC2r+hWobJ)1pH}Zc4kvWaZodgKzRS}m5XRvAi0>32RW(e(q#9WXT)EO67%Yh=3({UbTX4Fxs z8BVUu5#e22j`pHPLWDVq+g2P5cjx?_?#?puT|l!s1fPH7%hA>UMo!wpzNBb?QWq+Z zz4OPsnh@7N+w#u$GVLru~y4t0$?m+1amH{D7DSo_t%iO8WTv5-~>? 
zk(Mdd}uY>6jcp8Mli0X}_m{+=Aw3v{1~iQ%=pZ0Al?J5yPb7#|=T zVL(J|1@LZz<|{?4y<`XlL;xy{QJ&L0e@EzZ9)Ru~An?(9H$EoR;;aezD~A6sP?byu zto8Ecz&%bD0)SiUV#OmYrEQ^53A;Ps9hzr^fw|Ph$O3h02$m)vkk1D8kybd&L_*R8 zEjI}!W6Ia*TD3r6PYyMG4Ax(CNAA{ex8#vVx#BY-=h-Jz-L!~cxAY@f%NA3esBKua zdzHl7BeW2=HD&J)Aur7nK^LCIx=>fcW-(0|wNS@GBm1u%{6NmQi;0{fa-i}J+4TAc zE|T{TZ1by5f2H*O7qZy^z_l5SEPqBQs4*UW)c9tU0z9pFQ=uRD$sQ432Xs2VL_wRW zpJ+MnVRO+J!$V-MLkp{RSV&RB(y~1zczZ4it>|E}M*uOqezeR+tL#i&G`vZMpmsNW z7Eb*8)IZ~&-#-H|J%7y9+HO6LyZ8QxS-HsP$4PcT-?hP=ufNFQsl7B3!DhLA78*EZ z-*vrNrt7Lwe-84pPU@0JhBZC#3d>c6TIVOnOJ+t?5JxPPj0r~{gGQn?CE)?7_&ixC{ z0&W$QGqVZ1J|n-`)L zFb{Rb(bZ8fZ87-}y!avELn0r$e+pW9flV5*+!f*2_a<hqP zUs2G!&I}Eybz9{|MzPx&R5cpUD>S8#trGxHY3p8He+O7pq0HQNAJU-1^a0 zA%H;le8a%k939xx$5Pum-TsD0VYw(;==x_G&(Eh{-H6IG+j?!=_+9aIE3gl_v0D7$KD6_-zt$bl`wyM$gdmaFk5!t@J_AzPp27I$6#5gn^3vvmh&XM} z?tA;?5THOn!X(k4sRrWx2)=1kF55-=nGm-xeF(mXxeg!bD1pFpKsmsvm)aob&lIyi zMw>Hgc&E;pl(V4vf5GiNpUZzyhgD_BOyJ%n^3tnAUn|jH_5>KDE)vWOf-YA=_D0G~ z=92H-&8J;|rUvPugqZ<}7U=dy3IXdkkSQ1Wt@A@G zoE9ECb~$`&=D04CnH=MMq%n@>i!LJ3KV$&Y(8>pt@D zX6WfG2bLUaoAlT13eT+~66Pf{NoPkj;^vLB z0c2PX9gcUi4~_VIq*sF$kGQ1ej+nme8AYb0W?w5k1*lks&^iZ*igkPHlT=nndvkb} zL?(m{GPyHlFW8f4zK)R(fVj1osHh-~9{1XBf5min*5!%vv560F8Str->J_{3VPp?` zf0t1GJ~l9mH96HZ8y5<%l$vFAy#(!|UzYmkYur>hO*Q9Xk=R-rFop{WoLc9( zQxnB_c~>}vQ%F*LcR1w2N~ML6wX-H${FZ_UTs%Rr98kHBYujyn#MH7#PPV8tS@9E5 zOG->*PB|i zIjiOFXPuH&me-GRjvctr(9puH4MaLrOo)$hkefq)8%-%JZgB%131KIjEvSxh``T1x zX`}2_d#|c|vRW8i`Wue@eFXB%#(ny_Je1BwH(`}H-Kl`K^!;Vy;aRTj(AJUJVJG?h zEfO(Q0*k7~bTP@VJa<7nxSmTK`TagO%_CO2GzV z90A73S{A3>uY#Qx(Ck4g`QwRv!GGqY|L0J{w5y{9L44kgM=AAy@MciqM2V-#$9H?U zs1B2GKjJPJbR*?gvaYW%of~ra7KOikyB$b!xZ^>(vPSAQe56>YrUm4}f25F#99T(x z1RJb9Q7h{KfI&7bdopr&BI*e0-hophPGc~TIj=Sqh|dT%c|6$)Ane_4ORqSAgz7kc zz2IfQgST4iS~$%0w$}joFxQ!C%VAV2E#~6C$-z6Snjd_pVR*`RDn!9!>{}VI?4^$L zD0#?{c8eH=c{h$pp8Jl@LrH3UDv>X9H_>@shW*qq7@3QpGP|?mpnbzTs-J)SH`I9N z#<2qN9BcdPu2{DDz8eO&yU`sLn0F`-msB3F~8Na?kH=nMC0BBT1`Nq(XyxhwL zG<=tX@N}u9!^(^CqPNP-Okl7=Z(RWb#_wm|#Xxv<8hT_KHS5C-{ zKF;Vr(EjZ$dO8b(#WR&vnX#1-OZpO0gR$N`)=ajX?Qi{qhx*4%|GVA(wOt*9m^LHQ zp8%+4jFrBWX<6D$?XLY(^zeJ5e%~gcKp1@1Igx1Lfe6{1-!7;+aJuxhQWIz1q&EnqY}RVF-`n-QtVGX4ap>2-X@vh6q5ko;$y)+E z$+t{T5ny%fQpWZxLj84@|8?I$d7L!*PSrsy2<`pHA)MxwL+X$}@@@Vr>iGQ`-))1j zK|sGo5l5BZplLrxS2RT8?RhlCv6slO^nZL&4-f{)IvvrCKug&1(-yPz7)Z~}RsNTl z;nxJQ?{POE_D5hrXXn}1`gp(bnA`rJ4irSVkM2o;vkd3oGUa_3u+ROUt@^iSrwIc( zo|0W;2tQ^ykrV_@HBGIts(OH)EQ8pL`qv5!LeK=^?8p6z8u1R zTF>FE{?5!G^$gvOcj07WhbSj3ds|!FZrg3HGwycDzjd_v8>&zm>vn<~I6%VT6%}-M zy{2pF`3$8bs2#uLP&&(chnv>fjN}5~9<8(oZQ+7IbI`?^uGF*g-RsFcXLJT+Ik%;D ztdIDN$qnzmI5x^A^E#?E+&X1LM;09Kd`~s^me?Gt^TUUR6RUhs%1{z6hJ8& zV1#+d;5A*}VAiM&Rj3AvvdljOf#&s!ZpnHo06b!7tJUpwxdV>jP=3Ssy$}+vXLBy> zqRGaiBO`R%75zz%?y7ja&Xlp7L$iwUV(j{}f`@cMOmf8MIF{;X*FaVWi}fV$`G3~& zDCD+7Ix`b&Y`)7T&H*w485;7W&Cl4m`7x?F++ib{^#dQCPvow+_e4sTrT3tiOKgth zq@{}O$!avZX;zv{7Q&Q2z7{(<`Tp zN!9|@Z{9zkE>J`WBZAcAX)bsDG@xB7&%1g%P+o@Gwz5Bk9C|IWG|5>3qf=+bjqlk< zBy4tvLmI>P8m}pFUx>pXbTI|dc@W5*x~*IY)iO^hKWy63YMrAE!9nREed1du_gPtz zA|>W->IMg9mA{zdmC9ZHd$;*>`Pck5^ZUdpV_gpsOm=y>G!e3^+JQWSbeJ7qzn~Xgj;mE4C(vAzjNvZ;Dv5{yNazDi+1auF z&pFmB-p^7xfs}FGv%-3-^P!cg%_mGLXY8>3WOl&YV2bc+p(w(V91fSogBmmURP=wrwk*iGKMXQaF$-BJ>va zG@j6a_2+!O%KCU(CP;|>5x2X~a&Uag}^0s{NfVE`ijc8Zj255m+|F4P#cD|Xvhq`aPn zKnZynEp{Y7gY*~x@vmg;)0ZgA0J@zvj$w{xAtYd;!coj^0u^~3TdI)rda~8^S&^?s z#8SuY8=>CQ9@U8{TciXS*9B^;ZL@LDgz|RXheB$A3+vC+*pe%JYI3Z=R}WB72JmxO zS#rr?4(-DnrjsK>(+(Y!vUY(en-^O=o6oUGLY1+OdhefO%X;qddB0@Qm07Em-{-*K z829F1P#eT<2-&tLZ`gXd+p}~JDi`;jMIcfV0S}qGZdt8rt%=W;KgPigk5cYKa-*?T zFYjC1&GDD8HlUbJZSXhrPg!Y03B+Wqr<) 
zOXk%{D~BKd{2o?t$I<6KG}xui)?`Mb!HnB3U!yl~V--s*481?3(R*y$0N%A}ZW3Tr z2lr&RJLV?gj`$GdXYf{!0&loODop3E41)aCGK&pJ-c?sQW#7lyEh(i`Oea1aCapxP zS^?~nqVD|i)8(UA_h3%)V^)f98*HLJbsK-CHmHb?(p-s-I^x0QA$KY`y#aDu#zr&O zJ{7^qGlZpeB#+FWT?jgAC@| z{)s{)5M1red~F00k2R}+Bsqp4*;p=2ID6-+n6A7Eu^#4FPJ>m1$6N~rh6-7}(F|Jb znlUJSn3o)VZ=?Q9&KoW^Jn9p)g2!7lyd2Zi*Urnl3zasTJ*`vu>Ai8dFOGM1N59@< zJ#YNs68eRh;6kxXPSG)A34^u`OtDgR&+q>+_ttSuuK)kIASFnMA_xjtgrtBV4N@v4 zARrAQ-7$I~t+YW&cXw`#mhResF*<|+qie+X=Dg4GoOk>_fB*j3V{GHT@2g(%d_Aw{ z)tTv#29`)N?W5}|C~5pvF@tg4%tQFUNe0LfE%z%~C_ieBRd!W??CBLHo6O;`wC$g@ z>C@sYt&Gn%vA~YfY(JbAyK^+xJ6ngdUj-;j7J=@IUGOXd67v^xXH` zipiz^(vYENMKjp3`w_!MBYmC7Ny!LzfEd=rZBHgO)68&)bN}=*`zpWVaNcw-1@w_( ziVaIaAGmZz#T;{7J@7h0ccPL^8k~rj_Pix#^kRQ3Urq5)cqW1Yt4wfkp?^sT0047@ z-g7de$0^pj0L7<(5n$I9E&0%qKU8kzJqNgdQvoX9n?*!Zhe~X8T)Ib$?(e@}JK0Kn z;Kk&&er>n6-E6-tRwwVJ_fPJcL#pfb;Me!)*(bj~|{ZtxW2soyQ?B1*~G&aiWLd06=1B%pfhy2p=3Gf^0Z(QZbE!kNN$4*=JOOPjc9bIaF&87@=Wp9LL)Fb3LdK$ATG^#r=`n{ zW(j8Dzm|LY6F%cmEHWC-39UX9&JY>N|K5IFsPCTlg=hR~B~#_LJ;~uL;&K zHBfT%-!M17sP`%RC$G_Sjwe1R;UNx+y{Tt)D18K5z)_H7;|h{rQ{_tFnC07;2)oLW zvm0Q4M%8LBi<@NJDT#c^7JAcbc~^mh9omm)A%6DS_l;V@Do{6CJ923>0cde`2rK$< zFf+!%)ZTV^)LnZ6k@iqMtjEle6_7{?U*w*{a32ZT9Xv=m0~9!hDD*ihb8mjEA(Rf6 zSctz?M*8B$=ISSd_Ls?Cf>}tpq;SHws_pJlc}lyE=S@A#y;p|B}14nUupC0vVJ%~wFsJ&V053Z2=sfR(y*xoLJHTbcl6V_2CX zg&iZ&ga<_dqfm1)_uQ4L#^v3op6$3)_ZUm-jR-13Z*j25-{#bmyaU=!lze9~vMIP5 zf1DCz7VwVr3sKAU2w%y7)srC|DE*KCkpufh_Ps?o> z*c7!ehi2&P1b`Dot~0CVMJ4QdEar0R52uku_+^KuZ{_lX@5%fO^7g@H&E& z$J8AxLvAD|-Jag{^O)PeD%iiZ{vsgo7fIUr_hY4N1E-?sIWXabOVW%Ekt>nZOI@ot zns1W<7FGA1Z6JE{NAH;q=Myju`7>%qu+|<@0-T!zl+uf2Nt5|U#7{Y&5PTtTq#ASq zsIad}$WOZDP-3xwV4`%bR;BM?Q>hpJ&MzJq=4FxLbW5Gal=vryqti;xCF4NnwhHLJFh+;j_h+`4vNL7 zShD#JfX4X;JvH~GchQ0ir8pf0+oepXm0}W~JjLysHVwC#*6;Bc9dVO*cT^|UxEE@b zM7KoVrf_4vFMtX%t;9C4V^GW9Vm_gV zyJ!CtNr0cf`*I#h35F;*WH1X}ZaR?vfbhke`ROJK_^8M3dr$Q8y|sH2?FuhSAt?tK z;YUP8!Ee&wKSYr)+q|r$@MOH3a}oqx+e|8peAr2tRLO_ zfxwE()jDHNbWW?v{9JXy5;nON@BD-JZqaTW7fuN7&Fiwi28#j`Vf~>z72=tasyX;09mtsoGA#(vrV!Nz3}| zP+zB7JO#_mpU&yAiMeun1#49+UXKM>@i|N9Vxj$J`$}S0Cu`kxV6)2L2^;pgKsV-< z@L#Twm6djPrQ-kChypNSfvq8DqK4#uz(^XDO<~7ZtY4?&ngK(uj?jp)9IkU>4*ey+Yk_JMSF2RfVic z;wg7{;X7J=l3ujlHSz4bQr@@n0e$OfIvXCT2%*NJpJr4Q*i#C_nw&6Zly=5_g;Yw3 z$1cc&q9P=kCB1HLyR`j!m~rZJk=9f}5!qRMeB~QC6HX@lkah|er|zFxYugrBKPTz2 zLYiQg*(|?m@iVuN1zPCikY?9_{R)D)y016J=JKA6y9M3fGuft$q`gsJNF06x!ikJ= z(wg}s-gqwZTGT9xkXT<4*vabhIq1ex2>%U@;n+Zg>nF@X!@|*lkFXQr|ZpOGk3-M0s(x?eJ4x-dlbZ&ncx{6fEEd8OK`H2N4H-4jGRuO{ExT zr17j{Hc>d|bQ?S~X1XcyR&x5;Ctg>}OM#;xrc7eos}v#)G8U@L9#?XgHTW+`M>l5lWQ{tTgu z@wT6d?Z7P&uU1cZQEXl#hH>Tac7Gt|MZk)*(sMUh%WU6j4<&QM{+(u5oQ76T1L89s zLKTW>5XC~_#8iN0*da|zg?0r{1FkN4d<5E<+iW8#qnYW5V=>0(zKfYyHd%N@+Got;1K`2a{=pR8Sg?2@$X zA`fAl12`%=kv(zuuh=E+M0e+|kUR2AUI75a`;y~Q2DiBwlaa+erV`Wy4y_4#y!v-b z0R>!|=FJ;5Pat6{nF9W1*IKSk4&cEM6yAl__=j(Nk~{S4=$}ZpTIFQQy^~yg8W4VF z{MCB8Qc+Ydih1qFwHX2DPNISAbQrjg$ctvQNWY+5M+YGI@tmI)MCvZl6b3Qwr`@m} ztnq={z)wM2i7)8$K6HJvL~RxKjKvw_UmXz@Urm3(AkrL#C7%Mkn8;;qQt@nyhR66qWXucEagdtE8|)hpfZ?=S0{0pzEs)<;xR1)Vev z0!iuLD@fhwgi}Owo^KRVe*rCe2QmiRH#JQ5T00`NaL9ZT=2Qr{j1;zfW%h-K5g?C) zncaiQfpoKaVO0NzhKso*OwZ;XYz`cY6(R)ATl@!h3lwb#aSm?;jt?;(`2F!YV+^H4 zEjtZCsR1C3CpBLJXWJ1%s~ieiEBef|EXl~C#hrg%{6G&vS;sF{|4b1sX(E@+SG zZ5nUN_o4+zukDMKW^S74)xHC$cbD6P`-?Fn3wXkVaslR-8_k>ngtL-+ZZAsNbPFM# z;M-{O!$6LX+ipL_tA)zXzMv~5TDYWcd}UoA0^+cceP?u!Mx`d)!Cek7s>J?tmrl^u z=g$3LY8W5mZ~zU?Z3xP+HU-W|wDFWap|d84+ojY-=Y=b0fyswuq3y$l91B z*(;?%X1yWjEK^3j1J2ZX*Q#u4R|j7;kE-BO{TBYo7b-N zgv)WjbP|LDOzies*?Quh)`xgdxNQ8qS!vO?i*1Stu9f5>GwqpLE$H?u)&n)?99#E5 zf-$-)l3-{7kV;_+*0BMLamSG<3$O`g3M4Cc%Hv_K1soI@@08!qoEI=48yW;}ebBaT 
z*=lye^Pi4Y{Pj#(ibnoZQ>RUUB8CmCqb7z%+0rtq+ZG_@=2Ks zEK>PPCi=fT^g^16CN$X44d??zmobv{9E{J%0MSmYB0$=kB@G_T(^9OX`DI|SGiS8r z4&OVTjObG&u%r4!@`PfgE6vYgXg<&c5dSVW+@$n9Y)g0Vu@$Z{ljL}4pH5DZZUxWF zr?=>ABN}S)$ax+Kv7hq`;oS~riR~a7p_n$J$a&zXN5dD?oIv7!5tU<&**s$k9&>31 zzqWcIWII`^j!q?aZBZvDUg3Z>cnV)nyE1o&Kx}JJnvBPb@Y5 z8isEbZz(9){fsMI7L(gOpKm)U{Kg8Zp% z?Q)>>%O!yBy9O2I%$EC=eKrE_exvoM3wZ^22VMUmUl-s;u|M@5oQ1kfi!DNOy-j(#QYid2; zI7IO849|Pso6FsvU;*grGnkh?He#lZf%90$@5f2o0T7LV)x_uCP;_aWn}%L^a0TVW z?g^GtPDydG(dN_{{LS^5uLAcxljF*ZxSt;$DR6Fwu~R4sKtlh`p1)+Iy(Fl~###Rn z0sw_&8v8>&H^2Q=gx+OQiEx5KiGtqJ23f;*oZXFP6m?uNeB*f#_C_U5JG}eJkNXJf z>(lrqy|XmZFo%cC1&o@FDmH?Si*dvcDDLIMb2HHt_YJ2{zP4Xq_7ToZ*}GV$Yi8up zr z-&Nh>{J3(DV+~I=`}O_H^8jSyn|0l^nr5Rvdzr!kfrwPG;c#lkTvup?^F7xN2-;Wgxebl2Z1jVZu@Y&)>8fST{K!chQ+hG43J>&cbZ;FypuF@CK_2sM&+zlJ;9EU$>7Nxy_cH3Z!gFKoNhi=V$g~odB}AQ zdkv##?8RE4ia?1_H)+s;82BdWoB^yxgl-L2l%b9>Gz8lIcJL96oIKh}544ayyP!58 zzS!|l`cX94T#;TV?8u3w((48Amw2dTjE(Rd+qe5hbJ<;Lcr0k z>1{kn4}~E#;lQ`)qUTfHn9`|-;*q{C)!%~X4D@V>9(uk-qCuO7-G#J)z)wOHC z75L~=>u?bvJ{Px@4j%DDesT5@_0XH^Aif?UN%!keH>+N+>WP$!vFGY|%i~<;sz|O} z0a%GN_NAkK28x73*IuhJbpJ64ew;HGT8Px!4i8H--HIhv(yTO@b76^_sKS~Yz?;au zQeT38O{?%5|Jn;GP} zuGn79nM7>%5Tw*`T{jPdT|J9Edq&r0S_(3-jgjt#z3 zbiAAdCwn&^E)nC$EB|^~Wsc(Dx_;X|oSwfBADlo)oFdm91#BGb3&$G&Hw z+n3p=%Czf@(fMx?rronDhV0rZ)uHrjOo)wWXjbn6J=22G92g16Z z#J3fG1#x8o!srf_dqro4r^T7*CsMD-A5q;6fgfaU0Akymhm7C8LUGshm5Yap@snEn z5~`1!ZcP{Thqv>Gx=gpA19L$4*m7yf8q6WfR;R3>*t|j-P*V2C@7)#3u=ToIB8?#V zi(^9rXPec@kp@c9N|cT-RD4wQ%T%qVj-Igk-kWDEKVn7YTwG_?J-Hd!x zlawkPJGV>0E{aeR{NjabE(A=*{w7xl)4u%_^{XB21PKqZ{0z!HzZOxr`?%e zo>9m-*p{iIJ_Y^qCcL{DXGtg|cIG`KTIyYo#m;UTfg1DImCtNnZemi`!d!}i52L3r zsarx8&+eV}k3wQ~X0XZSZUXJ52C>`ccA=Y(!CH@hk?qdFb2iBLgm(-N(tHwfd#1Oj zXaQ=LGsfmN@xZ2&<4}9i(mE`AZ ziOnXt7TSMJho21}{piV2hnWML#ZK=@)m-wSFEwfkYUY9)&sk9=Z%)BNt>P(F*ng$12^X+b9)F8h86^Bh`Z594g59%ZJ zgEFX`;sB+sRjC1ZLeWMW^1iCd%BR^V(vxku2Ljyi%CY6x(F7h@0_MF z#2#dxRFShWB8t>7olgtu#-RI_Sck9Z(V{!?3^orebgClS%hxB%G(YS+X;;}#pyX=H zUbsRSjmZwbZF??$;yV|Qdsn%o>`LlwTcBCAjnJw8hCbSi`qopLBy5)4ldR;`mp`#4 zTj$Z2f6;uSRQ}J=>HLI?x5zWEm8SUj`XpkEGQzu6I2;MB9dpa<0&yf5(RzIh`sKTW zZ_~dm-kr)iOMlv7X}^6XxO<+4>dj#($@#nWI?=IOS6YO+@z=E#w93pRo1IufE^@c> zxdMurxFPn58V^T)q;^h(mdwH&r$IFjPj+$pmHJ-rQ$`47QIT;+*kOTHU8t^0V!4O7 z%R*J4A<_RRrN8j!VixJ9k?1aXab{H>0Aq-(0!?-p5C3@W!B{P+D15rpC^ zS1;uC1maz1;E0n(k@43Mg?iorAylMdUYX18QjZyO>L>#^<)}0yx`)}a0QZL)lk(x% zGkd+EWsq^7t%Gr}r7#s3!bpRF56nLP(zN(W?QBH{vu-1In`@bOL4Qr4i3z$X{*9#5 z;<_ciY_y7^rF<+*u?9*|JEUw3zbodh~=FNT+r;Ouq@D%w+mjRV|O^SAJ><> zZ7>NWrcz!@dQWVe3lZw)dllPAcRYp7ruX)$?hG3Yz;&=R zc++?rHMU|ZjGf|rVf9#CtN4Xh-RgnW#`5&xIF0Ld;!cZ;H6gWy@jfircQtOBj=#yT z+Iw$;(F#>=oH>jxZa}$$A;8{)Pa~-~n2B7jj>#w^J88J_`;?}TB{r}?G;uqAmSvUk z#z33A4ffzDcOss+GcR;O)bl{vFKJEzl1l&AL}9cd`7kgUJTUa|FPHV#;*gM~ev`To zJJEP-8dLHPe#;Pwjv)j`3TLi2eYBE-ub{b zu|#>JXU_O^r-Na0#oq+wp>JD*0@wGH>(~s`UP*2@kS}J}+o{omAHPp|mIcyZL(JUf zdLvd+NF*_&F#JSX*oAg5oiLyJf-2&rBNbSr3)v^(G&O(p}gnf8fPmxb?_# z66WwBndufDbfsD6;ettMv7a(CnN3&TkrhL%MN>P5|A{KAIK_iyP0^LU06-6@7coO@2BZHwfB(}yTzLl zSpC6nwg50lE+==w3sskh`K^`*G}2TB8K$($pWo-!xLAaW)v6DJNRAQ1jT|-TVjf6( z8Nx#s*~TT-f^TzcHeqYyPy_P1O*$dh)@0K{q?V8#GQ!of0FCfETax5An)<--NWM~% zhv9oyjzk7DUboIg??cgTj)VzO@~L@oE3Mi}dz?6d2@Qzs0E>ac$9xFL*MJnsqEZmxhyPWgW+VMmO!C%DlWC04krs*W>{fZT=J9HgPWADj zIhq+Ib_8q3f8<|%i&d2+ZUaeo+$5b(%+h18%tEIU%_gNLv+z@fE!Nd0#oT4d_{q2x z7g_o0i0REJ`A4mU|8gPa)mLyJh2V2Rk3U_??*{W^O2pd6o+$9(zjR#c1~u_-ewvv> zsB9_uegnK)ox?D}cNL~Ti9#f?@uY)Vct#%f=WQQ5dJ$xT%;rHtS5=Q~eKQJPrA z`UbfHuSw&*-1U$T@{jwohh4Tkk?Ca^+GLm4>G=^Q9t!CLtl&5vcc_7Gj;*5jrrv9G zIXeiC`1TWX;((x@WyORuIS%1C$`1;~XJ!T(3(eLYLVwQTC8$UYaDr1ywYWZtMuGX 
z)DAHn6_v+tyY5=;HSyYWui4CI^1Bwi!c~)wfBxP~ct-h zxG>auwG(ETemZSS98Kj4Dk+9z+c&+Zs%1H!=WH^OXIoEEn6-)@50^|fnGj}csKkbR zU&IH{J6mM6Z3SuC_WW%Q-FbhQ`|k3DofJqtuUNGs?3BM;0TKJU>|vAcxnih4x z(IWIrKq+V0{^mfMr>3E&J7p@+fP8e=(6yGN z+Yz~+z0`46a{?sqo@o&#gg?suDXBY9>Xx%^F4Sh1CxI%6+uJq&2nS zTWo*bEPGw86xwjxs8Q2pVImK>u4Z+|nd`*T#;KV;B?+JT+nM&d!tR=x)24pW#z4h0 zhoG}f=u6SDKJt+EUu;^?0M@1-GbU>AJe}4C2d%j7LiRU|jo~Mbg$DbX9#wvdZgPLQ zM;fUJtH0j=Uc`7j5fc3Jzi@}!M!#}QRG3xBCBqQOfDcUIn z?jlhR7*ftw&RDRhWuk%H!cR~`xxb#<5JU_KsP><&mo*Gb$YFfe5Sni=lXTFDvJUmK zB2K9%4zJ7=MtJGuaNXBvcj0v&w zA}NMfQ>pG~7#_y3qVD!*B;hN;sH3Ypl{_nD>K=)#o#?)+{-okASqCu7v=j{cPDC|e zoedWb{qKVZ<{tb#+w5QB3cIYAawODf&pilCx2*w89J2OlKjF{xqEZ{A3xhPk-u9_% z!}cNTJtK>B>iR=&v#njV!xlw-Xzx!F=CEC0s`38vNd7z_a*dcb67BI+IeF?r@#=f? zlC^5pJNAN3Oq+(U5#RliU-_rip=$lQJK1%gpH$IE1uLoj8T2rc;t_Rb;XF~le6S1I zj~}NRhm3Xp({}wi{EQ3N9-W6Mw_7oAyttehFXeor*)*~irpKv%XActW#NVIj@!U>O zO!!A!b)<|>)OnV3x_%eE%V&SXI{|BTf7TE&QQ@sv9dT`;;Sg;-L!YuP#Qyg~eGYD z3P))N!&>ZonI5XH0DOE*Lnq*pUe7nn%BEfTG6bCiZ_(oQsa{F;81vYJw&dT{SuM@i z19BG6wIS+t7)gIQu;oXt$aofwnVgrAFOut`X8jwEZ7UH^T>KYj8z<(uUR zfctXG!g)kZ=Xe!CAZF#hyAuj?0Bza!2SDN^gl;Yy==%lsaKLiCaSKA1oTG#jkls#EkAV)AZr>GwA7{?29?DH4Eu z=KU^g$ntlS{%l7%>?71Q@#zLLCY2qkgITkAW~pA$7{gi7zu`HlA{Se|kLDkb;n1<^ zEXC0n#P8T_M3;5)Pi_^92_Pp{lIPGg!XB`C-d|EPCYh$Ac{(Fd7mRt8a^YMo+GPHS zq41)||7;~)CUiVn)3tn8r)Dq3bz{=xEZM8A;mG>?MPcl+nLt&7ZC(3L-4%o3^d{7| zNSL8PB<%`yG>g_0zun|rEyWF6M1IAEh~pG@PlAB*hdVd_9>TK65d2` zf3;5au5wlxz1sd=xX9uvgEZE8JodBo_f?HdbEC_UR)AiIzHX(uL5v2m*W2#WrXD+S zLvFF6gWdyfQgLR|EsaL5uCbKw+LoQ<(5v>t-6Elh!jQuV<37;XTQsI+;at8*#F~ zy&X$F*33E?w{84kH#?OOGGVY7eaz^lD?WJEfmljPn4TfbR@4De`8kMpK$56RFg&h z`Z6;qf0m+9d7kDq4mAli5H)D9asXu&# zEF-YAWv?^XEHuX7(lSQE1MFVPnci)ewj6b!sWZ9BoNCt^dExT)tZLCtw&4i*4x&0(?Vu-Lu2~9Os5vWDHVFcu9HcJDD#JnXDI_vH5X(zRlz|4 z*J{`>r;d`zcKTyL^-bK{1F-AG!aZ%?(Dzx6=c1>oWsdk44hOvkXV}Zsto0;(PThJS zS@fKFNI~rHM*K_!Chk+$ZgXy6fTOB$Q9}C%!oM!yyD<;tBHqr#`!RXsrb72Q7BBLD zC3Gr_p7=$x`BWx(%RUr1@3>pHxrQv#3ns~kz(BP4Z_1arC;6IRN8YhTLorTV0n<*)KZe61%t5?rC9af^ruf=do%Z;5XTE zz9?O<%~xLsvaBK?OC+p=`26B`pT8y0F7IMBvz>lcja)0Px-;TkjzJ0OLG{);(kJY) z>UsCBV7{dpjE5{0tar}KY-D~@t|TBuKVG9nk5Lm&{!qV0e(dJ~L>BMs?IwM6eVFj9-_`Dn_etL#*q&qsP zgD-{!xp-8_&dD602b?w*BE@P{v*h1J>T5pIzU}4ax+Pa_-kbv8f$nMb6BITFpdq~sILjqUO5m&HNNWK+P9$VBR}t>DPCv^?XMDS?5*;|xQ$<+^kC+B zfJwv5ydWL~t=fmrxNKodq>CQvGK~Cfb?<0?{HWV5Do}ap4Ud%Z012xnv)^woH3-vh z`+i0c?pG`*Kj-Igad-#HuS=zT9GXMJeoa&4;t(Ic5JFdEwD4_XCwR`O7p1I#)3Kl{pxbd*r;Ct& zEW1ajrsAe@z6odzUP}C`q39ygDwtm3Hv2n9RiDbwuV&87=>*y*1dJPyT#Pki_!pH5 z;yvg3Mr*X}y6}=2!yUDWO4epBSfa(h{O`fdm&>8L6H>dofR@GJSq8RWv$6zi;);49 zcDg8<&2{B64Os2DPm3=4SI~a-aETUSl7)3eg`S-t=}PK16701{2wb|dY}kQF_6yfH z6Ou7{uV!|)#`*k+%^ovTscHJPi=@;Oof<0>RPp9Ix+jJsEhF6!eAag{0={q7DST1E zB}Qk`anGO)a2rp*s^|E8!`K^?4h?LgPE?WJJiY%2-);a#{HbHQIn(7D|AMz?C~-%` z!(1ibxhq!@S$n@<+?-te6GH09a6JNuaZU)Chywyc{b&+U%dJCI8^w@cR1`+c`tj z!NzvKVW@ERL*05)ABaMh<5G`yu`c#p1g$f1QOg{Tj7clnEMuUv~dkl!qenD5O%VYkXcZRW}YTw1c~-1@+0*!>WX%w@9CCu zq+y?+NE&<3`$}?BGaK?sKuRDjr@kL4)dt{6<*9bXa}RZwL_deMmpdmlYLzyjw^*$Y#oP)o zUsZj0qfO{JNvC3q_HI(wbba5BVbd(`d~Z^*%B;KDaA7*4{ALeROOJ3VEAY_YY}C!* zvJR9#Q&UnH5yD;m{NSztSER9lV6-i*@Kfc_H6z5L4faopheYH#;uALAB?G?mIkF zE=~5M2Gcj7#OMyPNNKl{Q$+hf_~XloO8m}8h8G;>AQk6t&BPl}L9)LM@xukzprfH-nl7TC3i1okxRJ{B~D&*XNoa!P3QfYi0v%Pj}1 zqC+c=(+w*_=;`y>de@6lbBvDMob!FId*u%{ef4)Ml$F1K&gai7dUbWULG`e_4gboR zMtD|+!pFss;#UkJ>yynVjVCeWzyar-tlVO#Byuf$t%yk_J+CM4O4bF5{}tt31WEw< zFNY-Ufyf(P6Ko=wH`)7#{1)7tSsPSGY`Px|wzy!nTNvLhk?g+{!uQG_OEmw=(VBdi zQmxApJZBdC9S}2)ReGp`0&@F($gT>wre2!-W zaNVA)QI;-2%uZI#xf*+u7Z5|^^iM94P=*3FSTRrckJ_>VKBX}oJF@NO=lH8ogON5u 
z+>66zGqI)|n`F;5su>M@oV}tY#w$0AxkQSrneZ1!`v#*$h2nWaZ&i98Lj(%9R{_XZ zR@BUQeV^QwWTMHG3vMamlSE6f`Gstk3&HER3?Aw7rBpkJeKLRv!DP7Jw=`908Q zEdXU?)60JA_Dw;wh@H#CHPDAoxh*ny+0TKP4>eodOk1T?x;>*%?rwV;o0^O6AWPzO zM66JGhe_ba@yQxi4yO)vO?wP}ywBff;#I%eF0Cdw5nd5PFN3fnq0UrSiBf2OZql~u zt>PNJ23LuJLg_{Ih_pM= zz~>PC1pD#tWb-$y@PP`4XR*9He)%~t#=dVU#^<0to?An8UaoG$lt&7&uhhH<5W|mq zPM@pG-@aHnd<($#8dZ~J-U`)?G3Pe!y0PLM+WZxEjpzM@?R>KDUK@8_7C9KA)J(e9 z?f_{Oe=|NwOEZ4BRL)>zVZsrKJ|L5wO-#w+?N2MJ*>*fS!2FY3Q~*Z3ux5oI30{$6gcYA&h#+2K~IV6nUU$ zmp7eCqJZiUDqcXTZ42 zn(u@@!D^=F`GGeEuL8}EtiidIZDpX=x(`I_KbR+lu@6(6PU~?&LLy~885GM zFxPE9AKO@M5xKLfHORo;5+X=%+@p33^b4<>Dq5fF&Gl(YgWgWk1*A;r>Hz^|Iz*y`{5V$ z!9j3|);6UaLBYLY&(rDRt#Xr|+n~C8!Y?7aspjq8 zzD#>9SW-7>)%l7O+C@fM4_pa-ZcSwP5v;YV;6xp%r^} zS{yVzbd_z7#*XM2KamN+Lq4+3(=jd#;7(b^qhpW0Z0b0Dn^FI>T;#UP~9O`BY6&2azcttJen17A=PqQ&Ky17N$N z4`orupVh*@Cp0jNc}W0c(`y!Me2JT!2Y!Th@giftxO~`7hiB`t87v4GGrhYOyHV{n zq$)lgZ)$5lA-J|m_n0 zYevarUJ1da-w@WnWxX~-74W7yl?P9_2D1v47HKsR0x!f`ct-%?UQxA=B6Zuh5#HD%DDL^Wf6#RyTf6IN`XQ_w!>m;1SDetD z-bP&KBnBVhv;O+riDygbJivIp#$e?V(LbxT7Qf)@FzBJK6znMB@r0LLGd@R+wh|iq zZ>XZdFyR`gYh~lwv;32fQ%F1S*%AcAbco%)BmI1%kJfDlX ziK;Ic?nTuc>mFFt-rbnY_1yR4=V+}s+gmM0PipacAKZi?u#-%H}$ywQ9$RB?S;LQhEEyf|Bh>D2gV?}Ybqw-xyrT@&e2EEhkOfbEr0 zO49({M4rZ=PjkVbMem)ZE;~pu?|S_Ocq`tZSfPQ<(|K>C-WJvkTCHjJj^i8Svh#LP zL&(fLZ&cu#D1ZE66CY%`UYB6o2-kN-?SK!qW4%Sg16}*}QKyxYne3AIB0;cSap4?w zC(VxOD*%m!t$6KMIZ@Zuda*^z_)pR`o;i1h)@WmeP9>CLV3BZW>y7GQE z9baF$*EZ0xK5qB^hQX|V|DW2H_-LF-nFUA1| zldJdnO35p|*e3vXZ$wIqI2>WqEOarZIIqA=oj|%^cq`(Q^YuMn>5RXw)K~mEA&^x$9$GGD$X}d)|kXY9k zF_6_joR;f6D66j*+u)ai*mZQP)BRGPOq63tj+hJk*!~MzW{h&jFK=sWgUAKO7yPuD zN?d==7LR@@NDIuAFkCA*rfgn@=iJ{C)cCIQ-Iou_zR*)7#5f1@ntSrK)rFQ>pdPiS zWa|BQW#xeCfWKcVGCAjzg?%|V&_h$+HGKO%R*9N&hh zu|TZqzP3(PGW6Bk*f{*dh;XWD{y2>qU(0W}QNe1r%)6sQF@pgc&iX)UsNx0ZO-=~o zSDpcYZZPZbGG~*ah`!ryGgUbQ+6^G#?5L9%1%vHEfrCwxa~1~oZ#0nm8IQbklqArp zyq9OyFXHZq?XBH%M7at=^jjV&{1@p6C{seNC?B5hs!3eETo&WEUb52UJlyfcL)aKQ zE*bi%b-M`kOyWIlW5RjPY(uR(XsJ^dW8B&#AAIy0M# zr5_oLYt1od&%K(|p zA0YqVpxS@D`{aI!vH1`SUit4X`)%fcK@4LDN|6s!?o9^$pZBHMy)5~i-*UY2KOg@0 z6W-ARuv}R&;Y!lq1N4u({r-$`8K_FPow6=o`QM%P*Z=U81Dz7)LZC> zlEtv&hqM2>nyRAVQV4Ls6gAb~)#(pPz5pt2+JAK+_aAqBDFt+r#V7eOpivTq*S72* z!~1DAWd6GQKYj`H)H{mHN%LI3^2YCG{I4G}nqD?-JuhSK|L&@t{Cvm{qqprw_2>Kt!;vd)h=?4ts|Bt(FF)UDa_>ZRg?%n(a&vPvo_B_WkM8rC+yu<-xdd(Wt*wzUn^1}KQQQN%*qDxx5uARr)3w%V8uSmJ#ySDd03aH$^-(JK3lRkm4jY!o``~yL7<Iz;wXydg(tji&%0T4GQ#s;iFePO_sxVtI1Xz90t&})7<$rVICgWC_Sc=ibKc!S&#EEp8-Zr0{Q%yEivM)zs|{vG|vt&hl1SJzd*) z3wNZ*pCP>g`$a#3RO$lm_GBKw-8upkD5!?bKuSpa7`AxWwb`)9ODr+Z_R~pHt07@n zTxrWXTyg2SDAnh&QpTyU78^gxxKpNd4nYY4GQeyxj-d_+!_-9lf_wK(k4@GXh2^DH zX}fnE5gU%;mG$0ZgJZ=u-7{5@`iTIPpl{{1T9P=di8i~x!8U~k0K*}+?rJ9n9F{=Q z5n*ax0`%%CvCC|6m^u9s8OLEByWB8b7uBhhV}d%!%>JNzK*6z10|ubWC**t!8E_V* zI;xae9w@<`qFR4lV!{@TSTfKcj3Sxt(_JaTZBaiIg@4%I2Xl9M7#+*C3G-y^R=e{T ztfwwB%6t*2bz8)1HU2HsXKdvxdZ_^|@9gyv zT1?{_Qa$8fnO6#q{j6L-b3UEH1r@(J{9La-uer=D&vfIBF@>LPmYMQ*BuKWZ*t%`uS$-yVLZ!aqQ*;^Qi?SJ1%_|~*^Jj?tM#Igk5CqS zGuor@Ege!bhRSw!J?C}D2bb2DLg;+NvB;lfjesJ(XYg=;TY>Km6j%yqz%QoEIH zL7SFIAq8B5c9+?I8b7SVMmo_K+f%jcDWeo(0VSix6seBjpt%eZVYsr_AA*RNQ;h~t zjBoy%Eatk`l^2EEeD}vX43$tjDz_!Gf6(Q>3j&K!qw#6|Y(ukcW_kR~TU`L1@pPax>6bNO?fyj5c&1j4%9U4J5Qz5h6{_j=F z6n#iM$eMNh2!}n*@@>8e=PtXAm1#EXWxn0JnAik846=(h-wBSJ$vkEwu*E%zy%_p4 zS4H{!-X-|zpGM|)m4I6Y!s`(2L5TIwk9L1cVsrzZ~FrEug1|om5NvpI!7dt2zYituCL)V{s=g*s8u#P2RxAOe;UpFL^>FWH zAMzC!bt}_KKa&ga%e!RVr`OCPyW*rrgdr$Pua&9tZYAEi+#JKYt}ptn(m%Z6-#W)? 
diff --git a/docs/install/remote.md b/docs/install/remote.md
index 39a3249fd6f..7611ad1e58e 100644
--- a/docs/install/remote.md
+++ b/docs/install/remote.md
@@ -3,6 +3,8 @@
 You can write your code on a local machine while running your functions on a remote cluster.
 This tutorial explains how to set this up.
 
+This release of MLRun supports only Python 3.9 for both the server and the client.
+
 **In this section**
 - [Prerequisites](#prerequisites)
 - [Set up a Python 3.9 client environment](#set-up-a-python-3-9-client-environment)
@@ -152,7 +154,7 @@ For more explanations read the documentation [mlrun.set_environment](https://doc
 
 ### Using your IDE (e.g. PyCharm or VSCode)
 
-Use these procedures to access MLRun remotely from your IDE (PyCharm or VSCode).
+Use these procedures to access MLRun remotely from your IDE. These instructions are for PyCharm and VSCode.
 
 #### Create environment file
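The client setup that this diff documents comes down to loading an environment file and then submitting work to the remote cluster. A minimal sketch, assuming a local `mlrun.env` file whose URL and access key are placeholders:

```python
import mlrun

# Contents of an assumed mlrun.env file (values are placeholders):
#   MLRUN_DBPATH=https://<remote-mlrun-api-url>
#   V3IO_ACCESS_KEY=<access-key>   # Iguazio deployments only

# Load the environment file; subsequent runs are submitted to the remote cluster
mlrun.set_env_from_file("mlrun.env")
project = mlrun.get_or_create_project("remote-demo", context="./")
```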
diff --git a/docs/monitoring/initial-setup-configuration.ipynb b/docs/monitoring/initial-setup-configuration.ipynb
index 93ea1143dc7..738062f1077 100644
--- a/docs/monitoring/initial-setup-configuration.ipynb
+++ b/docs/monitoring/initial-setup-configuration.ipynb
@@ -28,16 +28,24 @@
    "## Enabling model monitoring\n",
    "\n",
    "Model activities can be tracked into a real-time stream and time-series DB. The monitoring data\n",
-   "is used to create real-time dashboards and track model accuracy and drift. \n",
-   "To set the tracking stream options, specify the following function spec attributes:\n",
+   "is used to create real-time dashboards, detect drift, and analyze performance. \n",
+   "\n",
+   "To monitor a deployed model, apply {py:meth}`~mlrun.runtimes.ServingRuntime.set_tracking` on your serving function and specify the function spec attributes:\n",
+   "\n",
    " \n",
-   "    `fn.set_tracking(stream_path, batch, sample)`\n",
+   "    `fn.set_tracking(stream_path, batch, sample, tracking_policy)`\n",
    " \n",
    "- **stream_path**\n",
    "  - Enterprise: the v3io stream path (e.g. `v3io:///users/..`)\n",
    "  - CE: a valid Kafka stream (e.g. `kafka://kafka.default.svc.cluster.local:9092`)\n",
    "- **sample** — optional, sample every N requests\n",
    "- **batch** — optional, send micro-batches every N requests\n",
+   "- **tracking_policy** — optional, model tracking configurations, such as setting the scheduling policy of the model monitoring batch job\n",
+   "\n",
+   "If a serving function is configured for model-monitoring tracking, use this procedure to change the parameters of the tracking (for example changing the `default_batch_intervals` of the `tracking_policy`):\n",
+   "1. Delete the \"model-monitoring-batch\" job function (can be found under ML functions).\n",
+   "2. Delete the \"model-monitoring-batch\" schedule job (can be found under Jobs and Workflows -> Schedule).\n",
+   "3. Redeploy the serving function with new model-monitoring tracking parameters.\n",
    " \n",
    "## Model monitoring demo\n",
    "Use the following code to test and explore model monitoring."
@@ -184,7 +192,7 @@
    "name": "python",
    "nbconvert_exporter": "python",
    "pygments_lexer": "ipython3",
-   "version": "3.7.7"
+   "version": "3.9.13"
   },
   "vscode": {
    "interpreter": {
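As a companion to the `set_tracking` attributes described in the hunk above, here is a minimal sketch of enabling tracking before deployment. The project name, model path, and serving class are placeholders, and defaults are assumed for the stream path, sampling, and batching:

```python
import mlrun

project = mlrun.get_or_create_project("monitoring-demo", context="./")

serving_fn = mlrun.new_function(
    "serving", project=project.name, kind="serving", image="mlrun/mlrun"
)
serving_fn.add_model(
    key="my-model",
    model_path="store://models/monitoring-demo/my-model",  # placeholder path
    class_name="mlrun.frameworks.sklearn.SklearnModelServer",
)

# Enable model-monitoring tracking with default stream/sample/batch settings
serving_fn.set_tracking()

project.deploy_function(serving_fn)
```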
+ "If you already have an MLRun function that you want to import, you can do so from multiple locations such as YAML, Function Hub, and MLRun DB.\n", + "\n", + "```{admonition} Note\n", + "In the UI, running a batch job from an existing function executes the generated spec merged with the function spec. Therefore, if you remove a function spec, for example env vars, it may re-appear in the final job spec.\n", + "```" ] }, { diff --git a/docs/runtimes/dask-mlrun.ipynb b/docs/runtimes/dask-mlrun.ipynb index ab16fc0e55d..fb5eef30c7c 100644 --- a/docs/runtimes/dask-mlrun.ipynb +++ b/docs/runtimes/dask-mlrun.ipynb @@ -284,7 +284,7 @@ "metadata": {}, "source": [ "%nuclio config kind = \"job\"\n", - "%nuclio config spec.image = \"mlrun/mlrun\"" + "%nuclio config spec.image = \"mlrun/ml-base\"" ] }, { diff --git a/docs/runtimes/databricks.ipynb b/docs/runtimes/databricks.ipynb index 205f2ebb025..225c5fdd7fe 100644 --- a/docs/runtimes/databricks.ipynb +++ b/docs/runtimes/databricks.ipynb @@ -4,7 +4,6 @@ "cell_type": "markdown", "metadata": {}, "source": [ - "(databricks)=\n", "# Databricks runtime\n", "\n", "The databricks runtime runs on a Databricks cluster (and not in the Iguazio cluster). The function raises a pod on MLRun, which communicates with the Databricks cluster. The requests originate in MLRun and all computing is in the Databricks cluster.\n", @@ -14,13 +13,29 @@ "\n", "You can run the function on:\n", "- An existing cluster, by including `DATABRICKS_CLUSTER_ID`\n", - "- A job compute cluster, created and dedicated for this function only. Omit `DATABRICKS_CLUSTER_ID` to create a job compute cluster, and set the [cluster specs](https://docs.databricks.com/en/workflows/jobs/jobs-2.0-api.html#newcluster) by using the task parameters when running the function. For example:\n", + "- A job compute cluster, created and dedicated for this function only. \n", + "\n", + "Params that are not related to a new cluster or an existing cluster:\n", + "- `timeout_minutes`\n", + "- `token_key`\n", + "- `artifact_json_dir` (location where the json file that contains all logged mlrun artifacts is saved, and which is deleted after the run)\n", + "\n", + "Params that are related to a new cluster:\n", + "- `spark_version`\n", + "- `node_type_id`\n", + "- `num_workers`\n", + "\n", + "## Example of a job compute cluster\n", + "\n", + "To create a job compute cluster, omit `DATABRICKS_CLUSTER_ID`, and set the [cluster specs](https://docs.databricks.com/en/workflows/jobs/jobs-2.0-api.html#newcluster) by using the task parameters when running the function. For example:\n", " ```\n", - " params['task_parameters'] = {'new_cluster_spec': {'node_type_id': 'm5d.xlarge'}, 'number_of_workers': 2, 'timeout_minutes': 15, `token_key`: non-default-value}\n", + " params['task_parameters'] = {'new_cluster_spec': {'node_type_id': 'm5d.large'}, 'number_of_workers': 2, 'timeout_minutes': 15, `token_key`: non-default-value}\n", " ```\n", "Do not send variables named `task_parameters` or `context` since these are utilized by the internal processes of the runtime.\n", "\n", - "Example of running a databricks job from a local file on the existing cluster: DATABRICKS_CLUSTER_ID." + "## Example of running a Databricks job from a local file\n", + "\n", + "This example uses an existing cluster: DATABRICKS_CLUSTER_ID." 
diff --git a/docs/runtimes/create-and-use-functions.ipynb b/docs/runtimes/create-and-use-functions.ipynb
index 1c6fe909b35..c4b43606806 100644
--- a/docs/runtimes/create-and-use-functions.ipynb
+++ b/docs/runtimes/create-and-use-functions.ipynb
@@ -145,7 +145,7 @@
    "```\n",
    "\n",
    "A good place to start is one of the default MLRun images:\n",
-   "- `mlrun/mlrun`: An MLRun image includes preinstalled OpenMPI. Useful as a base image for simple jobs.- \n",
+   "- `mlrun/mlrun`: An MLRun image includes preinstalled OpenMPI and other ML packages. Useful as a base image for simple jobs.\n",
    "- `mlrun/mlrun-gpu`: The same as `mlrun/mlrun` but for GPUs, including Open MPI.\n",
    "\n",
    "Dockerfiles for the MLRun images can be found [**here**](https://github.com/mlrun/mlrun/tree/development/dockerfiles)."
@@ -319,7 +319,11 @@
   "id": "0c5adbe0",
   "metadata": {},
   "source": [
-   "If you already have an MLRun function that you want to import, you can do so from multiple locations such as YAML, Function Hub, and MLRun DB."
+   "If you already have an MLRun function that you want to import, you can do so from multiple locations such as YAML, Function Hub, and MLRun DB.\n",
+   "\n",
+   "```{admonition} Note\n",
+   "In the UI, running a batch job from an existing function executes the generated spec merged with the function spec. Therefore, if you remove a function spec, for example env vars, it may re-appear in the final job spec.\n",
+   "```"
   ]
  },
 {
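The import note above covers three locations; a sketch of each (the file name, hub entry, and project/function names are placeholders):

```python
import mlrun

# From a YAML file previously exported with fn.export()
fn_from_yaml = mlrun.import_function("function.yaml")

# From the function hub
fn_from_hub = mlrun.import_function("hub://describe")

# From the MLRun DB, referencing a function saved in a project
fn_from_db = mlrun.import_function("db://my-project/my-function")
```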
"add_databricks_env(function=function)\n", + "run = function.run(\n", + " handler=\"main\",\n", + " project=project_name,\n", ")" ] + }, + { + "cell_type": "code", + "execution_count": null, + "metadata": {}, + "outputs": [], + "source": [ + "project.list_artifacts()" + ] } ], "metadata": { diff --git a/docs/runtimes/image-build.md b/docs/runtimes/image-build.md index 7b1e0f99b64..91a92e91be4 100644 --- a/docs/runtimes/image-build.md +++ b/docs/runtimes/image-build.md @@ -150,6 +150,12 @@ project.build_function( ) ``` +When using an ECR registry and not providing a secret name, MLRun assumes that an EC2 instance role is used to authorize access to ECR. +In this case MLRun clears out AWS credentials provided by project-secrets or environment variables (AWS_ACCESS_KEY_ID and AWS_SECRET_ACCESS_KEY) +from the Kaniko pod used for building the image. Otherwise Kaniko would attempt to use these credentials for ECR access instead of using the +instance role. This means it's not possible to build an image with both ECR access via instance role and S3 access using a different set of +credentials. To build this image, the instance role that has access to ECR must have the permissions required to access S3. + #### Using self-signed registry If you need to build your function and push the resulting container image to an external Docker registry that uses a self-signed SSL certificate, you can use Kaniko with the `--skip-tls-verify` flag. diff --git a/docs/runtimes/images.md b/docs/runtimes/images.md index b2bf1c15202..3dc33f79840 100644 --- a/docs/runtimes/images.md +++ b/docs/runtimes/images.md @@ -3,6 +3,8 @@ Every release of MLRun includes several images for different usages. The build and the infrastructure images are described, and located, in the [README](https://github.com/mlrun/mlrun/blob/development/dockerfiles/README.md). They are also published to [dockerhub](https://hub.docker.com/u/mlrun) and [quay.io](https://quay.io/organization/mlrun). +This release of MLRun supports only Python 3.9. + **In this section** - [Using images](#using-images) - [MLRun images](#mlrun-images) @@ -21,11 +23,18 @@ All images are published to The images are: -- `mlrun/mlrun`: An MLRun image includes preinstalled OpenMPI and other ML packages. Useful as a base image for simple jobs. +- `mlrun/mlrun`: An MLRun image includes preinstalled OpenMPI and other ML packages. Useful as a base image for simple jobs. - `mlrun/mlrun-gpu`: The same as `mlrun/mlrun` but for GPUs, including Open MPI. - `mlrun/ml-base`: Image for file acquisition, compression, dask jobs, simple training jobs and other utilities. - `mlrun/jupyter`: An image with Jupyter giving a playground to use MLRun in the open source. Built on top of jupyter/scipy-notebook, with the addition of MLRun and several demos and examples. +```{admonition} Note +When using the `mlrun` or `mlrun-gpu` image, use PyTorch versions up to and including than 2.0.1, but not higher. +You can build your own images with newer CUDA for later releases of PyTorch. 
diff --git a/docs/runtimes/image-build.md b/docs/runtimes/image-build.md
index 7b1e0f99b64..91a92e91be4 100644
--- a/docs/runtimes/image-build.md
+++ b/docs/runtimes/image-build.md
@@ -150,6 +150,12 @@ project.build_function(
 )
 ```
 
+When using an ECR registry and not providing a secret name, MLRun assumes that an EC2 instance role is used to authorize access to ECR.
+In this case MLRun clears out AWS credentials provided by project-secrets or environment variables (AWS_ACCESS_KEY_ID and AWS_SECRET_ACCESS_KEY)
+from the Kaniko pod used for building the image. Otherwise Kaniko would attempt to use these credentials for ECR access instead of using the
+instance role. This means it's not possible to build an image with both ECR access via instance role and S3 access using a different set of
+credentials. To build this image, the instance role that has access to ECR must have the permissions required to access S3.
+
 #### Using self-signed registry
 
 If you need to build your function and push the resulting container image to an external Docker registry that uses a self-signed SSL certificate, you can use Kaniko with the `--skip-tls-verify` flag.
diff --git a/docs/runtimes/images.md b/docs/runtimes/images.md
index b2bf1c15202..3dc33f79840 100644
--- a/docs/runtimes/images.md
+++ b/docs/runtimes/images.md
@@ -3,6 +3,8 @@
 Every release of MLRun includes several images for different usages. The build and the infrastructure images are described, and located, in the [README](https://github.com/mlrun/mlrun/blob/development/dockerfiles/README.md).
 They are also published to [dockerhub](https://hub.docker.com/u/mlrun) and [quay.io](https://quay.io/organization/mlrun).
 
+This release of MLRun supports only Python 3.9.
+
 **In this section**
 - [Using images](#using-images)
 - [MLRun images](#mlrun-images)
@@ -21,11 +23,18 @@ All images are published to
 
 The images are:
 
-- `mlrun/mlrun`: An MLRun image includes preinstalled OpenMPI and other ML packages. Useful as a base image for simple jobs.
+- `mlrun/mlrun`: An MLRun image includes preinstalled OpenMPI and other ML packages. Useful as a base image for simple jobs. 
 - `mlrun/mlrun-gpu`: The same as `mlrun/mlrun` but for GPUs, including Open MPI.
 - `mlrun/ml-base`: Image for file acquisition, compression, dask jobs, simple training jobs and other utilities.
 - `mlrun/jupyter`: An image with Jupyter giving a playground to use MLRun in the open source. Built on top of jupyter/scipy-notebook, with the addition of MLRun and several demos and examples.
 
+```{admonition} Note
+When using the `mlrun` or `mlrun-gpu` image, use PyTorch versions up to and including 2.0.1, but not higher.
+You can build your own images with newer CUDA for later releases of PyTorch.
+```
+
+
+
 ## Building MLRun images
 
 To build all images, run this command from the root directory of the mlrun repository:
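Given the PyTorch ceiling noted in the images.md hunk, a sketch of pinning the framework when running on the stock images (the function definition and file name are placeholders):

```python
import mlrun

fn = mlrun.code_to_function(
    "torch-train", filename="train.py", kind="job", image="mlrun/mlrun-gpu"
)
# Pin PyTorch at the last release the stock images support, per the note above
fn.with_requirements(["torch==2.0.1"])
```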
diff --git a/docs/runtimes/load-from-hub.md b/docs/runtimes/load-from-hub.md
index 9627dab89fc..a9fa3da6e4b 100644
--- a/docs/runtimes/load-from-hub.md
+++ b/docs/runtimes/load-from-hub.md
@@ -5,7 +5,7 @@ This section demonstrates how to import a function from the hub into your projec
 
 **In this section**
 - [MLRun Function hub](#mlrun-function-hub)
-- [Private function hub](#private-function-hub)
+- [Custom function hub](#custom-function-hub)
 - [Setting the project configuration](#setting-the-project-configuration)
 - [Loading functions from the hub](#loading-functions-from-the-hub)
 - [View the function params](#view-the-function-params)
@@ -24,7 +24,7 @@ You can search and filter the categories and kinds to find a function that meets
 
 ![Hub](../_static/images/marketplace-ui.png)
 
-## Private function hub
+## Custom function hub
 You can create your own function hub, and connect it to MLRun. Then you can import functions (with their tags) from your custom hub.
 
 ### Create a custom hub
@@ -72,7 +72,7 @@ To add a hub, run:
 ```python
 import mlrun.common.schemas
 
-# Add a private hub to the top of the list
+# Add a custom hub to the top of the list
 private_source = mlrun.common.schemas.IndexedHubSource(
     order=-1,
     source=mlrun.common.schemas.HubSource(
@@ -144,7 +144,7 @@ To load the `describe` function from the MLRun function hub:
 project.set_function('hub://describe', 'describe')
 ```
 
-To load the same function from your private hub:
+To load the same function from your custom hub:
 ```python
 project.set_function('hub:///describe', 'describe')
 ```
diff --git a/docs/runtimes/serving-function.md b/docs/runtimes/serving-function.md
index 7a72326dfe1..b24b82b28fd 100644
--- a/docs/runtimes/serving-function.md
+++ b/docs/runtimes/serving-function.md
@@ -4,14 +4,65 @@
 Deploying models in MLRun uses the function type `serving`. You can create a serving function using the `set_function()` call from a notebook.
 You can also import an existing serving function/template from the {ref}`load-from-hub`.
 
-This example converts a notebook to a serving function, adds a model to it, and deploys it:
+## Creating a basic serving model using Scikit-learn
 
-```python
-serving = project.set_function(name="my-serving", func="my_serving.ipynb", kind="serving", image="mlrun/mlrun", handler="handler")
-serving.add_model(key="iris", model_path="https://s3.wasabisys.com/iguazio/models/iris/model.pkl", model_class="ClassifierModel")
-project.deploy_function(serving)
+The following code shows how to create a basic serving model using Scikit-learn.
+
+``` python
+import os
+import urllib.request
+import mlrun
+
+model_path = os.path.abspath('sklearn.pkl')
+
+# Download the model file locally
+urllib.request.urlretrieve(mlrun.get_sample_path('models/serving/sklearn.pkl'), model_path)
+
+# Set the base project name
+project_name_base = 'serving-project'
+
+# Initialize the MLRun project object
+project = mlrun.get_or_create_project(project_name_base, context="./", user_project=True)
+
+serving_function_image = "mlrun/mlrun"
+serving_model_class_name = "mlrun.frameworks.sklearn.SklearnModelServer"
+
+# Create a serving function
+serving_fn = mlrun.new_function("serving", project=project.name, kind="serving", image=serving_function_image)
+
+# Add a model, the model key can be anything we choose. The class will be the built-in scikit-learn model server class
+model_key = "scikit-learn"
+serving_fn.add_model(key=model_key,
+                     model_path=model_path,
+                     class_name=serving_model_class_name)
+```
+
+After the serving function is created, you can test it:
+
+``` python
+# Test data to send
+my_data = {"inputs":[[5.1, 3.5, 1.4, 0.2],[7.7, 3.8, 6.7, 2.2]]}
+
+# Create a mock server in order to test the model
+mock_server = serving_fn.to_mock_server()
+
+# Test the serving function
+mock_server.test(f"/v2/models/{model_key}/infer", body=my_data)
+```
+
+Similarly, you can deploy the serving function and test it with some data:
+
+``` python
+# Deploy the serving function
+serving_fn.apply(mlrun.auto_mount()).deploy()
+
+# Check the result using the deployed serving function
+serving_fn.invoke(path=f'/v2/models/{model_key}/infer',body=my_data)
 ```
 
+
+## Using GIT with a serving function
+
 This example illustrates how to use Git with serving function:
 ```python
diff --git a/docs/serving/custom-model-serving-class.md b/docs/serving/custom-model-serving-class.md
index 6147409ca1e..ec3f23433cc 100644
--- a/docs/serving/custom-model-serving-class.md
+++ b/docs/serving/custom-model-serving-class.md
@@ -167,8 +167,9 @@ You can also deploy a model from within an ML pipeline (check the various demos
 ## Model monitoring
 
 Model activities can be tracked into a real-time stream and time-series DB. The monitoring data
-is used to create real-time dashboards and track model accuracy and drift.
-To set the tracking stream options, specify the following function spec attributes:
+is used to create real-time dashboards, detect drift, and analyze performance.
+
+To monitor a deployed model, apply `set_tracking()` on your serving function and specify the function spec attributes:
 
     fn.set_tracking(stream_path, batch, sample)
 
@@ -176,3 +177,4 @@ To set the tracking stream options, specify the following function spec attribut
 (e.g. kafka://kafka.default.svc.cluster.local:9092)
 * **sample** — optional, sample every N requests
 * **batch** — optional, send micro-batches every N requests
+* **tracking_policy** — optional, model tracking configurations, such as setting the scheduling policy of the model monitoring batch job
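The `tracking_policy` attribute documented above accepts either a `TrackingPolicy` object or a plain dict. A sketch of the dict form; the stream path is a placeholder and the cron expression (every four hours) is illustrative:

```python
serving_fn.set_tracking(
    stream_path="v3io:///users/admin/tracking-stream",  # placeholder path
    tracking_policy={"default_batch_intervals": "0 */4 * * *"},
)
```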
diff --git a/docs/serving/graph-ha-cfg.md b/docs/serving/graph-ha-cfg.md
index c8c674cc1a0..37a842a7c14 100644
--- a/docs/serving/graph-ha-cfg.md
+++ b/docs/serving/graph-ha-cfg.md
@@ -43,7 +43,7 @@ The number of replicas per function depends on the source:
 - `function.spec.max_replicas = 3`. Default = 4
 
 and the number of workers is set with:
-- `KafkaSource(attributes={"max_workers": 1})`
+- `KafkaSource(attributes={"max_workers": 1})`. Default = 1
 
 The consumer function has one buffer per worker, measured in number of messages, holding the incoming events that were received by the worker and are waiting to be processed. Once this buffer is full, events need to be processed so that the function is able to receive more events. The buffer size is
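To make the replica and worker settings in the graph-ha hunk concrete, a sketch of a Kafka-fed consumer function; the broker address, topic, and the `function` object are placeholders:

```python
from mlrun.datastore.sources import KafkaSource

source = KafkaSource(
    brokers=["kafka.default.svc.cluster.local:9092"],
    topics=["events"],
    attributes={"max_workers": 2},  # workers per replica; default is 1
)

# Bound the number of consumer-function replicas
function.spec.min_replicas = 1
function.spec.max_replicas = 3
```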
See [Consumer function configuration](../serving/graph-ha-cfg.html#consumer-function-configuration).|
+
+### Model monitoring
+| ID |Description |
+|---------|-----------------------------------------------------------------------------------------------------|
+|ML-4620|New Grafana Model Monitoring Applications dashboard that includes charts and KPIs that are relevant to a specific monitoring application (under a specific model endpoint). The graphs are: Drift status by category, Average drift value result, Latest result, Application summary, Result value by time, Drift detection history. See [Model Monitoring Applications dashboard](../monitoring/model-monitoring-deployment.html#model-monitoring-applications-dashboard).|
+
+### Runtimes
+
+| ID |Description |
+|---------|-----------------------------------------------------------------------------------------------------|
+|ML-3379,4997|New `state_thresholds` used to identify pod status and abort a run. See [Preventing stuck pods](../runtimes/configuring-job-resources.html#preventing-stuck-pods) and {py:meth}`~mlrun.runtimes.DaskCluster.set_state_thresholds`.|
+|ML-3728|Labels added to pods that are running as part of KFP to facilitate monitoring. [View in Git](https://github.com/mlrun/mlrun/pull/4485/). |
+|ML-4032|You can now disable the automatic HTTP trigger creation in Nuclio and MLRun. See [Serving/Nuclio triggers](../cheat-sheet.html#serving-nuclio-triggers). |
+|ML-4182|Support for notifications on remote pipelines. See [Configuring Notifications For Pipelines](../concepts/notifications.html#configuring-notifications-for-pipelines).|
+|ML-4623|You can now [Log a Databricks response as an artifact](../runtimes/databricks.html#logging-a-databricks-response-as-an-artifact).|
+
+### UI
+| ID |Description |
+|---------|-----------------------------------------------------------------------------------------------------|
+|ML-1855|New **Train Model** wizard. |
+|ML-2336|You can now delete Jobs in the UI (and not just from the SDK). |
+|ML-4506|You can now delete artifacts, models, and datasets in the UI (and not just from the SDK). |
+|ML-4667|**Project monitoring** is now the default project view. The previous default page is now named **Quick actions**, and is the second tab in the **Projects** page.|
+|ML-4916|You can now add a tag when registering an artifact in the Register Artifact, Register Dataset, and Register Model dialogs. |
+
+### Infrastructure
+
+| ID |Description |
+|---------|-----------------------------------------------------------------------------------------------------|
+|ML-3921|The Docker image for installation of mlrun was modified, resulting in better compatibility with external packages.|
+|ML-5193|Support for Pandas 2.0. |
+
+### Documentation
+| ID |Description |
+|---------|-----------------------------------------------------------------------------------------------------|
+|ML-3663|New: How to build a docker image externally using a dockerfile and then use it. See [Building a docker image using a dockerfile and using it](../runtimes/images.html#building-a-docker-image-using-a-dockerfile-and-using-it).|
+|ML-4048|New: Creating and using a custom function hub. See [Private function hub](../runtimes/load-from-hub.html#private-function-hub).|
+|ML-5260|New: [Load code at runtime using a non-default source](../runtimes/create-and-use-functions.html#load-code-at-runtime-using-a-non-default-source). |
+|ML-5602, ML-5680|Improved feature store documentation including sources and targets, and partitioning. 
See {ref}`sources-targets`.|
+|NA|New: {ref}`project-setup`.|
+|NA|Improved serving function example, and new example of a serving function with Git integration. See {ref}`serving-function`.
+
+
+### Closed issues
+| ID |Description |
+|----------|---------------------------------------------------------------------------|
+|ML-1373|Incorrect service names do not result in stuck pods during ContainerCreating.|
+|ML-1835|The index record is not duplicated in the datasets metadata. |
+|ML-3714|Runs that complete successfully do not show errors in Kubeflow. |
+|ML-3856|Documentation: Add how to update a feature set with appending ingestion (and not create a new FS on every ingest). See [Ingest data locally](../data-prep/ingest-data-fs.html#ingest-data-locally).|
+|ML-4093|Documentation: Improved description of [handlers](../runtimes/create-and-use-functions.html#using-set-function) and {ref}`functions`. |
+|ML-4370|Hyper-param and single runs no longer generate artifacts with the same name. |
+|ML-4563|Local jobs can now be aborted in the UI. |
+|ML-4613|UI: Fixed the map type hint in the Batch Inference Parameters. |
+|ML-4642|The UI no longer gets stuck when there is a high number of query results. |
+|ML-4678|When tagging a specific version of a model using the SDK, it does not clear the tags from the rest of the versions.|
+|ML-4690|Enabling the Spark event log (sj.spec.spark_conf["spark.eventLog.enabled"] = True) no longer causes the job to fail. |
+|ML-4920|Documentation: Improved description of `log_artifact`. See {ref}`artifacts` and {py:meth}`~mlrun.projects.MlrunProject.log_artifact`.|
+|ML-4608|The artifact `db_key` is now forwarded when registering an artifact.|
+|ML-4617|Fixed error message when using a feature vector as an input to a job without first calling `get_offline_features` on the vector.|
+|ML-4714|Logs are not truncated in the MLRun UI logs page for jobs that have a high number of logs or run for over a day. |
+|ML-4922|Preview and Metadata tabs now indicate when there are more columns that are not displayed.|
+|ML-4967|The **Deploy** button in the **Project > Models** page now creates a new endpoint/serving function.|
+|ML-4992|Fixed starting a spark job from source archive (using `with_source_archive()`).|
+|ML-5001|The **Monitoring workflows** page now states that it includes only workflows that have already been run. |
+|ML-5042|After creating and deleting a project, a new project cannot be created in the same folder with the same context. |
+|ML-5048|UI Edit function dialog: When selecting **Use an existing image** and pressing **Deploy**, the existing image is used, as expected.|
+|ML-5078|`project.create_remote()` is no longer dependent on setting `init_git=True` on project creation. |
+|ML-5089|When trying to delete a running job, an error opens that a running job cannot be deleted and it needs to be aborted first.|
+|ML-5091|Monitoring does not recreate a deleted run. |
+|ML-5146|Resolved OOM issues by reducing the memory footprint when monitoring runs. |
+|ML-5481|You can now use `build_image` using the project source. 
See the example in [build_image](../projects/run-build-deploy.html#build-image).| +|ML-5576|FeatureSet can now ingest data that contains single quotes.| +|ML-5746|Labels no longer create partial projects that cannot be deleted.| + + + + + ## v1.5.2 (30 November 2023) ### Closed issues @@ -18,6 +112,7 @@ |----------|---------------------------------------------------------------------------| |ML-4960|Fixed browser caching so the **Members** tab is always presented for projects.| + ## v1.5.1 (2 November 2023) ### Closed issues @@ -25,8 +120,8 @@ |----------|---------------------------------------------------------------------------| |ML-3480|Add details about `label_feature` parameter. See [Creating a feature vector](../feature-store/feature-vectors.html#creating-a-feature-vector).| |ML-4839/4844|Running `project.build_image` now always reads the requirements.txt file. | -|ML-4860 |Fixed creating and running functions with no parameters from the UI. | -|ML-4872 |Fixed synchronizing functions from project yaml. | +|ML-4860|Fixed creating and running functions with no parameters from the UI. | +|ML-4872|Fixed synchronizing functions from project yaml. | ## v1.5.0 (23 October 2023) @@ -47,8 +142,9 @@ |ML-3370|Accessing the MLRun hub is now available through a service API. This will enable implementing better function version selection and combining hub functions from different sources. Tech Preview. [View in Git](https://github.com/mlrun/mlrun/pull/3384). | |ML-3644|Support for self-signed docker registries. See [Using self-signed registry](../runtimes/image-build.html#using-self-signed-registry) and [view in Git](https://github.com/mlrun/mlrun/pull/4013). | |ML-4132|The `invoke` function can now receive any parameter supported in the `requests.request` method. See [invoke](../api/mlrun.runtimes.html#mlrun.runtimes.RemoteRuntime.invoke) and [view in Git](https://github.com/mlrun/mlrun/pull/3872). | +|NA|From v1.5, clients must be running Python 3.9.| + -. ### Runtimes | ID |Description | |---------|--------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------| @@ -64,9 +160,10 @@ |ML-2815|New Batch Run wizard that replaces the previous New job page. | |ML-3584|The Model Endpoints page now displays the Function Tag. | |ML-4066|The Online types list of the Target Store now includes Redis. | -|ML-4167|The Projects page now supports downloading the .yaml file. | +|ML-4167|The Projects page now supports downloading the .yaml file. Tech Preview. | |ML-4571|The Model Endpoints page now displays the drift threshold and the drift actual value. | |ML-4756|The Recents list in Jobs and Workflows (Projects pane) now displays a maximum of the last 48 hours. | +|ML-4511|You can now change the image and add new requirements (such as xgboost) in the Batch Infer wizard. | ### Documentation | ID |Description | @@ -102,7 +199,7 @@ See [Deprecations and removed code](#deprecations-and-removed-code). |ML-4323|Fixed: pipeline step failed with "Read timed out.: get log" | |ML-4391|Consumer group UI now shows complete details. | |ML-4501|Fixed: UI shows error after deleting a function, then viewing a related job. | -|ML-4533|In the UI, ML functions can now be created with upper-case letters. | +|ML-4533|UI: ML functions can now be created with upper-case letters. 
| ## v1.4.1 (8 August 2023) @@ -730,49 +827,57 @@ with a drill-down to view the steps and their details. [Tech Preview] | ID| Description|Workaround |Opened in | |--------|----------------------------------------------------------------|-----------------------------------------|-----------| -|ML-1373|When attempting to ingest data with RemoteSpark using an incorrect service name, the pods get stuck during ContainerCreating. | Verify the correct service name. | v0.9.0 | -|ML-1835|The index record is duplicated in the datasets metadata. | NA | v1.0.0 | -|ML-2030|Need means of moving artifacts from test to production Spark. |To register artifact between different environments, e.g. dev and prod, upload your artifacts to a remote storage, e.g. S3. You can change the project artifact path using MLRun or MLRun UI. `project.artifact_path='s3:`function = mlrun.new_function("serving", image="python:3.9", kind="serving") function.with_commands([ "python -m pip install --upgrade pip", "pip install 'mlrun[complete]' scikit-learn==1.1.2", ])`|v1.3.0 | |NA|The feature store does not support schema evolution and does not have schema enforcement.| NA| v1.2.1 | |ML-3521|Cannot schedule a workflow without a remote source. | NA| v1.2.1 | +|ML-3526|Aggregation column order is not always respected (storey engine).| NA | v1.3.0| +|ML-3626|The "Save and ingest" option is disabled for a scheduled feature set. |NA | v1.3.0| +|ML-3627|The feature store allows ingestion of string type for the timestamp key resulting in errors when trying to query the offline store with time filtration.|Use only timestamp type.| v1.2.1 | +|ML-3636|`get_online_feature_service` from Redis target returns truncated values. | NA | v1.3.0| |ML-3640|When running a remote function/workflow, the `context` global parameter is not automatically injected.| Use `get_or_create_ctx`| v1.3.0 | -|ML-3714|It can happen that an MLrun pod succeeds but there's an error in Kubeflow. | NA | v1.3.0 | +|ML-3646|MapValues step on Spark ingest: keys of non-string type change to string type, sometime causing failures in graph logic.| NA | v1.2.1| +|ML-3680|The function spec does not get updated after running a workflow. |NA | v1.3.0| |ML-3804|A serving step with no class does not inherit parameters from the function spec. |Create a class to forward the parameters. See [Create a single step](../serving/writing-custom-steps.html#create-a-single-step). | v1.3.1 | -|ML-4107| On scheduled ingestion (storey and pandas engines) from CSV source, ingests all the source on each schedule iteration. | Use a different engine and/or source. | v1.4.0 | -|ML-4153|When creating a passthrough features-set in the UI, with no online target, the feature-set yaml includes a parquet offline target, which is ignored.| NA | v1.4.0 | -|ML-4166|Project yaml file that is vrey large cannot be stored. |Do not embed the artifact object in the project yaml. | v1.4.0 | -|ML-4370|The same artifact was generated by 2 runs: one with multiple iterations (using hyper-params) and one with a single iteration. | Do not generate the same artifact name across hyper-param runs and single runs. For example, add a timestamp or other unique id to the name of the artifact.| v1.3.2 | +|ML-4107| On scheduled ingestion (storey and pandas engines) from CSV source, ingests all of the source on each schedule iteration. | Use a different engine and/or source. 
| v1.4.0 |
+|ML-4153|When creating a passthrough feature-set in the UI, with no online target, the feature-set yaml includes a parquet offline target, which is ignored.| NA | v1.4.0 |
+|ML-4166|Project yaml file that is very large cannot be stored. |Do not embed the artifact object in the project yaml. | v1.4.0 |
+|ML-4186|On `get_offline_features` ('local'/pandas engine) with passthrough, a source parquet column of type BOOL has dtype "object" or "bool" in the response. | | v1.4.0|
 |ML-4442|After a model is deployed without applying monitoring (`set_tracking()` was not set on the serving function), monitoring cannot be added.|Delete the existing model endpoint (`mlrun.get_run_db().delete_model_endpoint()`), then redeploy the model.| v1.5.0 |
+|ML-4582|Custom packagers cannot be added to projects created previous to v1.4.0 | NA | v1.6.0|
 |ML-4585|The `mlrun/mlrun` image does not support mpijob. | Create your own image that includes mpijob. | v1.5.0 |
+|ML-4655|Timestamp entities are allowed for feature store, but format is inconsistent. |NA|v1.5.0|
+|ML-4685|When using columns with type "float" as feature set entities, they are saved inconsistently to key-value stores by different engines.| Do not use columns with type float as feature set entities. |v1.5.0 |
 |ML-4698|Parameters that are passed to a workflow are limited to 10000 chars.| NA, external Kubeflow limitation. | v1.5.0 |
+|ML-4725|ML functions show as if they are in the "Creating" status, although they were created and used.|NA|v1.4.1|
 |ML-4740|When running function `batch_inference_v2` from the SDK, the `ingest()` function accepts 3 parameters as Data-item or other types: `dataset`, `model_path` and `model_endpoint_sample_set`. If you provided these parameters as non Data-items and later on you want to rerun this function from the UI, you need to provide these parameters as Data-item.|Prepare suitable Data-item and provide it to the batch-rerun UI.| v1.5.0 |
 |ML-4758|In rare cases, deleting a heavy project is unsuccessful and results in a timeout error message while the project moves to offline state.| Delete again.| v1.5.0 |
-|ML-4767|Torch 2.1.0 is not compatible with mlrun-gpu image. 
| NA | v1.5.0 |
-|ML-4821|Sometimes very big project deletions fail with a timeout due to deletion of project resources taking too long.|Delete the project again | NA | v1.5.0 |
+|ML-4769|After deleting a project, data is still present in the Artifacts and Executions of pipelines UI. | NA | v1.4.0 |
+|ML-4810|Cannot rerun a job when the "mlrun/client_version" label has "+" in its value. | Ensure the "mlrun/client_version" label does not include "+". | v1.6.0 |
+|ML-4821|In some cases, deleting a very big project fails with a timeout due to the time required to delete the project resources.|Delete the project again | NA | v1.5.0 |
+|ML-4846|With Docker Compose the V3IO_ACCESS_KEY is required for Parquet target. |Replace this line: `feature_set.set_targets(targets=[mlrun.datastore.ParquetTarget()], with_defaults=False)` with a command that specifies the target path for the Parquet target. For example: `feature_set.set_targets(targets=[mlrun.datastore.ParquetTarget(path="/some/path/to/parquet/file")], with_defaults=False)` | v1.5.0 |
+|ML-4857|Local runs can be aborted in the UI, though the actual execution continues.|NA | v1.5.0 |
 |ML-4858|After aborting a job/run from the UI, the logs are empty. | NA | v1.5.0 |
+|ML-4881|Kubeflow pipelines parallelism parameter in dsl.ParallelFor() does not work (external dependency). |NA| v1.4.1|
+|ML-4934|Modifying the parameters of a serving-function (for example changing `default_batch_intervals`) that is configured for model-monitoring tracking requires a specific workflow. |See [Enable model monitoring](../monitoring/initial-setup-configuration.html#enabling-model-monitoring). |v1.6.0|
 |ML-4942|The Dask dashboard requires the relevant node ports to be open. |Your infrastructure provider must open the ports manually. If running MLRun locally or CE, make sure to port-forward the port Dask Dashboard uses to ensure it is available externally to the Kubernetes cluster. | v1.5.0 |
-|ML-4953|Cannot build image through the UI on external registry, as no credentials are passed.| NA | v1.5.0 |
-|ML-4967|The **Deploy** button in the **Project > Models** page does create a new endpoint/serving function, or add a model to any function, even though it responds "Model deployment initiated successfully". | NA | v1.5.1 |
 |ML-4956|A function created by SDK is initially in the "initialized" state in the UI and needs to be deployed before running it. | In **Edit**, press **Deploy** | v1.5.1 |
-|ML-4992|When a source archive is specified, the docker image's working directory is no longer automatically set to the target directory of that source archive.|Set target dir, and change workdir back: `sj.with_source_archive(`, `source=project.source, pull_at_runtime=False, target_dir="/igz/mlrun_code"`, `)`, ` sj.spec.build.extra = "WORKDIR /igz"`| v1.5.0 |
-|ML-5001|The list of workflows in the **Monitoring workflows** page includes only workflows that have already been run. | NA | v1.5.1 |
-|ML-5042|After creating and deleting a project, a new project cannot be created in the same folder with the same context. | NA | v1.5.1 |
-|ML-5048|UI Edit function dialog: After selecting **Use an existing image**, when pressing **Deploy**, a new image is created.| NA | v1.5.1 |
-|ML-5078|Cannot only use `project.create_remote()` if `init_git=True` was set on project creation. | Set `init_git=True` on project creation.| v1.5.1 |
 |ML-5079|Cannot update git remote with `project.create_remote()`| NA | v1.5.1 |
+|ML-5204|The **Projects>Settings** does not validate label names. 
Errors are generated from the back end. |Use [Kubernetes limitations](https://kubernetes.io/docs/concepts/overview/working-with-objects/labels/#syntax-and-character-set). | v1.6.0 |
+|ML-5776|Concurrent requests to delete a project may fail, though the first call gracefully finishes the flow without any error. Other concurrent requests do not impact the project deletion flow.|NA| v1.6.0|
+

## Limitations

| ID |Description |Workaround |Opened in|
@@ -784,15 +889,19 @@
|ML-3731|When trying to identify a failed step in a workflow with `mlrun.get_run_db().list_pipelines('project-name')`, the returned error is `None`. |To see the error, use `mlrun.db.get_pipelines()` instead. |
|ML-3743|Setting AWS credentials as project secret cause a build failure on EKS configured with ECR. |When using an ECR as the external container registry, make sure that the project secrets AWS_ACCESS_KEY_ID and AWS_SECRET_ACCESS_KEY have read/write access to ECR, as described in the [platform documentation](https://www.iguazio.com/docs/latest-release/services/app-services/docker-registry/#create-off-cluster-registry)|
|ML-4386|Notifications of local runs aren't persisted. | NA | v1.5.0|
-|ML-4767|When using mlrun-gpu image, use PyTorch versions up to and including than 2.0.1, but not higher. | NA | v1.5.0|
+|ML-4767|When using mlrun-gpu image, use PyTorch versions up to and including 2.0.1, but not higher. | You can build your own images with newer CUDA for a later release of PyTorch. | v1.5.0|
 |ML-4855|MLRun supports TensorFlow up to 2.13.1. | | |
|ML-4907|MLRun Client does not support Win OS. | Use WSL instead. | v1.3.0 |
+|ML-5274|PySpark 3.2.x cannot always read parquet files written by pyarrow 13 or above. MLRun ingest might fail when `ingest()` is called with engine="spark" and a ParquetSource that points to parquet files that were written by pyarrow 13 or above. |Call `df.to_parquet()` with version="2.4" so that parquet files are backwards compatible.|v1.6.0|
+|ML-5669|When using the mlrun/mlrun image, use PyTorch versions up to and including 2.0.1, but not higher. See [MLRun runtime images](../runtimes/images.html#mlrun-runtime-images) | You can build your own images with newer CUDA for a later release of PyTorch. |v1.6.0|
+|ML-5732|When using an MLRun client previous to v1.6.0, the workflow step status might show completed when it is actually aborted. | Upgrade the client to v1.6.0 or higher. | v1.6.0 |


## Deprecations and removed code

| In |ID |Description |
|--------|---------|-------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------|
+| v1.6.0 |ML-5137|The Create/edit function pane is removed from the UI.|
| v1.5.0 |ML-4010|Unused artifact types: BokehArtifact, ChartArtifact |
| v1.5.0 |ML-4075|Python 3.7 |
| v1.5.0 |ML-4366 |MLRun images `mlrun/ml-models` and `mlrun/ml-models-gpu` |
@@ -803,32 +912,36 @@ with a drill-down to view the steps and their details. 
[Tech Preview] ### Deprecated APIs | Will be removed|Deprecated|API |Use instead | -|---------------|------------|-------------------------------------------------------------------------------------|-------------------------------------------------------------------------------------------------------------------------------------------------------------| +|---------------|------------|----------------------------------------------------------------------------------|-------------------------------------------------------------------------------------------------------------------------------------------------------------| +| v1.8.0 |v1.6.0 |HTTPDB: `last` parameter of `list_runs` | NA. Was not used.| +| v1.8.0 |v1.6.0 |Feature store: `get_offline_features` |`FeatureVector.get_offline_features()`| +| v1.8.0 |v1.6.0 |Feature store: `get_online_feature_service` |`FeatureVector.get_online_feature_service()`| +| v1.8.0 |v1.6.0 |Feature store: `preview` |`FeatureSet.preview()`| +| v1.8.0 |v1.6.0 |Feature store: `deploy_ingestion_service_v2` |`FeatureSet.deploy_ingestion_service()`| +| v1.8.0 |v1.6.0 |Feature store: `preview` |`FeatureSet.preview()`| +| v1.8.0 |v1.6.0 |Feature store: `ingest` |`FeatureSet.ingest()`| +| v1.8.0 |v1.6.0 |Artifacts: `uid` parameter of `store_artifact` | `tree` parameter of `store_artifact` (artifact uid is generated in the backend)| +| v1.8.0 |v1.6.0 |Runtimes: `with_requirements` — `requirements` param as a requirements file |`requirements_file` param | +| v1.6.2 |v1.6.0 |`dashboard` parameter of the RemoteRuntime `invoke` |NA. The parameter is ignored. | | v1.7.0 |v1.5.1 |`skip_deployed` parameter of `MLrunProject.build_image` |NA. The parameter is ignored. | | v1.7.0 |v1.5.0 |`/files` and `/filestat` |`/projects/{project}/filestat` | -| v1.6.0 |v1.4.0 |`MLRunProject.clear_context() ` |This method deletes all files and clears the context directory or subpath (if defined). This method can produce unexpected outcomes and is not recommended. | -| v1.6.0 |v1.4.0 |MLRunProject object legacy parameters |metadata and spec | -| v1.6.0 |v1.4.0 |`BaseRuntime.with_commands` and `KubejobRuntime.build_config` `verify_base_image` param|`prepare_image_for_deploy` | -| v1.6.0 |v1.4.0 |`run_local` |`function.run(local=True)` | -| v1.6.0 |v1.4.0 |CSVSource's time_field parameter |Use `parse_dates` to parse timestamps | -| v1.6.0 |v1.4.0 |Feature-set `set_targets()`, `default_final_state` |`default_final_step` | -| v1.6.0 |v1.3.0 |`new_pipe_meta` |`new_pipe_metadata` | -| v1.6.0 |v1.3.0 |ttl param from pipeline |`cleanup_ttl` | -| v1.6.0 |v1.3.0 |objects methods from artifacts list |`to_objects` | - - -### Deprecated CLIs -| Will be removed|Deprecated|CLI |Use instead | -|---------------|------------|------------------------------|-------------------------------------------------------| -| v1.6.0 |v1.3.0 |dashboard (nuclio/deploy) |No longer supported on client side | -| v1.6.0 |v1.3.0 |overwrite schedule (project)|Not relevant. 
Running a schedule is now an operation | - +| v1.7.0 |v1.3.0 |`LegacyArtifact` and all legacy artifact types that inherit from it (`LegacyArtifact`, `LegacyDirArtifact`, `LegacyLinkArtifact`, `LegacyPlotArtifact`, `LegacyChartArtifact`, `LegacyTableArtifact`, `LegacyModelArtifact`, `LegacyDatasetArtifact`, `LegacyPlotlyArtifact`, `LegacyBokehArtifact`)|`Artifact` or other artifact classes that inherit from it | ### Removed APIs | Version|API |Use instead | |---------|------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------|------------------------------------------------------------------------------| +| v1.6.0 |`dashboard` parameter of `project.deploy_function`, `RemoteRuntime.deploy`, `RemoteRuntime.get_nuclio_deploy_status`, `ServingRuntime.with_secrets`| NA. The parameter was ignored. | +| v1.6.0 |`MLRunProject.clear_context()` |This method deletes all files and clears the context directory or subpath (if defined). This method can produce unexpected outcomes and is not recommended. | +| v1.6.0 |MLRunProject object legacy parameters |metadata and spec | +| v1.6.0 |`BaseRuntime.with_commands` and `KubejobRuntime.build_config` `verify_base_image` param|`prepare_image_for_deploy` | +| v1.6.0 |`run_local` |`function.run(local=True)` | +| v1.6.0 |CSVSource's `time_fields` parameter |Use `parse_dates` to parse timestamps | +| v1.6.0 |Feature-set `set_targets()`, `default_final_state ` |`default_final_step` | +| v1.6.0 |`new_pipe_meta` |`new_pipe_metadata` | +| v1.6.0 |ttl param from pipeline |`cleanup_ttl` | +| v1.6.0 |objects methods from artifacts list |`to_objects` | v1.5.0 |user_project- and project-related parameters of `set_environment`. (Global-related parameters are not deprecated.) |The same parameters in project-related APIs, such as `get_or_create_project` | | v1.5.0 |`KubeResource.gpus` |`with_limits` | | v1.5.0 |Dask `gpus` |`with_scheduler_limits` / `with_worker_limits` | @@ -836,7 +949,6 @@ with a drill-down to view the steps and their details. [Tech Preview] | v1.5.0 |Spark runtime `gpus` |`with_driver_limits` / `with_executor_limits` | | v1.5.0 |`mount_v3io_legacy` (mount_v3io no longer calls it) |`mount_v3io` | | v1.5.0 |`mount_v3io_extended` |`mount_v3io` | -| v1.5.0 |`LegacyArtifact` and all legacy artifact types that inherit from it (`LegacyArtifact`, `LegacyDirArtifact`, `LegacyLinkArtifact`, `LegacyPlotArtifact`, `LegacyChartArtifact`, `LegacyTableArtifact`, `LegacyModelArtifact`, `LegacyDatasetArtifact`, `LegacyPlotlyArtifact`, `LegacyBokehArtifact`)|`Artifact` or other artifact classes that inherit from it | | v1.5.0 |`init_functions` in pipelines |Add the function initialization to the pipeline code instead | | v1.5.0 |The entire `mlrun/mlutils` library |`mlrun.framework` | | v1.5.0 |`run_pipeline` |`project.run` | @@ -856,4 +968,6 @@ with a drill-down to view the steps and their details. [Tech Preview] | Version|CLI | |---------|------------------------------------------------------------| -| v1.5.0 |`--ensure-project` flag of the `mlrun project` CLI command | \ No newline at end of file +| v1.6.0 |deploy `--dashboard` (nuclio/deploy) |No longer supported on client side. Configure using the MLRun API. | +| v1.6.0 |project `--overwrite-schedule` |Not relevant. Running a schedule is now an operation. 
| +| v1.5.0 |`--ensure-project` flag of the `mlrun project` CLI command | | \ No newline at end of file diff --git a/docs/store/datastore.md b/docs/store/datastore.md index 1592a18a9d5..2fa53b1d742 100644 --- a/docs/store/datastore.md +++ b/docs/store/datastore.md @@ -135,6 +135,11 @@ Not supported by the spark and remote-spark runtimes. ## Using data store profiles +```{admonition} Notes +- Datastore profile does not support: v3io (datastore, or source/target), snowflake source, DBFS for spark runtimes, Dask runtime. +- Datastore profiles are not part of a project export/import. +``` + You can use a data store profile to manage datastore credentials. A data store profile holds all the information required to address an external data source, including credentials. You can create @@ -167,11 +172,6 @@ redis_profile = project.get_datastore_profile("my_profile") local_redis_profile = DatastoreProfileRedis(redis_profile.name, redis_profile.endpoint_url, username="mylocaluser", password="mylocalpassword") register_temporary_client_datastore_profile(local_redis_profile) ``` -```{admonition} Note -Datastore profile does not support: v3io (datastore, or source/target), snowflake source, DBFS for spark runtimes, Dask runtime. -``` - - ### Azure data store profile ``` profile = DatastoreProfileAzureBlob(name="profile-name",connection_string=connection_string) From 7b8ae745e820a5270aa4f9efe74935f04b5be115 Mon Sep 17 00:00:00 2001 From: Liran BG Date: Sun, 25 Feb 2024 10:48:15 +0200 Subject: [PATCH 050/119] [MLRun] Finalizing docs for 1.6.0 (#5191) --- docs/cli.md | 6 +++--- docs/install/compose.with-jupyter.yaml | 4 ++-- docs/install/compose.yaml | 4 ++-- docs/projects/ci-integration.md | 2 +- docs/runtimes/images.md | 6 +++--- hack/local/README.md | 8 ++++---- hack/local/mljupy.yaml | 2 +- hack/local/mlrun-local.yaml | 4 ++-- hack/mlrun-all.yaml | 4 ++-- hack/mlrunapi.yaml | 2 +- hack/mlrunui.yaml | 2 +- 11 files changed, 22 insertions(+), 22 deletions(-) diff --git a/docs/cli.md b/docs/cli.md index b98e034d6fc..2fe162176f0 100644 --- a/docs/cli.md +++ b/docs/cli.md @@ -280,7 +280,7 @@ spec: image: .mlrun/func-default-remote-demo-ps-latest image_pull_policy: Always build: - base_image: mlrun/mlrun:1.5.1 + base_image: mlrun/mlrun:1.6.0 source: git://github.com/mlrun/mlrun ``` @@ -310,7 +310,7 @@ spec: image_pull_policy: Always build: commands: [] - base_image: mlrun/mlrun:1.5.1 + base_image: mlrun/mlrun:1.6.0 source: git://github.com/mlrun/ci-demo.git ``` @@ -338,7 +338,7 @@ spec: image_pull_policy: Always build: commands: [] - base_image: mlrun/mlrun:1.5.1 + base_image: mlrun/mlrun:1.6.0 ``` Next, run the following MLRun CLI command to build the function; replace the `<...>` placeholders to match your configuration: diff --git a/docs/install/compose.with-jupyter.yaml b/docs/install/compose.with-jupyter.yaml index 6eaaed1f7c7..5c5fdcbafea 100644 --- a/docs/install/compose.with-jupyter.yaml +++ b/docs/install/compose.with-jupyter.yaml @@ -39,7 +39,7 @@ services: - nuclio-platform-config:/etc/nuclio/config jupyter: - image: "mlrun/jupyter:${TAG:-1.5.1}" + image: "mlrun/jupyter:${TAG:-1.6.0}" ports: - "8080:8080" - "8888:8888" @@ -61,7 +61,7 @@ services: - mlrun mlrun-ui: - image: "mlrun/mlrun-ui:${TAG:-1.5.1}" + image: "mlrun/mlrun-ui:${TAG:-1.6.0}" ports: - "8060:8090" environment: diff --git a/docs/install/compose.yaml b/docs/install/compose.yaml index 859564b3880..586fd4aa695 100644 --- a/docs/install/compose.yaml +++ b/docs/install/compose.yaml @@ -39,7 +39,7 @@ services: - 
nuclio-platform-config:/etc/nuclio/config mlrun-api: - image: "mlrun/mlrun-api:${TAG:-1.5.1}" + image: "mlrun/mlrun-api:${TAG:-1.6.0}" ports: - "8080:8080" environment: @@ -61,7 +61,7 @@ services: - mlrun mlrun-ui: - image: "mlrun/mlrun-ui:${TAG:-1.5.1}" + image: "mlrun/mlrun-ui:${TAG:-1.6.0}" ports: - "8060:8090" environment: diff --git a/docs/projects/ci-integration.md b/docs/projects/ci-integration.md index 9aa224d108a..91498d2d64b 100644 --- a/docs/projects/ci-integration.md +++ b/docs/projects/ci-integration.md @@ -122,7 +122,7 @@ pipeline { } agent { docker { - image 'mlrun/mlrun:1.5.1' + image 'mlrun/mlrun:1.6.0' } } steps { diff --git a/docs/runtimes/images.md b/docs/runtimes/images.md index 3dc33f79840..fdeefaaf052 100644 --- a/docs/runtimes/images.md +++ b/docs/runtimes/images.md @@ -100,8 +100,8 @@ This flow describes how to build the image externally, put it your private repo, ## MLRun images and external docker images There is no difference in the usage between the MLRun images and external docker images. However: -- MLRun images resolve auto tags: If you specify ```image="mlrun/mlrun"``` the API fills in the tag by the client version, e.g. changes it to `mlrun/mlrun:1.5.1`. So, if the client gets upgraded you'll automatically get a new image tag. -- Where the data node registry exists, MLRun Appends the registry prefix, so the image loads from the datanode registry. This pulls the image more quickly, and also supports air-gapped sites. When you specify an MLRun image, for example `mlrun/mlrun:1.5.1`, the actual image used is similar to `datanode-registry.iguazio-platform.app.vm/mlrun/mlrun:1.5.1`. +- MLRun images resolve auto tags: If you specify ```image="mlrun/mlrun"``` the API fills in the tag by the client version, e.g. changes it to `mlrun/mlrun:1.6.0`. So, if the client gets upgraded you'll automatically get a new image tag. +- Where the data node registry exists, MLRun Appends the registry prefix, so the image loads from the datanode registry. This pulls the image more quickly, and also supports air-gapped sites. When you specify an MLRun image, for example `mlrun/mlrun:1.6.0`, the actual image used is similar to `datanode-registry.iguazio-platform.app.vm/mlrun/mlrun:1.6.0`. These characteristics are great when you’re working in a POC or development environment. But MLRun typically upgrades packages as part of the image, and therefore the default MLRun images can break your product flow. @@ -110,5 +110,5 @@ These characteristics are great when you’re working in a POC or development en For production, **create your own images** to ensure that the image is fixed. ``` -- Pin the image tag, e.g. `image="mlrun/mlrun:1.5.1"`. This maintains the image tag at the version you specified, even when the client is upgraded. Otherwise, an upgrade of the client would also upgrade the image. (If you specify an external (not MLRun images) docker image, like python, the result is the docker/k8s default behavior, which defaults to `latest` when the tag is not provided.) +- Pin the image tag, e.g. `image="mlrun/mlrun:1.6.0"`. This maintains the image tag at the version you specified, even when the client is upgraded. Otherwise, an upgrade of the client would also upgrade the image. (If you specify an external (not MLRun images) docker image, like python, the result is the docker/k8s default behavior, which defaults to `latest` when the tag is not provided.) - Pin the versions of requirements, again to avoid breakages, e.g. `pandas==1.4.0`. (If you only specify the package name, e.g. 
pandas, then pip/conda (python's package managers) just pick up the latest version.) diff --git a/hack/local/README.md b/hack/local/README.md index a6373bddfd8..876d0413bc7 100644 --- a/hack/local/README.md +++ b/hack/local/README.md @@ -28,12 +28,12 @@ To use MLRun with your local Docker registry, run the MLRun API service, dashboa ``` SHARED_DIR=~/mlrun-data -docker pull mlrun/jupyter:1.5.1 -docker pull mlrun/mlrun-ui:1.5.1 +docker pull mlrun/jupyter:1.6.0 +docker pull mlrun/mlrun-ui:1.6.0 docker network create mlrun-network -docker run -it -p 8080:8080 -p 8888:8888 --rm -d --network mlrun-network --name jupyter -v ${SHARED_DIR}:/home/jovyan/data mlrun/jupyter:1.5.1 -docker run -it -p 4000:80 --rm -d --network mlrun-network --name mlrun-ui -e MLRUN_API_PROXY_URL=http://jupyter:8080 mlrun/mlrun-ui:1.5.1 +docker run -it -p 8080:8080 -p 8888:8888 --rm -d --network mlrun-network --name jupyter -v ${SHARED_DIR}:/home/jovyan/data mlrun/jupyter:1.6.0 +docker run -it -p 4000:80 --rm -d --network mlrun-network --name mlrun-ui -e MLRUN_API_PROXY_URL=http://jupyter:8080 mlrun/mlrun-ui:1.6.0 ``` When the execution completes — diff --git a/hack/local/mljupy.yaml b/hack/local/mljupy.yaml index a9baea454f6..ae1c53adb8a 100644 --- a/hack/local/mljupy.yaml +++ b/hack/local/mljupy.yaml @@ -77,7 +77,7 @@ spec: spec: containers: - name: jupyter-notebook - image: mlrun/jupyter:1.5.1 + image: mlrun/jupyter:1.6.0 env: - name: MLRUN_NAMESPACE valueFrom: diff --git a/hack/local/mlrun-local.yaml b/hack/local/mlrun-local.yaml index 18b2bd429a7..71126e0de1f 100644 --- a/hack/local/mlrun-local.yaml +++ b/hack/local/mlrun-local.yaml @@ -31,7 +31,7 @@ spec: spec: containers: - name: mlrun-api - image: mlrun/mlrun-api:1.5.1 + image: mlrun/mlrun-api:1.6.0 env: - name: MLRUN_NAMESPACE valueFrom: @@ -86,7 +86,7 @@ spec: spec: containers: - name: mlrun-ui - image: mlrun/mlrun-ui:1.5.1 + image: mlrun/mlrun-ui:1.6.0 env: - name: MLRUN_API_PROXY_URL value: http://mlrun-api:8080 diff --git a/hack/mlrun-all.yaml b/hack/mlrun-all.yaml index b2b1021a4e0..39544d42853 100644 --- a/hack/mlrun-all.yaml +++ b/hack/mlrun-all.yaml @@ -31,7 +31,7 @@ spec: spec: containers: - name: mlrun-api - image: mlrun/mlrun-api:1.5.1 + image: mlrun/mlrun-api:1.6.0 env: - name: MLRUN_NAMESPACE valueFrom: @@ -91,7 +91,7 @@ spec: spec: containers: - name: mlrun-ui - image: mlrun/mlrun-ui:1.5.1 + image: mlrun/mlrun-ui:1.6.0 env: - name: MLRUN_API_PROXY_URL value: http://mlrun-api:8080 diff --git a/hack/mlrunapi.yaml b/hack/mlrunapi.yaml index dd19d1b36f0..904a3147fba 100644 --- a/hack/mlrunapi.yaml +++ b/hack/mlrunapi.yaml @@ -31,7 +31,7 @@ spec: spec: containers: - name: mlrun-api - image: mlrun/mlrun-api:1.5.1 + image: mlrun/mlrun-api:1.6.0 env: - name: MLRUN_HTTPDB__BUILDER__DOCKER_REGISTRY value: "default registry url e.g. 
index.docker.io/, if repository is not set it will default to mlrun"
diff --git a/hack/mlrunui.yaml b/hack/mlrunui.yaml
index c6c9079cb9d..2ebef244e15 100644
--- a/hack/mlrunui.yaml
+++ b/hack/mlrunui.yaml
@@ -30,7 +30,7 @@ spec:
     spec:
       containers:
       - name: mlrun-ui
-        image: mlrun/mlrun-ui:1.5.1
+        image: mlrun/mlrun-ui:1.6.0
        env:
        - name: MLRUN_API_PROXY_URL
          value: http://mlrun-api:8080

From 9c3bb2ad5ad8e553e3d44de8fbf12038d3e38e15 Mon Sep 17 00:00:00 2001
From: Liran BG <liranbg@users.noreply.github.com>
Date: Sun, 25 Feb 2024 12:21:25 +0200
Subject: [PATCH 051/119] [Version] Bump 1.6.x to 1.6.1 (#5193)

---
 automation/version/unstable_version_prefix | 2 +-
 1 file changed, 1 insertion(+), 1 deletion(-)

diff --git a/automation/version/unstable_version_prefix b/automation/version/unstable_version_prefix
index dc1e644a101..9c6d6293b1a 100644
--- a/automation/version/unstable_version_prefix
+++ b/automation/version/unstable_version_prefix
@@ -1 +1 @@
-1.6.0
+1.6.1

From d43a3022a1afd7e52a5e3c855c3dade12e543c72 Mon Sep 17 00:00:00 2001
From: TomerShor <90552140+TomerShor@users.noreply.github.com>
Date: Mon, 26 Feb 2024 13:39:00 +0200
Subject: [PATCH 052/119] [Artifacts] Persist db key when migrating to artifact v2 table [1.6.x] (#5200)

---
 server/api/initial_data.py     |  12 ++-
 tests/api/db/test_artifacts.py | 129 +++++++++++++++++++--------------
 2 files changed, 83 insertions(+), 58 deletions(-)

diff --git a/server/api/initial_data.py b/server/api/initial_data.py
index 6fa7baffacb..e3cf70f65d5 100644
--- a/server/api/initial_data.py
+++ b/server/api/initial_data.py
@@ -571,14 +571,18 @@ def _migrate_artifacts_batch(
     # project - copy as is
     new_artifact.project = artifact_metadata.get("project", None)
 
-    # key - the artifact's key, without iteration if it is attached to it
-    key = artifact_metadata.get("key", "")
-    new_artifact.key = key
-
     # iteration - the artifact's iteration
     iteration = artifact_metadata.get("iter", None)
     new_artifact.iteration = int(iteration) if iteration else 0
 
+    # key - retain the db key to ensure BC of reading artifacts by the index key.
+    # if iteration is concatenated to the key, remove it as this was only handled in the backend,
+    # and now the iteration is saved in a separate column
+    key = artifact.key
+    if iteration and key.startswith(f"{iteration}-"):
+        key = key[len(f"{iteration}-") :]
+    new_artifact.key = key
+
     # best iteration
     # if iteration == 0 it means it is from a single run since link artifacts were already
     # handled above - so we can set it as the best iteration. 
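The key-normalization rule in the `initial_data.py` hunk above can also be read in isolation. The following is a minimal sketch, assuming (as the comments in the hunk state) that legacy v1 keys of hyper-param runs carry an `"<iteration>-"` prefix; the helper name `split_db_key` and the sample values are illustrative only, not part of the patch:

```python
# Minimal sketch of the v1 -> v2 key/iteration normalization above.
# Assumption (per the patch comments): legacy keys of hyper-param runs may
# carry an "<iteration>-" prefix that was added by the backend.
def split_db_key(legacy_key: str, iteration: int) -> tuple[str, int]:
    prefix = f"{iteration}-"
    if iteration and legacy_key.startswith(prefix):
        # Strip the prefix; the iteration now lives in its own column.
        legacy_key = legacy_key[len(prefix):]
    return legacy_key, iteration or 0

assert split_db_key("2-my-model", 2) == ("my-model", 2)  # hyper-param run
assert split_db_key("my-model", 0) == ("my-model", 0)    # single-run artifact
```

The test changes below exercise the same rule end to end: `test_migrate_artifact_v2_persist_db_key_with_iteration` stores a v1 artifact under an iteration-prefixed `db_key`, runs the migration, and asserts that the v2 row keeps `key == db_key` with the iteration in a separate column.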
diff --git a/tests/api/db/test_artifacts.py b/tests/api/db/test_artifacts.py index cfb154af625..2434142579c 100644 --- a/tests/api/db/test_artifacts.py +++ b/tests/api/db/test_artifacts.py @@ -1075,16 +1075,7 @@ def test_migrate_artifacts_to_v2(self, db: DBInterface, db_session: Session): artifact_tag = "artifact-tag-1" project = "project1" - # create project - db.create_project( - db_session, - mlrun.common.schemas.Project( - metadata=mlrun.common.schemas.ProjectMetadata( - name=project, - ), - spec=mlrun.common.schemas.ProjectSpec(description="some-description"), - ), - ) + self._create_project(db, db_session, project) # create an artifact in the old format artifact_key_1 = "artifact1" @@ -1137,14 +1128,7 @@ def test_migrate_artifacts_to_v2(self, db: DBInterface, db_session: Session): tag=legacy_artifact_tag, ) - with tempfile.TemporaryDirectory() as temp_dir: - # change the state file path to the temp directory for the test only - mlrun.config.config.artifacts.artifact_migration_state_file_path = ( - temp_dir + "/_artifact_migration_state.json" - ) - - # perform the migration - server.api.initial_data._migrate_artifacts_table_v2(db, db_session) + self._run_artifacts_v2_migration(db, db_session) # validate the migration succeeded query_all = db._query( @@ -1229,17 +1213,7 @@ def test_migrate_many_artifacts_to_v2(self, db: DBInterface, db_session: Session # create 10 artifacts in 10 projects for i in range(10): project_name = f"project-{i}" - db.create_project( - db_session, - mlrun.common.schemas.Project( - metadata=mlrun.common.schemas.ProjectMetadata( - name=project_name, - ), - spec=mlrun.common.schemas.ProjectSpec( - description="some-description" - ), - ), - ) + self._create_project(db, db_session, project_name) for j in range(10): artifact_key = f"artifact-{j}" artifact_uid = f"uid-{j}" @@ -1265,14 +1239,7 @@ def test_migrate_many_artifacts_to_v2(self, db: DBInterface, db_session: Session ).all() assert len(old_artifacts) == 100 - with tempfile.TemporaryDirectory() as temp_dir: - # change the state file path to the temp directory for the test only - mlrun.config.config.artifacts.artifact_migration_state_file_path = ( - temp_dir + "/_artifact_migration_state.json" - ) - - # perform the migration - server.api.initial_data._migrate_artifacts_table_v2(db, db_session) + self._run_artifacts_v2_migration(db, db_session) # validate the migration succeeded old_artifacts = db._query( @@ -1307,15 +1274,7 @@ def test_migrate_artifact_v2_tag(self, db: DBInterface, db_session: Session): project = "project1" # create project - db.create_project( - db_session, - mlrun.common.schemas.Project( - metadata=mlrun.common.schemas.ProjectMetadata( - name=project, - ), - spec=mlrun.common.schemas.ProjectSpec(description="some-description"), - ), - ) + self._create_project(db, db_session, project) # create an artifact in the old format artifact_body = self._generate_artifact(artifact_key, artifact_uid, "artifact") @@ -1338,14 +1297,7 @@ def test_migrate_artifact_v2_tag(self, db: DBInterface, db_session: Session): old_artifacts = query_all.all() assert len(old_artifacts) == 1 - with tempfile.TemporaryDirectory() as temp_dir: - # change the state file path to the temp directory for the test only - mlrun.config.config.artifacts.artifact_migration_state_file_path = ( - temp_dir + "/_artifact_migration_state.json" - ) - - # perform the migration - server.api.initial_data._migrate_artifacts_table_v2(db, db_session) + self._run_artifacts_v2_migration(db, db_session) # validate the migration succeeded query_all = 
db._query( @@ -1370,6 +1322,54 @@ def test_migrate_artifact_v2_tag(self, db: DBInterface, db_session: Session): assert artifacts[0]["metadata"]["project"] == project assert artifacts[0]["metadata"]["uid"] != artifact_uid + def test_migrate_artifact_v2_persist_db_key_with_iteration( + self, db: DBInterface, db_session: Session + ): + artifact_key = "artifact" + artifact_tree = "some-tree" + artifact_tag = "artifact-tag-1" + project = "project1" + db_key = "db-key-1" + iteration = 2 + + # create project + self._create_project(db, db_session, project) + + # create artifacts in the old format + artifact_body = self._generate_artifact(artifact_key, artifact_tree, "artifact") + artifact_body["metadata"]["key"] = artifact_key + artifact_body["metadata"]["iter"] = iteration + artifact_body["metadata"]["project"] = project + artifact_body["spec"]["db_key"] = db_key + + # store the artifact with the db_key + db.store_artifact_v1( + db_session, + db_key, + artifact_body, + artifact_tree, + project=project, + tag=artifact_tag, + iter=iteration, + ) + + # validate the artifact was stored with the db_key + key = f"{iteration}-{db_key}" + artifact = db.read_artifact_v1(db_session, key, project=project) + assert artifact["metadata"]["key"] == artifact_key + + # migrate the artifacts to v2 + self._run_artifacts_v2_migration(db, db_session) + + # validate the migration succeeded and the db_key was persisted + query_all = db._query( + db_session, + server.api.db.sqldb.models.ArtifactV2, + ) + new_artifact = query_all.one() + assert new_artifact.key == db_key + assert new_artifact.iteration == iteration + def test_update_model_spec(self, db: DBInterface, db_session: Session): artifact_key = "model1" @@ -1471,3 +1471,24 @@ def _mark_best_iteration_artifact( project=project, producer_id=artifact_tree, ) + + @staticmethod + def _create_project(db: DBInterface, db_session: Session, project_name): + project = mlrun.common.schemas.Project( + metadata=mlrun.common.schemas.ProjectMetadata( + name=project_name, + ), + spec=mlrun.common.schemas.ProjectSpec(description="some-description"), + ) + db.create_project(db_session, project) + + @staticmethod + def _run_artifacts_v2_migration(db: DBInterface, db_session: Session): + with tempfile.TemporaryDirectory() as temp_dir: + # change the state file path to the temp directory for the test only + mlrun.config.config.artifacts.artifact_migration_state_file_path = ( + temp_dir + "/_artifact_migration_state.json" + ) + + # perform the migration + server.api.initial_data._migrate_artifacts_table_v2(db, db_session) From 5a17157324b07fafc81eeafd597f27a6659c98ea Mon Sep 17 00:00:00 2001 From: Jonathan Daniel <36337649+jond01@users.noreply.github.com> Date: Thu, 29 Feb 2024 14:10:00 +0200 Subject: [PATCH 053/119] [Tests] Skip 3 chronically failing model monitoring system tests [1.6.x] (#5213) --- tests/system/model_monitoring/test_model_monitoring.py | 3 +++ 1 file changed, 3 insertions(+) diff --git a/tests/system/model_monitoring/test_model_monitoring.py b/tests/system/model_monitoring/test_model_monitoring.py index bbc8f5725cb..92f5ff61662 100644 --- a/tests/system/model_monitoring/test_model_monitoring.py +++ b/tests/system/model_monitoring/test_model_monitoring.py @@ -235,6 +235,7 @@ def random_labels(): ) +@pytest.mark.skip(reason="Chronically fails, see ML-5820") @TestMLRunSystem.skip_test_if_env_not_configured @pytest.mark.enterprise class TestBasicModelMonitoring(TestMLRunSystem): @@ -329,6 +330,7 @@ def _assert_model_endpoint_metrics(self): assert total > 0 
+@pytest.mark.skip(reason="Chronically fails, see ML-5820")
 @TestMLRunSystem.skip_test_if_env_not_configured
 class TestModelMonitoringRegression(TestMLRunSystem):
     """Train, deploy and apply monitoring on a regression model"""
@@ -485,6 +487,7 @@ def test_model_monitoring_with_regression(self):
         assert expected_uri == monitoring_feature_set.uri
 
 
+@pytest.mark.skip(reason="Chronically fails, see ML-5820")
 @TestMLRunSystem.skip_test_if_env_not_configured
 @pytest.mark.enterprise
 class TestVotingModelMonitoring(TestMLRunSystem):

From 9526318de56c0011460298b3eaed258b121e0fdc Mon Sep 17 00:00:00 2001
From: Liran BG <liranbg@users.noreply.github.com>
Date: Thu, 29 Feb 2024 22:14:35 +0200
Subject: [PATCH 054/119] [Docs] Add V1.6.1 to change log [1.6.x] (#5216)

---
 docs/change-log/index.md | 15 +++++++++++----
 1 file changed, 11 insertions(+), 4 deletions(-)

diff --git a/docs/change-log/index.md b/docs/change-log/index.md
index a271f8c4059..1b4f9461aca 100644
--- a/docs/change-log/index.md
+++ b/docs/change-log/index.md
@@ -1,7 +1,7 @@
 (change-log)=
 # Change log
 
-- [v1.6.0](v1-6-0-22-february-2024)
+- [v1.6.1](#v1-6-1-29-february-2024) | [v1.6.0](#v1-6-0-22-february-2024)
 - [v1.5.2](#v1-5-2-30-november-2023) | [v1.5.1](#v1-5-1-2-november-2023) | [v1.5.0](#v1-5-0-23-october-2023)
 - [v1.4.1](#v1-4-1-8-august-2023) | [v1.4.0](#v1-4-0-23-july-2023)
 - [v1.3.4](#v1-3-4-23-august-2023) | [v1.3.3](#v1-3-3-7-jun-2023) | [v1.3.2](#v1-3-2-4-jun-2023) | [v1.3.1](#v1-3-1-18-may-2023) | [v1.3.0](#v1-3-0-22-march-2023)
@@ -13,6 +13,13 @@
 
 - [Deprecations](#deprecations-and-removed-code)
 
+## v1.6.1 (29 February 2024)
+
+### Closed issue
+| ID |Description |
+|----------|---------------------------------------------------------------------------|
+|ML-5799|The artifact `db_key` is not overwritten after upgrade.|
+
 ## v1.6.0 (22 February 2024)
 
 ### Data store
@@ -24,7 +31,7 @@
 ### Feature store
 | ID |Description |
 |---------|-----------------------------------------------------------------------------------------------------|
-|ML-4622|Feature set and feature vector APIs are now class methods. See examples in {ref}`feature-sets`.|
+|ML-4622|Feature set and feature vector APIs are now class methods. See examples in {ref}`feature-sets` and {ref}`create-use-feature-vectors`.|
 |ML-5109|You can set `min_replicas` and `max_replicas` for `KafkaSource`. See [Consumer function configuration](../serving/graph-ha-cfg.html#consumer-function-configuration).|
@@ -876,6 +883,7 @@
 |ML-4956|A function created by SDK is initially in the "initialized" state in the UI and needs to be deployed before running it. | In **Edit**, press **Deploy** | v1.5.1 |
 |ML-5079|Cannot update git remote with `project.create_remote()`| NA | v1.5.1 |
 |ML-5204|The **Projects>Settings** does not validate label names. Errors are generated from the back end. |Use [Kubernetes limitations](https://kubernetes.io/docs/concepts/overview/working-with-objects/labels/#syntax-and-character-set). | v1.6.0 |
+|ML-5732|When using an MLRun client previous to v1.6.0, the workflow step status might show completed when it is actually aborted.|Abort the job from the SDK instead of from the UI, or upgrade the client. |1.6.0|
 |ML-5776|Concurrent requests to delete a project may fail, though the first call gracefully finishes the flow without any error. Other concurrent requests do not impact the project deletion flow.|NA| v1.6.0|
 
 
@@ -913,12 +921,11 @@ with a drill-down to view the steps and their details. 
[Tech Preview] | Will be removed|Deprecated|API |Use instead | |---------------|------------|----------------------------------------------------------------------------------|-------------------------------------------------------------------------------------------------------------------------------------------------------------| -| v1.8.0 |v1.6.0 |HTTPDB: `last` parameter of `list_runs` | NA. Was not used.| +| v1.8.0 |v1.6.0 |HTTPDB: `last` parameter of `list_runs` | NA. Was not used.| | v1.8.0 |v1.6.0 |Feature store: `get_offline_features` |`FeatureVector.get_offline_features()`| | v1.8.0 |v1.6.0 |Feature store: `get_online_feature_service` |`FeatureVector.get_online_feature_service()`| | v1.8.0 |v1.6.0 |Feature store: `preview` |`FeatureSet.preview()`| | v1.8.0 |v1.6.0 |Feature store: `deploy_ingestion_service_v2` |`FeatureSet.deploy_ingestion_service()`| -| v1.8.0 |v1.6.0 |Feature store: `preview` |`FeatureSet.preview()`| | v1.8.0 |v1.6.0 |Feature store: `ingest` |`FeatureSet.ingest()`| | v1.8.0 |v1.6.0 |Artifacts: `uid` parameter of `store_artifact` | `tree` parameter of `store_artifact` (artifact uid is generated in the backend)| | v1.8.0 |v1.6.0 |Runtimes: `with_requirements` — `requirements` param as a requirements file |`requirements_file` param | From 548b5a1f1772df879dd8beeb27cdc20fb73222ea Mon Sep 17 00:00:00 2001 From: Liran BG Date: Thu, 29 Feb 2024 23:31:23 +0200 Subject: [PATCH 055/119] [MLRun] Finalizing docs for 1.6.1 (#5217) --- docs/cli.md | 6 +++--- docs/install/compose.with-jupyter.yaml | 4 ++-- docs/install/compose.yaml | 4 ++-- docs/projects/ci-integration.md | 2 +- docs/runtimes/images.md | 6 +++--- hack/local/README.md | 8 ++++---- hack/local/mljupy.yaml | 2 +- hack/local/mlrun-local.yaml | 4 ++-- hack/mlrun-all.yaml | 4 ++-- hack/mlrunapi.yaml | 2 +- hack/mlrunui.yaml | 2 +- 11 files changed, 22 insertions(+), 22 deletions(-) diff --git a/docs/cli.md b/docs/cli.md index 2fe162176f0..2d6b1648b0f 100644 --- a/docs/cli.md +++ b/docs/cli.md @@ -280,7 +280,7 @@ spec: image: .mlrun/func-default-remote-demo-ps-latest image_pull_policy: Always build: - base_image: mlrun/mlrun:1.6.0 + base_image: mlrun/mlrun:1.6.1 source: git://github.com/mlrun/mlrun ``` @@ -310,7 +310,7 @@ spec: image_pull_policy: Always build: commands: [] - base_image: mlrun/mlrun:1.6.0 + base_image: mlrun/mlrun:1.6.1 source: git://github.com/mlrun/ci-demo.git ``` @@ -338,7 +338,7 @@ spec: image_pull_policy: Always build: commands: [] - base_image: mlrun/mlrun:1.6.0 + base_image: mlrun/mlrun:1.6.1 ``` Next, run the following MLRun CLI command to build the function; replace the `<...>` placeholders to match your configuration: diff --git a/docs/install/compose.with-jupyter.yaml b/docs/install/compose.with-jupyter.yaml index 5c5fdcbafea..b83aed8a449 100644 --- a/docs/install/compose.with-jupyter.yaml +++ b/docs/install/compose.with-jupyter.yaml @@ -39,7 +39,7 @@ services: - nuclio-platform-config:/etc/nuclio/config jupyter: - image: "mlrun/jupyter:${TAG:-1.6.0}" + image: "mlrun/jupyter:${TAG:-1.6.1}" ports: - "8080:8080" - "8888:8888" @@ -61,7 +61,7 @@ services: - mlrun mlrun-ui: - image: "mlrun/mlrun-ui:${TAG:-1.6.0}" + image: "mlrun/mlrun-ui:${TAG:-1.6.1}" ports: - "8060:8090" environment: diff --git a/docs/install/compose.yaml b/docs/install/compose.yaml index 586fd4aa695..44afb70c4b5 100644 --- a/docs/install/compose.yaml +++ b/docs/install/compose.yaml @@ -39,7 +39,7 @@ services: - nuclio-platform-config:/etc/nuclio/config mlrun-api: - image: "mlrun/mlrun-api:${TAG:-1.6.0}" + image: 
"mlrun/mlrun-api:${TAG:-1.6.1}" ports: - "8080:8080" environment: @@ -61,7 +61,7 @@ services: - mlrun mlrun-ui: - image: "mlrun/mlrun-ui:${TAG:-1.6.0}" + image: "mlrun/mlrun-ui:${TAG:-1.6.1}" ports: - "8060:8090" environment: diff --git a/docs/projects/ci-integration.md b/docs/projects/ci-integration.md index 91498d2d64b..b128571d79f 100644 --- a/docs/projects/ci-integration.md +++ b/docs/projects/ci-integration.md @@ -122,7 +122,7 @@ pipeline { } agent { docker { - image 'mlrun/mlrun:1.6.0' + image 'mlrun/mlrun:1.6.1' } } steps { diff --git a/docs/runtimes/images.md b/docs/runtimes/images.md index fdeefaaf052..8986004dcf9 100644 --- a/docs/runtimes/images.md +++ b/docs/runtimes/images.md @@ -100,8 +100,8 @@ This flow describes how to build the image externally, put it your private repo, ## MLRun images and external docker images There is no difference in the usage between the MLRun images and external docker images. However: -- MLRun images resolve auto tags: If you specify ```image="mlrun/mlrun"``` the API fills in the tag by the client version, e.g. changes it to `mlrun/mlrun:1.6.0`. So, if the client gets upgraded you'll automatically get a new image tag. -- Where the data node registry exists, MLRun Appends the registry prefix, so the image loads from the datanode registry. This pulls the image more quickly, and also supports air-gapped sites. When you specify an MLRun image, for example `mlrun/mlrun:1.6.0`, the actual image used is similar to `datanode-registry.iguazio-platform.app.vm/mlrun/mlrun:1.6.0`. +- MLRun images resolve auto tags: If you specify ```image="mlrun/mlrun"``` the API fills in the tag by the client version, e.g. changes it to `mlrun/mlrun:1.6.1`. So, if the client gets upgraded you'll automatically get a new image tag. +- Where the data node registry exists, MLRun Appends the registry prefix, so the image loads from the datanode registry. This pulls the image more quickly, and also supports air-gapped sites. When you specify an MLRun image, for example `mlrun/mlrun:1.6.1`, the actual image used is similar to `datanode-registry.iguazio-platform.app.vm/mlrun/mlrun:1.6.1`. These characteristics are great when you’re working in a POC or development environment. But MLRun typically upgrades packages as part of the image, and therefore the default MLRun images can break your product flow. @@ -110,5 +110,5 @@ These characteristics are great when you’re working in a POC or development en For production, **create your own images** to ensure that the image is fixed. ``` -- Pin the image tag, e.g. `image="mlrun/mlrun:1.6.0"`. This maintains the image tag at the version you specified, even when the client is upgraded. Otherwise, an upgrade of the client would also upgrade the image. (If you specify an external (not MLRun images) docker image, like python, the result is the docker/k8s default behavior, which defaults to `latest` when the tag is not provided.) +- Pin the image tag, e.g. `image="mlrun/mlrun:1.6.1"`. This maintains the image tag at the version you specified, even when the client is upgraded. Otherwise, an upgrade of the client would also upgrade the image. (If you specify an external (not MLRun images) docker image, like python, the result is the docker/k8s default behavior, which defaults to `latest` when the tag is not provided.) - Pin the versions of requirements, again to avoid breakages, e.g. `pandas==1.4.0`. (If you only specify the package name, e.g. pandas, then pip/conda (python's package managers) just pick up the latest version.) 
diff --git a/hack/local/README.md b/hack/local/README.md
index 876d0413bc7..a4d38936d44 100644
--- a/hack/local/README.md
+++ b/hack/local/README.md
@@ -28,12 +28,12 @@ To use MLRun with your local Docker registry, run the MLRun API service, dashboa
 ```
 SHARED_DIR=~/mlrun-data
 
-docker pull mlrun/jupyter:1.6.0
-docker pull mlrun/mlrun-ui:1.6.0
+docker pull mlrun/jupyter:1.6.1
+docker pull mlrun/mlrun-ui:1.6.1
 
 docker network create mlrun-network
 
-docker run -it -p 8080:8080 -p 8888:8888 --rm -d --network mlrun-network --name jupyter -v ${SHARED_DIR}:/home/jovyan/data mlrun/jupyter:1.6.0
-docker run -it -p 4000:80 --rm -d --network mlrun-network --name mlrun-ui -e MLRUN_API_PROXY_URL=http://jupyter:8080 mlrun/mlrun-ui:1.6.0
+docker run -it -p 8080:8080 -p 8888:8888 --rm -d --network mlrun-network --name jupyter -v ${SHARED_DIR}:/home/jovyan/data mlrun/jupyter:1.6.1
+docker run -it -p 4000:80 --rm -d --network mlrun-network --name mlrun-ui -e MLRUN_API_PROXY_URL=http://jupyter:8080 mlrun/mlrun-ui:1.6.1
 ```
 
 When the execution completes —
diff --git a/hack/local/mljupy.yaml b/hack/local/mljupy.yaml
index ae1c53adb8a..5bb48085078 100644
--- a/hack/local/mljupy.yaml
+++ b/hack/local/mljupy.yaml
@@ -77,7 +77,7 @@ spec:
     spec:
       containers:
       - name: jupyter-notebook
-        image: mlrun/jupyter:1.6.0
+        image: mlrun/jupyter:1.6.1
        env:
        - name: MLRUN_NAMESPACE
          valueFrom:
diff --git a/hack/local/mlrun-local.yaml b/hack/local/mlrun-local.yaml
index 71126e0de1f..69bf82ff9e4 100644
--- a/hack/local/mlrun-local.yaml
+++ b/hack/local/mlrun-local.yaml
@@ -31,7 +31,7 @@ spec:
     spec:
      containers:
      - name: mlrun-api
-        image: mlrun/mlrun-api:1.6.0
+        image: mlrun/mlrun-api:1.6.1
        env:
        - name: MLRUN_NAMESPACE
          valueFrom:
@@ -86,7 +86,7 @@ spec:
     spec:
      containers:
      - name: mlrun-ui
-        image: mlrun/mlrun-ui:1.6.0
+        image: mlrun/mlrun-ui:1.6.1
        env:
        - name: MLRUN_API_PROXY_URL
          value: http://mlrun-api:8080
diff --git a/hack/mlrun-all.yaml b/hack/mlrun-all.yaml
index 39544d42853..a5414424265 100644
--- a/hack/mlrun-all.yaml
+++ b/hack/mlrun-all.yaml
@@ -31,7 +31,7 @@ spec:
     spec:
      containers:
      - name: mlrun-api
-        image: mlrun/mlrun-api:1.6.0
+        image: mlrun/mlrun-api:1.6.1
        env:
        - name: MLRUN_NAMESPACE
          valueFrom:
@@ -91,7 +91,7 @@ spec:
     spec:
      containers:
      - name: mlrun-ui
-        image: mlrun/mlrun-ui:1.6.0
+        image: mlrun/mlrun-ui:1.6.1
        env:
        - name: MLRUN_API_PROXY_URL
          value: http://mlrun-api:8080
diff --git a/hack/mlrunapi.yaml b/hack/mlrunapi.yaml
index 904a3147fba..64adbdf3b81 100644
--- a/hack/mlrunapi.yaml
+++ b/hack/mlrunapi.yaml
@@ -31,7 +31,7 @@ spec:
     spec:
      containers:
      - name: mlrun-api
-        image: mlrun/mlrun-api:1.6.0
+        image: mlrun/mlrun-api:1.6.1
        env:
        - name: MLRUN_HTTPDB__BUILDER__DOCKER_REGISTRY
          value: "default registry url e.g. index.docker.io/, if repository is not set it will default to mlrun"
diff --git a/hack/mlrunui.yaml b/hack/mlrunui.yaml
index 2ebef244e15..f630cec533f 100644
--- a/hack/mlrunui.yaml
+++ b/hack/mlrunui.yaml
@@ -30,7 +30,7 @@ spec:
     spec:
      containers:
      - name: mlrun-ui
-        image: mlrun/mlrun-ui:1.6.0
+        image: mlrun/mlrun-ui:1.6.1
        env:
        - name: MLRUN_API_PROXY_URL
          value: http://mlrun-api:8080

From 80404fadf38217c0390793770297cb046629b439 Mon Sep 17 00:00:00 2001
From: Liran BG
Date: Thu, 29 Feb 2024 23:36:57 +0200
Subject: [PATCH 056/119] [Version] Bump 1.6.x to 1.6.2 (#5218)

---
 automation/version/unstable_version_prefix | 2 +-
 1 file changed, 1 insertion(+), 1 deletion(-)

diff --git a/automation/version/unstable_version_prefix b/automation/version/unstable_version_prefix
index 9c6d6293b1a..fdd3be6df54 100644
--- a/automation/version/unstable_version_prefix
+++ b/automation/version/unstable_version_prefix
@@ -1 +1 @@
-1.6.1
+1.6.2

From d80c08bed8ea0b2d28d3866846696b62c6410803 Mon Sep 17 00:00:00 2001
From: Liran BG
Date: Thu, 29 Feb 2024 23:39:05 +0200
Subject: [PATCH 057/119] [Security] Remedy vulnerabilities [1.6.x] (#5205)

---
 dockerfiles/jupyter/Dockerfile | 10 +++++----
 dockerfiles/jupyter/requirements.txt | 2 +-
 dockerfiles/mlrun-api/requirements.txt | 2 +-
 .../api/framework/test_logging_middleware.py | 4 ++--
 tests/api/api/framework/test_middlewares.py | 6 ++---
 tests/api/api/test_datastore_profiles.py | 22 +++++++++----------
 tests/api/api/test_functions.py | 22 ++++++++++++-------
 tests/api/api/test_projects.py | 4 ++--
 tests/api/api/test_runs.py | 19 ++++++++--------
 tests/api/api/test_submit.py | 8 +++----
 10 files changed, 53 insertions(+), 46 deletions(-)

diff --git a/dockerfiles/jupyter/Dockerfile b/dockerfiles/jupyter/Dockerfile
index 15715629866..494be177b77 100644
--- a/dockerfiles/jupyter/Dockerfile
+++ b/dockerfiles/jupyter/Dockerfile
@@ -22,11 +22,13 @@ RUN apt-get update && \
     DEBIAN_FRONTEND=noninteractive apt-get -y upgrade && \
     rm -rf /var/lib/apt/lists/*
 
-RUN apt-get update && apt-get install --no-install-recommends -y \
-    graphviz \
-    && rm -rf /var/lib/apt/lists/*
+RUN apt-get update \
+    && apt-get install --no-install-recommends -y \
+    graphviz \
+    curl \
+    apt-transport-https \
+    && rm -rf /var/lib/apt/lists/*
 
-RUN apt-get update && apt-get install -y curl apt-transport-https
 # Download and install kubectl
 RUN curl -LO "https://dl.k8s.io/release/$(curl -L -s https://dl.k8s.io/release/stable.txt)/bin/linux/amd64/kubectl" && \
     chmod +x kubectl && \
diff --git a/dockerfiles/jupyter/requirements.txt b/dockerfiles/jupyter/requirements.txt
index 648a71c5f20..86db1a6ad67 100644
--- a/dockerfiles/jupyter/requirements.txt
+++ b/dockerfiles/jupyter/requirements.txt
@@ -17,7 +17,7 @@ wheel~=0.38
 setuptools~=68.2
 tornado~=6.3
 requests~=2.31
-cryptography~=41.0
+cryptography~=42.0
 certifi~=2023.7
 mpmath~=1.3
 oauthlib~=3.2
diff --git a/dockerfiles/mlrun-api/requirements.txt b/dockerfiles/mlrun-api/requirements.txt
index 132efe7de37..d106d262350 100644
--- a/dockerfiles/mlrun-api/requirements.txt
+++ b/dockerfiles/mlrun-api/requirements.txt
@@ -5,7 +5,7 @@ sqlite3-to-mysql~=1.4
 objgraph~=3.5
 igz-mgmt~=0.0.10
 humanfriendly~=9.2
-fastapi~=0.103.2
+fastapi~=0.110.0
 # in sqlalchemy>=2.0 there is breaking changes (such as in Table class autoload argument is removed)
 sqlalchemy~=1.4
 pymysql~=1.0
diff --git a/tests/api/api/framework/test_logging_middleware.py b/tests/api/api/framework/test_logging_middleware.py
index 2c3b8356f49..347b06e3d97 100644
--- a/tests/api/api/framework/test_logging_middleware.py
+++ b/tests/api/api/framework/test_logging_middleware.py
@@ -136,8 +136,8 @@ def test_logging_middleware(db: Session, client: TestClient, stream_logger) -> N
     stream: io.StringIO
     has_logger_middleware = False
     for middleware in client.app.user_middleware:
-        if "logger" in middleware.options:
-            middleware.options["logger"] = logger_instance
+        if "logger" in middleware.kwargs:
+            middleware.kwargs["logger"] = logger_instance
             has_logger_middleware = True
 
     client.app.middleware_stack = client.app.build_middleware_stack()
diff --git a/tests/api/api/framework/test_middlewares.py b/tests/api/api/framework/test_middlewares.py
index dbc239fa738..87f585e1c66 100644
--- a/tests/api/api/framework/test_middlewares.py
+++ b/tests/api/api/framework/test_middlewares.py
@@ -47,7 +47,7 @@ def test_ui_clear_cache_middleware(
 ) -> None:
     for middleware in client.app.user_middleware:
         if "UiClearCacheMiddleware" in str(middleware.cls):
-            middleware.options["backend_version"] = backend_version
+            middleware.kwargs["backend_version"] = backend_version
     client.app.middleware_stack = client.app.build_middleware_stack()
 
     with unittest.mock.patch.object(
@@ -78,8 +78,8 @@ def test_ensure_be_version_middleware(
     db: sqlalchemy.orm.Session, client: fastapi.testclient.TestClient
 ) -> None:
     for middleware in client.app.user_middleware:
-        if "backend_version" in middleware.options:
-            middleware.options["backend_version"] = "dummy-version"
+        if "backend_version" in middleware.kwargs:
+            middleware.kwargs["backend_version"] = "dummy-version"
     client.app.middleware_stack = client.app.build_middleware_stack()
     response = client.get("client-spec")
     assert (
diff --git a/tests/api/api/test_datastore_profiles.py b/tests/api/api/test_datastore_profiles.py
index 6fcf4191b22..9f9e4431249 100644
--- a/tests/api/api/test_datastore_profiles.py
+++ b/tests/api/api/test_datastore_profiles.py
@@ -31,7 +31,7 @@
     "private": None,
 }
 legacy_api_projects_path = "projects"
-api_datastore_path = f"/api/v1/projects/{project}/datastore-profiles"
+api_datastore_path = f"projects/{project}/datastore-profiles"
 
 
 def _create_project(client: TestClient, project_name: str = project):
@@ -52,7 +52,7 @@ def test_datastore_profile_create_ok(
     _create_project(client)
     resp = client.put(
         api_datastore_path,
-        data=json.dumps(datastore),
+        json=datastore,
     )
     assert resp.status_code == HTTPStatus.OK.value
 
@@ -71,14 +71,14 @@ def test_datastore_profile_update_ok(
     _create_project(client)
     resp = client.put(
         api_datastore_path,
-        data=json.dumps(datastore),
+        json=datastore,
     )
     assert resp.status_code == HTTPStatus.OK.value
     datastore_updated = datastore
     datastore_updated["object"] = "another version of body"
     resp = client.put(
         api_datastore_path,
-        data=json.dumps(datastore_updated),
+        json=datastore_updated,
     )
     assert resp.status_code == HTTPStatus.OK.value
 
@@ -97,7 +97,7 @@ def test_datastore_profile_create_fail(
     # No project created
     resp = client.put(
         api_datastore_path,
-        data=json.dumps(datastore),
+        json=datastore,
    )
     assert resp.status_code == HTTPStatus.NOT_FOUND.value
 
@@ -105,7 +105,7 @@ def test_datastore_profile_create_fail(
     _create_project(client)
     resp = client.put(
         api_datastore_path,
-        data={},
+        json={},
    )
     assert resp.status_code == HTTPStatus.UNPROCESSABLE_ENTITY.value
 
@@ -121,9 +121,9 @@ def test_datastore_profile_get_fail(
 
     # Not existing profile
     _create_project(client)
-    resp = client.put(
+    client.put(
         api_datastore_path,
-        data={},
+        json={},
    )
     resp = client.get(
         api_datastore_path + "/invalid",
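
(The `data=json.dumps(...)` to `json=...` rewrites above track the httpx-based TestClient that newer FastAPI releases use; a small self-contained illustration follows. The `/echo` app is hypothetical and not part of the MLRun test suite.)

```python
from fastapi import FastAPI
from fastapi.testclient import TestClient

app = FastAPI()


@app.put("/echo")
def echo(payload: dict):
    # FastAPI parses the JSON request body into `payload`
    return payload


client = TestClient(app)
# `json=` serializes the dict and sets the Content-Type header; sending a
# pre-serialized body via `data=` is what the older requests-based client
# tolerated, and is what these tests move away from.
assert client.put("/echo", json={"name": "ds", "type": "nosql"}).json() == {
    "name": "ds",
    "type": "nosql",
}
```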
@@ -161,7 +161,7 @@ def test_datastore_profile_delete(
     # Create the profile
     resp = client.put(
         api_datastore_path,
-        data=json.dumps(datastore),
+        json=datastore,
     )
     assert resp.status_code == HTTPStatus.OK.value
 
@@ -202,9 +202,9 @@ def test_datastore_profile_list(
     assert json.loads(resp._content) == []
 
     # Create the profile
-    resp = client.put(
+    client.put(
         api_datastore_path,
-        data=json.dumps(datastore),
+        json=datastore,
     )
 
     expected_return = [{"project": project, **datastore}]
diff --git a/tests/api/api/test_functions.py b/tests/api/api/test_functions.py
index 126c58eddb6..ff509b5d9c1 100644
--- a/tests/api/api/test_functions.py
+++ b/tests/api/api/test_functions.py
@@ -284,7 +284,7 @@ def test_redirection_from_worker_to_chief_only_if_serving_function_with_track_mo
     monkeypatch,
 ):
     mlrun.mlconf.httpdb.clusterization.role = "worker"
-    endpoint = f"{ORIGINAL_VERSIONED_API_PREFIX}/build/function"
+    endpoint = "/build/function"
     tests.api.api.utils.create_project(client, PROJECT)
 
     function_name = "test-function"
@@ -316,7 +316,7 @@ def test_redirection_from_worker_to_chief_deploy_serving_function_with_track_mod
     db: sqlalchemy.orm.Session, client: fastapi.testclient.TestClient, httpserver
 ):
     mlrun.mlconf.httpdb.clusterization.role = "worker"
-    endpoint = f"{ORIGINAL_VERSIONED_API_PREFIX}/build/function"
+    endpoint = "/build/function"
     tests.api.api.utils.create_project(client, PROJECT)
 
     function_name = "test-function"
@@ -344,9 +344,9 @@ def test_redirection_from_worker_to_chief_deploy_serving_function_with_track_mod
         expected_response = test_case.get("expected_body")
         body = test_case.get("body")
 
-        httpserver.expect_ordered_request(endpoint, method="POST").respond_with_json(
-            expected_response, status=expected_status
-        )
+        httpserver.expect_ordered_request(
+            f"{ORIGINAL_VERSIONED_API_PREFIX}{endpoint}", method="POST"
+        ).respond_with_json(expected_response, status=expected_status)
         url = httpserver.url_for("")
         mlrun.mlconf.httpdb.clusterization.chief.url = url
         response = client.post(endpoint, data=body)
@@ -397,7 +397,7 @@ def test_tracking_on_serving(
     )
 
     # Adjust the required request endpoint and body
-    endpoint = f"{ORIGINAL_VERSIONED_API_PREFIX}/build/function"
+    endpoint = "build/function"
     json_body = _generate_build_function_request(function)
 
     response = client.post(endpoint, data=json_body)
@@ -846,15 +846,21 @@ def failing_func():
 
 
 def _generate_function(
-    function_name: str, project: str = PROJECT, function_tag: str = "latest"
+    function_name: str,
+    project: str = PROJECT,
+    function_tag: str = "latest",
+    track_models: bool = False,
 ):
-    return mlrun.new_function(
+    fn = mlrun.new_function(
         name=function_name,
         project=project,
        tag=function_tag,
        kind="serving",
        image="mlrun/mlrun",
    )
+    if track_models:
+        fn.set_tracking()
+    return fn
 
 
 def _generate_build_function_request(
diff --git a/tests/api/api/test_projects.py b/tests/api/api/test_projects.py
index 205868606db..cfe75d25ed0 100644
--- a/tests/api/api/test_projects.py
+++ b/tests/api/api/test_projects.py
@@ -90,7 +90,7 @@ def test_redirection_from_worker_to_chief_delete_project(
     response = client.post("projects", json=project.dict())
     assert response.status_code == HTTPStatus.CREATED.value
 
-    endpoint = f"{ORIGINAL_VERSIONED_API_PREFIX}/projects/{project_name}"
+    endpoint = f"projects/{project_name}"
     for strategy in mlrun.common.schemas.DeletionStrategy:
         headers = {"x-mlrun-deletion-strategy": strategy.value}
         for test_case in [
@@ -122,7 +122,7 @@ def test_redirection_from_worker_to_chief_delete_project(
             expected_response = test_case.get("expected_body")
 
             httpserver.expect_ordered_request(
-                endpoint, method="DELETE"
+                f"{ORIGINAL_VERSIONED_API_PREFIX}/{endpoint}", method="DELETE"
             ).respond_with_json(expected_response, status=expected_status)
             url = httpserver.url_for("")
             mlrun.mlconf.httpdb.clusterization.chief.url = url
diff --git a/tests/api/api/test_runs.py b/tests/api/api/test_runs.py
index 6c0f67b21ad..ce08ed4900e 100644
--- a/tests/api/api/test_runs.py
+++ b/tests/api/api/test_runs.py
@@ -34,8 +34,7 @@
 from server.api.db.sqldb.models import Run
 from server.api.utils.singletons.db import get_db
 
-API_V1 = "/api/v1"
-RUNS_API_V1 = f"{API_V1}/projects/{{project}}/runs"
+RUNS_API_ENDPOINT = "/projects/{project}/runs"
 
 
 def test_run_with_nan_in_body(db: Session, client: TestClient) -> None:
@@ -472,12 +471,12 @@ def test_list_runs_partition_by(db: Session, client: TestClient) -> None:
 
     # Some negative testing - no sort by field
     response = client.get(
-        f"{RUNS_API_V1.format(project=projects[0])}?partition-by=name"
+        f"{RUNS_API_ENDPOINT.format(project=projects[0])}?partition-by=name"
     )
     assert response.status_code == HTTPStatus.BAD_REQUEST.value
     # An invalid partition-by field - will be failed by fastapi due to schema validation.
     response = client.get(
-        f"{RUNS_API_V1.format(project=projects[0])}?partition-by=key&partition-sort-by=name"
+        f"{RUNS_API_ENDPOINT.format(project=projects[0])}?partition-by=key&partition-sort-by=name"
     )
     assert response.status_code == HTTPStatus.UNPROCESSABLE_ENTITY.value
 
@@ -535,7 +534,7 @@ def test_delete_runs_with_permissions(db: Session, client: TestClient):
     _store_run(db, uid="some-uid", project=project)
     runs = server.api.crud.Runs().list_runs(db, project=project)
     assert len(runs) == 1
-    response = client.delete(RUNS_API_V1.format(project="*"))
+    response = client.delete(RUNS_API_ENDPOINT.format(project="*"))
     assert response.status_code == HTTPStatus.OK.value
     runs = server.api.crud.Runs().list_runs(db, project=project)
     assert len(runs) == 0
@@ -546,7 +545,7 @@ def test_delete_runs_with_permissions(db: Session, client: TestClient):
     _store_run(db, uid=None, project=second_project)
     all_runs = server.api.crud.Runs().list_runs(db, project="*")
     assert len(all_runs) == 2
-    response = client.delete(RUNS_API_V1.format(project="*"))
+    response = client.delete(RUNS_API_ENDPOINT.format(project="*"))
     assert response.status_code == HTTPStatus.OK.value
     runs = server.api.crud.Runs().list_runs(db, project="*")
     assert len(runs) == 0
@@ -561,20 +560,20 @@ def test_delete_runs_without_permissions(db: Session, client: TestClient):
     project = "some-project"
     runs = server.api.crud.Runs().list_runs(db, project=project)
     assert len(runs) == 0
-    response = client.delete(RUNS_API_V1.format(project=project))
+    response = client.delete(RUNS_API_ENDPOINT.format(project=project))
     assert response.status_code == HTTPStatus.UNAUTHORIZED.value
 
     # try delete runs with no permission to project (project contains runs)
     _store_run(db, uid="some-uid", project=project)
     runs = server.api.crud.Runs().list_runs(db, project=project)
     assert len(runs) == 1
-    response = client.delete(RUNS_API_V1.format(project=project))
+    response = client.delete(RUNS_API_ENDPOINT.format(project=project))
     assert response.status_code == HTTPStatus.UNAUTHORIZED.value
     runs = server.api.crud.Runs().list_runs(db, project=project)
     assert len(runs) == 1
 
     # try delete runs from all projects with no permissions
-    response = client.delete(RUNS_API_V1.format(project="*"))
+    response = client.delete(RUNS_API_ENDPOINT.format(project="*"))
     assert response.status_code == HTTPStatus.UNAUTHORIZED.value
     runs = server.api.crud.Runs().list_runs(db, project=project)
     assert len(runs) == 1
@@ -873,7 +872,7 @@ def _store_run(db, uid, project="some-project"):
 def _list_and_assert_objects(
     client: TestClient, params, expected_number_of_runs: int, project: str
 ):
-    response = client.get(RUNS_API_V1.format(project=project), params=params)
+    response = client.get(RUNS_API_ENDPOINT.format(project=project), params=params)
     assert response.status_code == HTTPStatus.OK.value, response.text
 
     runs = response.json()["runs"]
diff --git a/tests/api/api/test_submit.py b/tests/api/api/test_submit.py
index 90b8c3c490b..dfa64ed58ab 100644
--- a/tests/api/api/test_submit.py
+++ b/tests/api/api/test_submit.py
@@ -477,7 +477,7 @@ def test_redirection_from_worker_to_chief_submit_job_with_schedule(
     db: sqlalchemy.orm.Session, client: fastapi.testclient.TestClient, httpserver
 ):
     mlrun.mlconf.httpdb.clusterization.role = "worker"
-    endpoint = f"{ORIGINAL_VERSIONED_API_PREFIX}/submit_job"
+    endpoint = "/submit_job"
 
     project = "test-project"
     function_name = "test-function"
@@ -515,9 +515,9 @@ def test_redirection_from_worker_to_chief_submit_job_with_schedule(
         expected_response = test_case.get("expected_body")
         body = test_case.get("body")
 
-        httpserver.expect_ordered_request(endpoint, method="POST").respond_with_json(
-            expected_response, status=expected_status
-        )
+        httpserver.expect_ordered_request(
+            f"{ORIGINAL_VERSIONED_API_PREFIX}{endpoint}", method="POST"
+        ).respond_with_json(expected_response, status=expected_status)
         url = httpserver.url_for("")
         mlrun.mlconf.httpdb.clusterization.chief.url = url
         json_body = mlrun.utils.dict_to_json(body)

From e3bf772e65cf24a309524e9de20afc979c90d2bb Mon Sep 17 00:00:00 2001
From: Liran BG
Date: Thu, 29 Feb 2024 23:39:15 +0200
Subject: [PATCH 058/119] [Security] Static analysis issues remediation [1.6.x] (#5204)

---
 automation/patch_igz/patch_remote.py | 5 +-
 mlrun/config.py | 6 ++
 mlrun/db/httpdb.py | 31 ++++----
 mlrun/kfpops.py | 23 ++++--
 mlrun/package/utils/_archiver.py | 4 +-
 mlrun/platforms/iguazio.py | 71 ++-----------------
 mlrun/projects/project.py | 30 ++++----
 mlrun/serving/remote.py | 6 +-
 mlrun/utils/helpers.py | 8 +++
 server/api/utils/clients/iguazio.py | 5 +-
 server/api/utils/clients/nuclio.py | 8 ++-
 tests/platforms/test_iguazio.py | 39 ----------
 tests/projects/test_project.py | 2 +-
 .../feature_store/test_feature_store.py | 5 +-
 tests/test_kfp.py | 2 +-
 tests/utils/test_helpers.py | 14 ++++
 16 files changed, 108 insertions(+), 151 deletions(-)

diff --git a/automation/patch_igz/patch_remote.py b/automation/patch_igz/patch_remote.py
index 710130e24dd..2e1edb31ccc 100755
--- a/automation/patch_igz/patch_remote.py
+++ b/automation/patch_igz/patch_remote.py
@@ -18,6 +18,7 @@
 import json
 import logging
 import os
+import shlex
 import subprocess
 from typing import List
 
@@ -460,10 +461,8 @@ def _exec_local(self, cmd: List[str], live=False) -> str:
         return output
 
     def _exec_remote(self, cmd: List[str], live=False) -> str:
-        cmd_str = " ".join(cmd)
-
+        cmd_str = shlex.join(cmd)
         logger.debug("Exec remote: %s", cmd_str)
-
         stdin_stream, stdout_stream, stderr_stream = self._ssh_client.exec_command(
             cmd_str
         )
diff --git a/mlrun/config.py b/mlrun/config.py
index f03311eb662..a5a01119cab 100644
--- a/mlrun/config.py
+++ b/mlrun/config.py
@@ -288,6 +288,12 @@
     "state": "online",
     "retry_api_call_on_exception": "enabled",
     "http_connection_timeout_keep_alive": 11,
+    # http client used by httpdb
+    "http": {
+        # when True, the client will verify the server's TLS
+        # set to False for backwards compatibility.
+        "verify": False,
+    },
     "db": {
         "commit_retry_timeout": 30,
         "commit_retry_interval": 3,
diff --git a/mlrun/db/httpdb.py b/mlrun/db/httpdb.py
index b76585ed7c4..28182a131ea 100644
--- a/mlrun/db/httpdb.py
+++ b/mlrun/db/httpdb.py
@@ -152,7 +152,7 @@ def __repr__(self):
     @staticmethod
     def get_api_path_prefix(version: str = None) -> str:
         """
-        :param version: API version to use, None (the default) will mean to use the default value from mlconf,
+        :param version: API version to use, None (the default) will mean to use the default value from mlrun.config,
          for un-versioned api set an empty string.
         """
         if version is not None:
@@ -250,7 +250,11 @@ def api_call(
 
         try:
             response = self.session.request(
-                method, url, timeout=timeout, verify=False, **kw
+                method,
+                url,
+                timeout=timeout,
+                verify=config.httpdb.http.verify,
+                **kw,
             )
         except requests.RequestException as exc:
             error = f"{err_to_str(exc)}: {error}" if error else err_to_str(exc)
@@ -302,11 +306,11 @@ def _is_retry_on_post_allowed(self, method, path: str):
     def connect(self, secrets=None):
         """Connect to the MLRun API server. Must be called prior to executing any other method.
-        The code utilizes the URL for the API server from the configuration - ``mlconf.dbpath``.
+        The code utilizes the URL for the API server from the configuration - ``config.dbpath``.
 
         For example::
 
-            mlconf.dbpath = mlconf.dbpath or 'http://mlrun-api:8080'
+            config.dbpath = config.dbpath or 'http://mlrun-api:8080'
             db = get_run_db().connect()
         """
         # hack to allow unit tests to instantiate HTTPRunDB without a real server behind
@@ -500,7 +504,7 @@ def get_log(self, uid, project="", offset=0, size=None):
         if offset < 0:
             raise MLRunInvalidArgumentError("Offset cannot be negative")
         if size is None:
-            size = int(mlrun.mlconf.httpdb.logs.pull_logs_default_size_limit)
+            size = int(config.httpdb.logs.pull_logs_default_size_limit)
         elif size == -1:
             logger.warning(
                 "Retrieving all logs. This may be inefficient and can result in a large log."
@@ -546,25 +550,23 @@ def watch_log(self, uid, project="", watch=True, offset=0):
 
         state, text = self.get_log(uid, project, offset=offset)
         if text:
-            print(text.decode(errors=mlrun.mlconf.httpdb.logs.decode.errors))
+            print(text.decode(errors=config.httpdb.logs.decode.errors))
         nil_resp = 0
         while True:
             offset += len(text)
             # if we get 3 nil responses in a row, increase the sleep time to 10 seconds
             # TODO: refactor this to use a conditional backoff mechanism
             if nil_resp < 3:
-                time.sleep(int(mlrun.mlconf.httpdb.logs.pull_logs_default_interval))
+                time.sleep(int(config.httpdb.logs.pull_logs_default_interval))
             else:
                 time.sleep(
-                    int(
-                        mlrun.mlconf.httpdb.logs.pull_logs_backoff_no_logs_default_interval
-                    )
+                    int(config.httpdb.logs.pull_logs_backoff_no_logs_default_interval)
                 )
             state, text = self.get_log(uid, project, offset=offset)
             if text:
                 nil_resp = 0
                 print(
-                    text.decode(errors=mlrun.mlconf.httpdb.logs.decode.errors),
+                    text.decode(errors=config.httpdb.logs.decode.errors),
                     end="",
                 )
             else:
@@ -1173,7 +1175,8 @@ def delete_runtime_resources(
         :param force: Force deletion - delete the runtime resource even if it's not in terminal state or if the grace
             period didn't pass.
         :param grace_period: Grace period given to the runtime resource before they are actually removed, counted from
-            the moment they moved to terminal state (defaults to mlrun.mlconf.runtime_resources_deletion_grace_period).
+            the moment they moved to terminal state
+            (defaults to mlrun.config.config.runtime_resources_deletion_grace_period).
         :returns: :py:class:`~mlrun.common.schemas.GroupedByProjectRuntimeResourcesOutput` listing the runtime resources
             that were removed.
@@ -1340,7 +1343,7 @@ def remote_builder(
             logger.warning(
                 "Building a function image to ECR and loading an S3 source to the image may require conflicting access "
                 "keys. Only the permissions granted to the platform's configured secret will take affect "
-                "(see mlrun.mlconf.httpdb.builder.docker_registry_secret). "
+                "(see mlrun.config.config.httpdb.builder.docker_registry_secret). "
                 "In case the permissions are limited to ECR scope, you may use pull_at_runtime=True instead",
                 source=func.spec.build.source,
                 load_source_on_run=func.spec.build.load_source_on_run,
@@ -1495,7 +1498,7 @@ def list_project_background_tasks(
         Retrieve updated information on project background tasks being executed.
         If no filter is provided, will return background tasks from the last week.
 
-        :param project: Project name (defaults to mlrun.mlconf.default_project).
+        :param project: Project name (defaults to mlrun.config.config.default_project).
         :param state: List only background tasks whose state is specified.
         :param created_from: Filter by background task created time in ``[created_from, created_to]``.
         :param created_to: Filter by background task created time in ``[created_from, created_to]``.
diff --git a/mlrun/kfpops.py b/mlrun/kfpops.py
index 31fd9830e31..7b81f147693 100644
--- a/mlrun/kfpops.py
+++ b/mlrun/kfpops.py
@@ -41,8 +41,8 @@
 
 # default KFP artifacts and output (ui metadata, metrics etc.)
 # directories to /tmp to allow running with security context
-KFPMETA_DIR = os.environ.get("KFPMETA_OUT_DIR", "/tmp")
-KFP_ARTIFACTS_DIR = os.environ.get("KFP_ARTIFACTS_DIR", "/tmp")
+KFPMETA_DIR = "/tmp"
+KFP_ARTIFACTS_DIR = "/tmp"
 
 project_annotation = "mlrun/project"
 run_annotation = "mlrun/pipeline-step-type"
@@ -71,7 +71,7 @@ def write_kfpmeta(struct):
             {"name": k, "numberValue": v} for k, v in results.items() if is_num(v)
         ],
     }
-    with open(KFPMETA_DIR + "/mlpipeline-metrics.json", "w") as f:
+    with open(os.path.join(KFPMETA_DIR, "mlpipeline-metrics.json"), "w") as f:
         json.dump(metrics, f)
 
     struct = deepcopy(struct)
@@ -91,7 +91,14 @@ def write_kfpmeta(struct):
         elif key in results:
             val = results[key]
         try:
-            path = "/".join([KFP_ARTIFACTS_DIR, key])
+            # NOTE: if key has "../x", it would fail on path traversal
+            path = os.path.join(KFP_ARTIFACTS_DIR, key)
+            if not mlrun.utils.helpers.is_safe_path(KFP_ARTIFACTS_DIR, path):
+                logger.warning(
+                    "Path traversal is not allowed ignoring", path=path, key=key
+                )
+                continue
+            path = os.path.abspath(path)
             logger.info("Writing artifact output", path=path, val=val)
             with open(path, "w") as fp:
                 fp.write(str(val))
@@ -109,7 +116,7 @@ def write_kfpmeta(struct):
         "outputs": output_artifacts
         + [{"type": "markdown", "storage": "inline", "source": text}]
    }
-    with open(KFPMETA_DIR + "/mlpipeline-ui-metadata.json", "w") as f:
+    with open(os.path.join(KFPMETA_DIR, "mlpipeline-ui-metadata.json"), "w") as f:
         json.dump(metadata, f)
 
 
@@ -450,8 +457,10 @@ def mlrun_pipeline(
         command=cmd + [command],
         file_outputs=file_outputs,
         output_artifact_paths={
-            "mlpipeline-ui-metadata": KFPMETA_DIR + "/mlpipeline-ui-metadata.json",
-            "mlpipeline-metrics": KFPMETA_DIR + "/mlpipeline-metrics.json",
+            "mlpipeline-ui-metadata": os.path.join(
+                KFPMETA_DIR, "mlpipeline-ui-metadata.json"
+            ),
+            "mlpipeline-metrics": os.path.join(KFPMETA_DIR, "mlpipeline-metrics.json"),
        },
    )
     cop = add_default_function_resources(cop)
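
(A condensed restatement of the traversal guard used in the kfpops hunk above; it mirrors the `is_safe_path` helper this same patch adds to `mlrun/utils/helpers.py` further below.)

```python
import os


def is_safe_path(base: str, filepath: str) -> bool:
    # A path is accepted only if its absolute form still lives under `base`,
    # which rejects result keys such as "../escape" before any file is written.
    resolved = os.path.abspath(filepath)
    return base == os.path.commonpath((base, resolved))


assert is_safe_path("/tmp", os.path.join("/tmp", "metrics.json"))
assert not is_safe_path("/tmp", os.path.join("/tmp", "../etc/passwd"))
```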
diff --git a/mlrun/package/utils/_archiver.py b/mlrun/package/utils/_archiver.py
index 6b03ca62ee6..371b4ec4b09 100644
--- a/mlrun/package/utils/_archiver.py
+++ b/mlrun/package/utils/_archiver.py
@@ -179,7 +179,9 @@ def extract_archive(cls, archive_path: str, output_path: str) -> str:
 
         # Extract:
         with tarfile.open(archive_path, f"r:{cls._MODE_STRING}") as tar_file:
-            tar_file.extractall(directory_path)
+            # use 'data' to ensure no security risks are imposed by the archive files
+            # see: https://docs.python.org/3/library/tarfile.html#tarfile.TarFile.extractall
+            tar_file.extractall(directory_path, filter="data")
 
         return str(directory_path)
 
diff --git a/mlrun/platforms/iguazio.py b/mlrun/platforms/iguazio.py
index b4344427fe1..ac7116c8e48 100644
--- a/mlrun/platforms/iguazio.py
+++ b/mlrun/platforms/iguazio.py
@@ -16,19 +16,15 @@
 import os
 import urllib
 from collections import namedtuple
-from datetime import datetime
-from http import HTTPStatus
 from urllib.parse import urlparse
 
 import kfp.dsl
 import requests
 import semver
-import urllib3
 import v3io
 
 import mlrun.errors
 from mlrun.config import config as mlconf
-from mlrun.errors import err_to_str
 from mlrun.utils import dict_to_json
 
 _cached_control_session = None
@@ -488,25 +484,6 @@ def get_records(self):
         return response.output.records
 
 
-def create_control_session(url, username, password):
-    # for systems without production cert - silence no cert verification WARN
-    urllib3.disable_warnings(urllib3.exceptions.InsecureRequestWarning)
-    if not username or not password:
-        raise ValueError("cannot create session key, missing username or password")
-
-    session = requests.Session()
-    session.auth = (username, password)
-    try:
-        auth = session.post(f"{url}/api/sessions", verify=False)
-    except OSError as exc:
-        raise OSError(f"error: cannot connect to {url}: {err_to_str(exc)}")
-
-    if not auth.ok:
-        raise OSError(f"failed to create session: {url}, {auth.text}")
-
-    return auth.json()["data"]["id"]
-
-
 def is_iguazio_endpoint(endpoint_url: str) -> bool:
     # TODO: find a better heuristic
     return ".default-tenant." in endpoint_url
@@ -533,21 +510,6 @@ def is_iguazio_session_cookie(session_cookie: str) -> bool:
     return False
 
 
-def is_iguazio_system_2_10_or_above(dashboard_url):
-    # for systems without production cert - silence no cert verification WARN
-    urllib3.disable_warnings(urllib3.exceptions.InsecureRequestWarning)
-    response = requests.get(f"{dashboard_url}/api/external_versions", verify=False)
-
-    if not response.ok:
-        if response.status_code == HTTPStatus.NOT_FOUND.value:
-            # in iguazio systems prior to 2.10 this endpoint didn't exist, so the api returns 404 cause endpoint not
-            # found
-            return False
-        response.raise_for_status()
-
-    return True
-
-
 # we assign the control session or access key to the password since this is iguazio auth scheme
 # (requests should be sent with username:control_session/access_key as auth header)
 def add_or_refresh_credentials(
@@ -577,33 +539,12 @@ def add_or_refresh_credentials(
     # (ideally if we could identify we're in enterprise we would have verify here that token and username have value)
     if not is_iguazio_endpoint(api_url):
         return "", "", token
-    iguazio_dashboard_url = "https://dashboard" + api_url[api_url.find(".") :]
-
-    # in 2.8 mlrun api is protected with control session, from 2.10 it's protected with access key
-    is_access_key_auth = is_iguazio_system_2_10_or_above(iguazio_dashboard_url)
-    if is_access_key_auth:
-        if not username or not token:
-            raise ValueError(
-                "username and access key required to authenticate against iguazio system"
-            )
-        return username, token, ""
-
-    if not username or not password:
-        raise ValueError("username and password needed to create session")
-
-    global _cached_control_session
-    now = datetime.now()
-    if _cached_control_session:
-        if (
-            _cached_control_session[2] == username
-            and _cached_control_session[3] == password
-            and (now - _cached_control_session[1]).seconds < 20 * 60 * 60
-        ):
-            return _cached_control_session[2], _cached_control_session[0], ""
-
-    control_session = create_control_session(iguazio_dashboard_url, username, password)
-    _cached_control_session = (control_session, now, username, password)
-    return username, control_session, ""
+
+    if not username or not token:
+        raise ValueError(
+            "username and access key required to authenticate against iguazio system"
+        )
+    return username, token, ""
 
 
 def parse_path(url, suffix="/"):
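
(After the removals above, the enterprise credential flow reduces to a single access-key check; a standalone sketch of the resulting behavior, not the MLRun API itself.)

```python
from typing import Optional, Tuple


def resolve_iguazio_credentials(
    username: Optional[str], access_key: Optional[str]
) -> Tuple[str, str, str]:
    # The legacy 2.8 control-session flow, with its verify=False requests and
    # session caching, is gone; only username plus access key remain valid.
    if not username or not access_key:
        raise ValueError(
            "username and access key required to authenticate against iguazio system"
        )
    return username, access_key, ""
```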
@@ -2800,19 +2800,19 @@ def export(self, filepath=None, include_files: str = None): if archive_code: files_filter = include_files or "**" - tmp_path = None - if "://" in filepath: - tmp_path = tempfile.mktemp(".zip") - zipf = zipfile.ZipFile(tmp_path or filepath, "w") - for file_path in glob.iglob( - f"{project_dir}/{files_filter}", recursive=True - ): - write_path = pathlib.Path(file_path) - zipf.write(write_path, arcname=write_path.relative_to(project_dir)) - zipf.close() - if tmp_path: - mlrun.get_dataitem(filepath).upload(tmp_path) - remove(tmp_path) + with tempfile.NamedTemporaryFile(suffix=".zip") as f: + remote_file = "://" in filepath + fpath = f.name if remote_file else filepath + with zipfile.ZipFile(fpath, "w") as zipf: + for file_path in glob.iglob( + f"{project_dir}/{files_filter}", recursive=True + ): + write_path = pathlib.Path(file_path) + zipf.write( + write_path, arcname=write_path.relative_to(project_dir) + ) + if remote_file: + mlrun.get_dataitem(filepath).upload(zipf.filename) def set_model_monitoring_credentials( self, diff --git a/mlrun/serving/remote.py b/mlrun/serving/remote.py index 9e22a98402d..69d53332b0b 100644 --- a/mlrun/serving/remote.py +++ b/mlrun/serving/remote.py @@ -21,6 +21,7 @@ from storey.flow import _ConcurrentJobExecution import mlrun +import mlrun.config from mlrun.errors import err_to_str from mlrun.utils import logger @@ -173,7 +174,8 @@ def do_event(self, event): if not self._session: self._session = mlrun.utils.HTTPSessionWithRetry( self.retries, - self.backoff_factor or mlrun.mlconf.http_retry_defaults.backoff_factor, + self.backoff_factor + or mlrun.config.config.http_retry_defaults.backoff_factor, retry_on_exception=False, retry_on_status=self.retries > 0, retry_on_post=True, @@ -185,7 +187,7 @@ def do_event(self, event): resp = self._session.request( method, url, - verify=False, + verify=mlrun.config.config.httpdb.http.verify, headers=headers, data=body, timeout=self.timeout, diff --git a/mlrun/utils/helpers.py b/mlrun/utils/helpers.py index 058c1afcd91..b5389ddbebe 100644 --- a/mlrun/utils/helpers.py +++ b/mlrun/utils/helpers.py @@ -1622,3 +1622,11 @@ def get_local_file_schema() -> List: # The expression `list(string.ascii_lowercase)` generates a list of lowercase alphabets, # which corresponds to drive letters in Windows file paths such as `C:/Windows/path`. 
return ["file"] + list(string.ascii_lowercase) + + +def is_safe_path(base, filepath, is_symlink=False): + # Avoid path traversal attacks by ensuring that the path is safe + resolved_filepath = ( + os.path.abspath(filepath) if not is_symlink else os.path.realpath(filepath) + ) + return base == os.path.commonpath((base, resolved_filepath)) diff --git a/server/api/utils/clients/iguazio.py b/server/api/utils/clients/iguazio.py index bf692f8c69c..ce78999224a 100644 --- a/server/api/utils/clients/iguazio.py +++ b/server/api/utils/clients/iguazio.py @@ -31,6 +31,7 @@ from fastapi.concurrency import run_in_threadpool import mlrun.common.schemas +import mlrun.config import mlrun.errors import mlrun.utils.helpers import mlrun.utils.singleton @@ -630,7 +631,9 @@ def _send_request_to_api( ): url = f"{self._api_url}/api/{path}" self._prepare_request_kwargs(session, path, kwargs=kwargs) - response = self._session.request(method, url, verify=False, **kwargs) + response = self._session.request( + method, url, verify=mlrun.config.config.httpdb.http.verify, **kwargs + ) if not response.ok: try: response_body = response.json() diff --git a/server/api/utils/clients/nuclio.py b/server/api/utils/clients/nuclio.py index cf5460e0034..2435ba03a2e 100644 --- a/server/api/utils/clients/nuclio.py +++ b/server/api/utils/clients/nuclio.py @@ -237,7 +237,13 @@ def _send_request_to_api( if auth_info: auth = auth_info.to_nuclio_auth_info().to_requests_auth() - response = self._session.request(method, url, verify=False, auth=auth, **kwargs) + response = self._session.request( + method, + url, + verify=mlrun.config.config.httpdb.http.verify, + auth=auth, + **kwargs, + ) if not response.ok: log_kwargs = copy.deepcopy(kwargs) log_kwargs.update({"method": method, "path": path}) diff --git a/tests/platforms/test_iguazio.py b/tests/platforms/test_iguazio.py index 57092c2552f..4caea812089 100644 --- a/tests/platforms/test_iguazio.py +++ b/tests/platforms/test_iguazio.py @@ -13,7 +13,6 @@ # limitations under the License. 
 #
 import os
-from http import HTTPStatus
 from unittest.mock import Mock
 
 import deepdiff
@@ -25,44 +24,6 @@
 from mlrun.platforms import add_or_refresh_credentials
 
 
-def test_add_or_refresh_credentials_iguazio_2_8_success(monkeypatch):
-    username = "username"
-    password = "password"
-    control_session = "control_session"
-    api_url = "https://dashboard.default-tenant.app.hedingber-28-1.iguazio-cd2.com"
-    env = os.environ
-    env["V3IO_USERNAME"] = username
-    env["V3IO_PASSWORD"] = password
-
-    def mock_get(*args, **kwargs):
-        not_found_response_mock = Mock()
-        not_found_response_mock.ok = False
-        not_found_response_mock.status_code = HTTPStatus.NOT_FOUND.value
-        return not_found_response_mock
-
-    def mock_session(*args, **kwargs):
-        session_mock = Mock()
-
-        def _mock_successful_session_creation(*args, **kwargs):
-            assert session_mock.auth == (username, password)
-            successful_response_mock = Mock()
-            successful_response_mock.ok = True
-            successful_response_mock.json.return_value = {
-                "data": {"id": control_session}
-            }
-            return successful_response_mock
-
-        session_mock.post = _mock_successful_session_creation
-        return session_mock
-
-    monkeypatch.setattr(requests, "get", mock_get)
-    monkeypatch.setattr(requests, "Session", mock_session)
-
-    result_username, result_control_session, _ = add_or_refresh_credentials(api_url)
-    assert username == result_username
-    assert control_session == result_control_session
-
-
 def test_add_or_refresh_credentials_iguazio_2_10_success(monkeypatch):
     username = "username"
     access_key = "access_key"
diff --git a/tests/projects/test_project.py b/tests/projects/test_project.py
index ac40331ca5c..21355b7b1c1 100644
--- a/tests/projects/test_project.py
+++ b/tests/projects/test_project.py
@@ -951,7 +951,7 @@ def test_export_to_zip(rundb_mock):
     assert os.path.isfile(zip_path)
 
     zipf = zipfile.ZipFile(zip_path, "r")
-    assert set(zipf.namelist()) == set(["./", "f.py", "project.yaml"])
+    assert set(zipf.namelist()) == {"./", "f.py", "project.yaml"}
 
     # check upload to (remote) DataItem
     project.export("memory://x.zip")
diff --git a/tests/system/feature_store/test_feature_store.py b/tests/system/feature_store/test_feature_store.py
index 1b6b6f6dc63..d616962e19d 100644
--- a/tests/system/feature_store/test_feature_store.py
+++ b/tests/system/feature_store/test_feature_store.py
@@ -3350,7 +3350,10 @@ def test_alias_change(self):
             "Cookie": "session=j:" + json.dumps({"sid": os.getenv("V3IO_ACCESS_KEY")})
         }
         response = requests.patch(
-            request_url, json=request_body, headers=headers, verify=False
+            request_url,
+            json=request_body,
+            headers=headers,
+            verify=config.httpdb.http.verify,
        )
         assert (
             response.status_code == 200
diff --git a/tests/test_kfp.py b/tests/test_kfp.py
index 2a990fbcade..5c39bd7df02 100644
--- a/tests/test_kfp.py
+++ b/tests/test_kfp.py
@@ -89,7 +89,7 @@ def kfp_dirs(monkeypatch):
     )
     monkeypatch.setattr(mlrun.kfpops, "KFPMETA_DIR", str(meta_dir))
     monkeypatch.setattr(mlrun.kfpops, "KFP_ARTIFACTS_DIR", str(artifacts_dir))
-    yield (str(meta_dir), str(artifacts_dir), str(output_dir))
+    yield str(meta_dir), str(artifacts_dir), str(output_dir)
 
 
 def test_kfp_function_run(kfp_dirs):
diff --git a/tests/utils/test_helpers.py b/tests/utils/test_helpers.py
index 6a1376af1e9..40bcb0d0597 100644
--- a/tests/utils/test_helpers.py
+++ b/tests/utils/test_helpers.py
@@ -954,3 +954,17 @@ def test_iterate_list_by_chunks(iterable_list, chunk_size, expected_chunked_list
 def test_normalize_username(username, expected_normalized_username):
     normalized_username = mlrun.utils.helpers.normalize_project_username(username)
     assert normalized_username == expected_normalized_username
+
+
+@pytest.mark.parametrize(
+    "basedir,path,is_symlink, is_valid",
+    [
+        ("/base", "/base/valid", False, True),
+        ("/base", "/base/valid", True, True),
+        ("/base", "/../invalid", True, False),
+        ("/base", "/../invalid", False, False),
+    ],
+)
+def test_is_safe_path(basedir, path, is_symlink, is_valid):
+    safe = mlrun.utils.is_safe_path(basedir, path, is_symlink)
+    assert safe == is_valid

From a5551bde80e9470a7df224844e01e44023ec6dce Mon Sep 17 00:00:00 2001
From: Liran BG
Date: Sun, 3 Mar 2024 15:32:37 +0200
Subject: [PATCH 059/119] [Jupyter] Bump requirements [1.6.x] (#5225)

---
 dev-requirements.txt | 2 +-
 dockerfiles/base/mlrun_requirements.txt | 2 +-
 dockerfiles/jupyter/Dockerfile | 2 +-
 dockerfiles/jupyter/requirements.txt | 11 ++++++-----
 dockerfiles/mlrun/requirements.txt | 2 +-
 docs/tutorials/01-mlrun-basics.ipynb | 2 +-
 docs/tutorials/02-model-training.ipynb | 2 +-
 docs/tutorials/colab/01-mlrun-basics-colab.ipynb | 6 +++---
 requirements.txt | 2 +-
 9 files changed, 16 insertions(+), 15 deletions(-)

diff --git a/dev-requirements.txt b/dev-requirements.txt
index f8535d3d855..ca16b4ef75f 100644
--- a/dev-requirements.txt
+++ b/dev-requirements.txt
@@ -27,6 +27,6 @@ avro~=1.11
 sqlalchemy-utils~=0.39.0
 
 # frameworks tests
-scikit-learn~=1.0
+scikit-learn~=1.4
 lightgbm~=3.0; platform_machine != 'arm64'
 xgboost~=1.1
diff --git a/dockerfiles/base/mlrun_requirements.txt b/dockerfiles/base/mlrun_requirements.txt
index 2c55e54a2eb..1855d570a12 100644
--- a/dockerfiles/base/mlrun_requirements.txt
+++ b/dockerfiles/base/mlrun_requirements.txt
@@ -4,6 +4,6 @@
 # TODO: delete me once we delete models entirely
 matplotlib~=3.5
 scipy~=1.11
-scikit-learn~=1.0
+scikit-learn~=1.4
 seaborn~=0.11.0
 scikit-plot~=0.3.7
diff --git a/dockerfiles/jupyter/Dockerfile b/dockerfiles/jupyter/Dockerfile
index 494be177b77..641ad4d9d74 100644
--- a/dockerfiles/jupyter/Dockerfile
+++ b/dockerfiles/jupyter/Dockerfile
@@ -65,7 +65,7 @@ RUN cd /tmp/mlrun && python -m pip install ".[complete-api]"
 
 # This will usually cause a cache miss - so keep it in the last layers
 ARG MLRUN_CACHE_DATE=initial
-RUN git clone --branch 1.5.x https://github.com/mlrun/demos.git $HOME/demos && \
+RUN git clone --branch 1.6.x https://github.com/mlrun/demos.git $HOME/demos && \
     ./demos/update_demos.sh --user=jovyan --path=/home/jovyan/demos --no-backup && \
     cd $HOME/demos && \
     ./community_edition_align.sh && \
diff --git a/dockerfiles/jupyter/requirements.txt b/dockerfiles/jupyter/requirements.txt
index 86db1a6ad67..41b0d4bd31d 100644
--- a/dockerfiles/jupyter/requirements.txt
+++ b/dockerfiles/jupyter/requirements.txt
@@ -1,6 +1,6 @@
 matplotlib~=3.5
 scipy~=1.11
-scikit-learn~=1.0
+scikit-learn~=1.4
 seaborn~=0.11.0
 scikit-plot~=0.3.7
 xgboost~=1.1
@@ -10,14 +10,15 @@ nuclio-jupyter[jupyter-server]~=0.9.15
 nbclassic>=0.2.8
 # added to tackle security vulnerabilities
 notebook~=6.4
-Pillow~=10.0
-jupyterlab-git~=0.41.0
+Pillow~=10.2
+# remove once jupyter-server can be upgraded to >=2.0
+jupyterlab-git~=0.44,<0.50
 Pygments~=2.15
 wheel~=0.38
-setuptools~=68.2
+setuptools~=69.1
 tornado~=6.3
 requests~=2.31
 cryptography~=42.0
-certifi~=2023.7
+certifi~=2024.0
 mpmath~=1.3
 oauthlib~=3.2
diff --git a/dockerfiles/mlrun/requirements.txt b/dockerfiles/mlrun/requirements.txt
index 1e7658fea08..d3c6fde73ad 100644
--- a/dockerfiles/mlrun/requirements.txt
+++ b/dockerfiles/mlrun/requirements.txt
@@ -1,6 +1,6 @@
 matplotlib~=3.5
 scipy~=1.11
-scikit-learn~=1.0
+scikit-learn~=1.4
 seaborn~=0.11.0
 scikit-plot~=0.3.7
 mpi4py~=3.1
diff --git a/docs/tutorials/01-mlrun-basics.ipynb b/docs/tutorials/01-mlrun-basics.ipynb
index 2f0eb2b5b13..cb65c749674 100644
--- a/docs/tutorials/01-mlrun-basics.ipynb
+++ b/docs/tutorials/01-mlrun-basics.ipynb
@@ -1376,7 +1376,7 @@
     "    \"serving\",\n",
     "    image=\"mlrun/mlrun\",\n",
     "    kind=\"serving\",\n",
-    "    requirements=[\"scikit-learn~=1.3.0\"],\n",
+    "    requirements=[\"scikit-learn~=1.4.0\"],\n",
     ")"
    ]
  },
diff --git a/docs/tutorials/02-model-training.ipynb b/docs/tutorials/02-model-training.ipynb
index 0cf9ce53505..cab187c74bf 100644
--- a/docs/tutorials/02-model-training.ipynb
+++ b/docs/tutorials/02-model-training.ipynb
@@ -21,7 +21,7 @@
     "\n",
     "## MLRun installation and configuration\n",
     "\n",
-    "Before running this notebook make sure `mlrun` and `sklearn` packages are installed (`pip install mlrun scikit-learn~=1.3`) and that you have configured the access to the MLRun service. "
+    "Before running this notebook make sure `mlrun` and `sklearn` packages are installed (`pip install mlrun scikit-learn~=1.4`) and that you have configured the access to the MLRun service. "
    ]
  },
 {
diff --git a/docs/tutorials/colab/01-mlrun-basics-colab.ipynb b/docs/tutorials/colab/01-mlrun-basics-colab.ipynb
index 29de48c842b..485d2689cfb 100644
--- a/docs/tutorials/colab/01-mlrun-basics-colab.ipynb
+++ b/docs/tutorials/colab/01-mlrun-basics-colab.ipynb
@@ -46,7 +46,7 @@
     "\n",
     "**Before you start, make sure the MLRun client package is installed and configured properly !**\n",
     "\n",
-    "This notebook uses sklearn. If it is not installed in your environment run `!pip install scikit-learn~=1.0`."
+    "This notebook uses sklearn. If it is not installed in your environment run `!pip install scikit-learn~=1.4`."
    ]
  },
 {
@@ -61,7 +61,7 @@
    "outputs": [],
    "source": [
     "# install MLRun and sklearn, run this only once (restart the notebook after the install !!!)\n",
-    "%pip install mlrun scikit-learn~=1.0"
+    "%pip install mlrun scikit-learn~=1.4"
    ]
  },
 {
@@ -121,7 +121,7 @@
    },
    "outputs": [],
    "source": [
-    "%pip install uvicorn~=0.17.0 dask-kubernetes~=0.11.0 apscheduler~=3.6 sqlite3-to-mysql~=1.4 scikit-learn~=1.0"
+    "%pip install uvicorn~=0.17.0 dask-kubernetes~=0.11.0 apscheduler~=3.6 sqlite3-to-mysql~=1.4 scikit-learn~=1.4"
    ]
  },
 {
diff --git a/requirements.txt b/requirements.txt
index bfd3fbd4af4..2eb5b610e09 100644
--- a/requirements.txt
+++ b/requirements.txt
@@ -36,7 +36,7 @@ storey~=1.6.18
 inflection~=0.5.0
 python-dotenv~=0.17.0
 # older version of setuptools contains vulnerabilities, see `GHSA-r9hx-vwmv-q579`, so we limit to 65.5 and above
-setuptools~=68.2
+setuptools~=69.1
 deprecated~=1.2
 jinja2~=3.1, >=3.1.3
 anyio~=3.7

From c8b9eb694065f530482be8203ae49d3f48410b71 Mon Sep 17 00:00:00 2001
From: Liran BG
Date: Sun, 3 Mar 2024 16:35:56 +0200
Subject: [PATCH 060/119] [API] Bump requirements [1.6.x] (#5228)

---
 dev-requirements.txt | 2 +-
 dockerfiles/mlrun-api/requirements.txt | 11 +-
 .../cmd/schemas_compiler/docker/Dockerfile | 4 +-
 server/log-collector/go.mod | 51 +++---
 server/log-collector/go.sum | 155 ++++++++----------
 5 files changed, 100 insertions(+), 123 deletions(-)

diff --git a/dev-requirements.txt b/dev-requirements.txt
index ca16b4ef75f..2c8140f91a6 100644
--- a/dev-requirements.txt
+++ b/dev-requirements.txt
@@ -14,7 +14,7 @@ pytest-httpserver~=1.0
 pytest-timeout~=2.0
 aioresponses~=0.7
 requests-mock~=1.8
-httpx~=0.23.0
+httpx~=0.24.0
 deepdiff~=6.5
 hypothesis[numpy]~=6.87
 pytest-rerunfailures~=13.0
diff --git a/dockerfiles/mlrun-api/requirements.txt b/dockerfiles/mlrun-api/requirements.txt
index d106d262350..31c7a9dfeaf 100644
--- a/dockerfiles/mlrun-api/requirements.txt
+++ b/dockerfiles/mlrun-api/requirements.txt
@@ -1,10 +1,11 @@
 uvicorn~=0.27.1
 dask-kubernetes~=0.11.0
-apscheduler~=3.6, !=3.10.2
-sqlite3-to-mysql~=1.4
-objgraph~=3.5
-igz-mgmt~=0.0.10
-humanfriendly~=9.2
+# no support for 4 yet
+# 3.10.2 is bugged for python 3.9
+apscheduler>=3.10.3,<4
+objgraph~=3.6
+igz-mgmt~=0.1.0
+humanfriendly~=10.0
 fastapi~=0.110.0
 # in sqlalchemy>=2.0 there is breaking changes (such as in Table class autoload argument is removed)
 sqlalchemy~=1.4
 pymysql~=1.0
diff --git a/server/log-collector/cmd/schemas_compiler/docker/Dockerfile b/server/log-collector/cmd/schemas_compiler/docker/Dockerfile
index 8f2239dfecf..2b651932a24 100644
--- a/server/log-collector/cmd/schemas_compiler/docker/Dockerfile
+++ b/server/log-collector/cmd/schemas_compiler/docker/Dockerfile
@@ -15,7 +15,7 @@
 ARG PYTHON_VERSION=3.9
 ARG GO_VERSION=1.21
 
-FROM golang:${GO_VERSION}-alpine AS golang
+FROM gcr.io/iguazio/golang:${GO_VERSION}-alpine AS golang
 
 FROM python:${PYTHON_VERSION}-alpine
 
@@ -25,7 +25,7 @@ ARG GRPCIO_TOOLS_VERSION="~=1.59.0"
 
 WORKDIR /app/log-collector
 
-RUN apk add --no-cache protoc build-base linux-headers
+RUN apk --no-cache upgrade && apk add --no-cache protoc build-base linux-headers
 
 COPY --from=golang /usr/local/go/ /usr/local/go/
 
diff --git a/server/log-collector/go.mod b/server/log-collector/go.mod
index 76d1e716738..651778d1136 100644
--- a/server/log-collector/go.mod
+++ b/server/log-collector/go.mod
@@ -3,60 +3,59 @@ module github.com/mlrun/mlrun
 go 1.21
 
 require (
-	github.com/google/uuid v1.5.0
+	github.com/google/uuid v1.6.0
 	github.com/grpc-ecosystem/go-grpc-middleware v1.4.0
 	github.com/nuclio/errors v0.0.4
 	github.com/nuclio/logger v0.0.1
 	github.com/nuclio/loggerus v0.0.6
 	github.com/sirupsen/logrus v1.9.3
-	github.com/stretchr/testify v1.8.4
+	github.com/stretchr/testify v1.9.0
 	golang.org/x/sync v0.6.0
-	google.golang.org/grpc v1.60.1
+	google.golang.org/grpc v1.62.0
 	google.golang.org/protobuf v1.32.0
-	k8s.io/api v0.26.8
-	k8s.io/apimachinery v0.26.8
-	k8s.io/client-go v0.26.8
+	k8s.io/api v0.29.2
+	k8s.io/apimachinery v0.29.2
+	k8s.io/client-go v0.29.2
 )
 
 require (
 	github.com/davecgh/go-spew v1.1.1 // indirect
-	github.com/emicklei/go-restful/v3 v3.9.0 // indirect
+	github.com/emicklei/go-restful/v3 v3.11.3 // indirect
 	github.com/evanphx/json-patch v4.12.0+incompatible // indirect
-	github.com/go-logr/logr v1.2.3 // indirect
-	github.com/go-openapi/jsonpointer v0.19.5 // indirect
-	github.com/go-openapi/jsonreference v0.20.0 // indirect
-	github.com/go-openapi/swag v0.19.14 // indirect
+	github.com/go-logr/logr v1.3.0 // indirect
+	github.com/go-openapi/jsonpointer v0.20.2 // indirect
+	github.com/go-openapi/jsonreference v0.20.4 // indirect
+	github.com/go-openapi/swag v0.22.9 // indirect
 	github.com/gogo/protobuf v1.3.2 // indirect
 	github.com/golang/protobuf v1.5.3 // indirect
-	github.com/google/gnostic v0.5.7-v3refs // indirect
-	github.com/google/go-cmp v0.5.9 // indirect
-	github.com/google/gofuzz v1.1.0 // indirect
+	github.com/google/gnostic-models v0.6.8 // indirect
+	github.com/google/gofuzz v1.2.0 // indirect
 	github.com/imdario/mergo v0.3.6 // indirect
 	github.com/josharian/intern v1.0.0 // indirect
 	github.com/json-iterator/go v1.1.12 // indirect
 	github.com/logrusorgru/aurora/v3 v3.0.0 // indirect
-	github.com/mailru/easyjson v0.7.6 // indirect
+	github.com/mailru/easyjson v0.7.7 // indirect
 	github.com/modern-go/concurrent v0.0.0-20180306012644-bacd9c7ef1dd // indirect
 	github.com/modern-go/reflect2 v1.0.2 // indirect
 	github.com/munnerz/goautoneg v0.0.0-20191010083416-a7dc8b61c822 // indirect
 	github.com/pkg/errors v0.9.1 // indirect
 	github.com/pmezard/go-difflib v1.0.0 // indirect
 	github.com/spf13/pflag v1.0.5 // indirect
-	golang.org/x/net v0.20.0 // indirect
-	golang.org/x/oauth2 v0.16.0 // indirect
-	golang.org/x/sys v0.16.0 // indirect
-	golang.org/x/term v0.16.0 // indirect
-	golang.org/x/text v0.14.0 // indirect
+	golang.org/x/net v0.21.0 // indirect
+	golang.org/x/oauth2 v0.17.0 // indirect
+	golang.org/x/sys v0.17.0 // indirect
+	golang.org/x/term v0.17.0 // indirect
+	golang.org/x/text v0.14.0 // indirect
 	golang.org/x/time v0.5.0 // indirect
 	google.golang.org/appengine v1.6.8 // indirect
-	google.golang.org/genproto/googleapis/rpc v0.0.0-20231002182017-d307bd883b97 // indirect
+	google.golang.org/genproto/googleapis/rpc v0.0.0-20240123012728-ef4313101c80 // indirect
 	gopkg.in/inf.v0 v0.9.1 // indirect
 	gopkg.in/yaml.v2 v2.4.0 // indirect
 	gopkg.in/yaml.v3 v3.0.1 // indirect
-	k8s.io/klog/v2 v2.80.1 // indirect
-	k8s.io/kube-openapi v0.0.0-20221012153701-172d655c2280 // indirect
-	k8s.io/utils v0.0.0-20221107191617-1a15be271d1d // indirect
-	sigs.k8s.io/json v0.0.0-20220713155537-f223a00ba0e2 // indirect
-	sigs.k8s.io/structured-merge-diff/v4 v4.2.3 // indirect
+	k8s.io/klog/v2 v2.110.1 // indirect
+	k8s.io/kube-openapi v0.0.0-20231010175941-2dd684a91f00 // indirect
+	k8s.io/utils v0.0.0-20230726121419-3b25d923346b // indirect
+	sigs.k8s.io/json v0.0.0-20221116044647-bc3834ca7abd // indirect
+	sigs.k8s.io/structured-merge-diff/v4 v4.4.1 // indirect
 	sigs.k8s.io/yaml v1.4.0 // indirect
 )
diff --git a/server/log-collector/go.sum b/server/log-collector/go.sum
index 8718e98590e..752a21eee0b 100644
--- a/server/log-collector/go.sum
+++ b/server/log-collector/go.sum
@@ -4,13 +4,11 @@ github.com/benbjohnson/clock v1.1.0/go.mod h1:J11/hYXuz8f4ySSvYwY0FKfm+ezbsZBKZx
 github.com/census-instrumentation/opencensus-proto v0.2.1/go.mod h1:f6KPmirojxKA12rnyqOA5BBL4O983OfeGPqjHWSTneU=
 github.com/client9/misspell v0.3.4/go.mod h1:qj6jICC3Q7zFZvVWo7KLAzC3yx5G7kyvSDkc90ppPyw=
 github.com/cncf/udpa/go v0.0.0-20191209042840-269d4d468f6f/go.mod h1:M8M6+tZqaGXZJjfX53e64911xZQV5JYwmTeXPW+k8Sc=
-github.com/creack/pty v1.1.9/go.mod h1:oKZEueFk5CKHvIhNR5MUki03XCEU+Q6VDXinZuGJ33E=
 github.com/davecgh/go-spew v1.1.0/go.mod h1:J7Y8YcW2NihsgmVo/mv3lAwl/skON4iLHjSsI+c5H38=
 github.com/davecgh/go-spew v1.1.1 h1:vj9j/u1bqnvCEfJOwUhtlOARqs3+rkHYY13jYWTU97c=
 github.com/davecgh/go-spew v1.1.1/go.mod h1:J7Y8YcW2NihsgmVo/mv3lAwl/skON4iLHjSsI+c5H38=
-github.com/docopt/docopt-go v0.0.0-20180111231733-ee0de3bc6815/go.mod h1:WwZ+bS3ebgob9U8Nd0kOddGdZWjyMGR8Wziv+TBNwSE=
-github.com/emicklei/go-restful/v3 v3.9.0 h1:XwGDlfxEnQZzuopoqxwSEllNcCOM9DhhFyhFIIGKwxE=
-github.com/emicklei/go-restful/v3 v3.9.0/go.mod h1:6n3XBCmQQb25CM2LCACGz8ukIrRry+4bhvbpWn3mrbc=
+github.com/emicklei/go-restful/v3 v3.11.3 h1:yagOQz/38xJmcNeZJtrUcKjkHRltIaIFXKWeG1SkWGE=
+github.com/emicklei/go-restful/v3 v3.11.3/go.mod h1:6n3XBCmQQb25CM2LCACGz8ukIrRry+4bhvbpWn3mrbc=
 github.com/envoyproxy/go-control-plane v0.9.0/go.mod h1:YTl/9mNaCwkRvm6d1a2C3ymFceY/DCBVvsKhRF0iEA4=
 github.com/envoyproxy/go-control-plane v0.9.1-0.20191026205805-5f8ba28d4473/go.mod h1:YTl/9mNaCwkRvm6d1a2C3ymFceY/DCBVvsKhRF0iEA4=
 github.com/envoyproxy/go-control-plane v0.9.4/go.mod h1:6rpuAdCZL397s3pYoYcLgu1mIlRU8Am5FuJP05cCM98=
@@ -19,18 +17,17 @@ github.com/evanphx/json-patch v4.12.0+incompatible h1:4onqiflcdA9EOZ4RxV643DvftH
 github.com/evanphx/json-patch v4.12.0+incompatible/go.mod h1:50XU6AFN0ol/bzJsmQLiYLvXMP4fmwYFNcr97nuDLSk=
 github.com/go-kit/log v0.1.0/go.mod h1:zbhenjAZHb184qTLMA9ZjW7ThYL0H2mk7Q6pNt4vbaY=
 github.com/go-logfmt/logfmt v0.5.0/go.mod h1:wCYkCAKZfumFQihp8CzCvQ3paCTfi41vtzG1KdI/P7A=
-github.com/go-logr/logr v1.2.0/go.mod h1:jdQByPbusPIv2/zmleS9BjJVeZ6kBagPoEUsqbVz/1A=
-github.com/go-logr/logr v1.2.3 h1:2DntVwHkVopvECVRSlL5PSo9eG+cAkDCuckLubN+rq0=
-github.com/go-logr/logr v1.2.3/go.mod h1:jdQByPbusPIv2/zmleS9BjJVeZ6kBagPoEUsqbVz/1A=
-github.com/go-openapi/jsonpointer v0.19.3/go.mod h1:Pl9vOtqEWErmShwVjC8pYs9cog34VGT37dQOVbmoatg=
-github.com/go-openapi/jsonpointer v0.19.5 h1:gZr+CIYByUqjcgeLXnQu2gHYQC9o73G2XUeOFYEICuY=
-github.com/go-openapi/jsonpointer v0.19.5/go.mod h1:Pl9vOtqEWErmShwVjC8pYs9cog34VGT37dQOVbmoatg=
-github.com/go-openapi/jsonreference v0.20.0 h1:MYlu0sBgChmCfJxxUKZ8g1cPWFOB37YSZqewK7OKeyA=
-github.com/go-openapi/jsonreference v0.20.0/go.mod h1:Ag74Ico3lPc+zR+qjn4XBUmXymS4zJbYVCZmcgkasdo=
-github.com/go-openapi/swag v0.19.5/go.mod h1:POnQmlKehdgb5mhVOsnJFsivZCEZ/vjK9gh66Z9tfKk=
-github.com/go-openapi/swag v0.19.14 h1:gm3vOOXfiuw5i9p5N9xJvfjvuofpyvLA9Wr6QfK5Fng=
-github.com/go-openapi/swag v0.19.14/go.mod h1:QYRuS/SOXUCsnplDa677K7+DxSOj6IPNl/eQntq43wQ=
+github.com/go-logr/logr v1.3.0 h1:2y3SDp0ZXuc6/cjLSZ+Q3ir+QB9T/iG5yYRXqsagWSY=
+github.com/go-logr/logr v1.3.0/go.mod h1:9T104GzyrTigFIr8wt5mBrctHMim0Nb2HLGrmQ40KvY=
+github.com/go-openapi/jsonpointer v0.20.2 h1:mQc3nmndL8ZBzStEo3JYF8wzmeWffDH4VbXz58sAx6Q=
+github.com/go-openapi/jsonpointer v0.20.2/go.mod h1:bHen+N0u1KEO3YlmqOjTT9Adn1RfD91Ar825/PuiRVs=
+github.com/go-openapi/jsonreference v0.20.4 h1:bKlDxQxQJgwpUSgOENiMPzCTBVuc7vTdXSSgNeAhojU=
+github.com/go-openapi/jsonreference v0.20.4/go.mod h1:5pZJyJP2MnYCpoeoMAql78cCHauHj0V9Lhc506VOpw4=
+github.com/go-openapi/swag v0.22.9 h1:XX2DssF+mQKM2DHsbgZK74y/zj4mo9I99+89xUmuZCE=
+github.com/go-openapi/swag v0.22.9/go.mod h1:3/OXnFfnMAwBD099SwYRk7GD3xOrr1iL7d/XNLXVVwE=
 github.com/go-stack/stack v1.8.0/go.mod h1:v0f6uXyyMGvRgIKkXu+yp6POWl0qKG85gN/melR3HDY=
+github.com/go-task/slim-sprig v0.0.0-20230315185526-52ccab3ef572 h1:tfuBGBXKqDEevZMzYi5KSi8KkcZtzBcTgAUUtapy0OI=
+github.com/go-task/slim-sprig v0.0.0-20230315185526-52ccab3ef572/go.mod h1:9Pwr4B2jHnOSGXyyzV8ROjYa2ojvAY6HCGYYfMoC3Ls=
 github.com/gogo/protobuf v1.3.2 h1:Ov1cvc58UF3b5XjBnZv7+opcTcQFZebYjWzi34vdm4Q=
 github.com/gogo/protobuf v1.3.2/go.mod h1:P1XiOD3dCwIKUDQYPy72D8LYyHL2YPYrpS2s69NZV8Q=
 github.com/golang/glog v0.0.0-20160126235308-23def4e6c14b/go.mod h1:SBH7ygxi8pfUlaOkMMuAQtPIUF8ecWP5IEl/CR7VP2Q=
@@ -38,30 +35,24 @@ github.com/golang/mock v1.1.1/go.mod h1:oTYuIxOrZwtPieC+H1uAHpcLFnEyAGVDL/k47Jfb
 github.com/golang/protobuf v1.2.0/go.mod h1:6lQm79b+lXiMfvg/cZm0SGofjICqVBUtrP5yJMmIC1U=
 github.com/golang/protobuf v1.3.2/go.mod h1:6lQm79b+lXiMfvg/cZm0SGofjICqVBUtrP5yJMmIC1U=
 github.com/golang/protobuf v1.3.3/go.mod h1:vzj43D7+SQXF/4pzW/hwtAqwc6iTitCiVSaWz5lYuqw=
-github.com/golang/protobuf v1.4.0-rc.1/go.mod h1:ceaxUfeHdC40wWswd/P6IGgMaK3YpKi5j83Wpe3EHw8=
-github.com/golang/protobuf v1.4.0-rc.1.0.20200221234624-67d41d38c208/go.mod h1:xKAWHe0F5eneWXFV3EuXVDTCmh+JuBKY0li0aMyXATA=
-github.com/golang/protobuf v1.4.0-rc.2/go.mod h1:LlEzMj4AhA7rCAGe4KMBDvJI+AwstrUpVNzEA03Pprs=
-github.com/golang/protobuf v1.4.0-rc.4.0.20200313231945-b860323f09d0/go.mod h1:WU3c8KckQ9AFe+yFwt9sWVRKCVIyN9cPHBJSNnbL67w=
-github.com/golang/protobuf v1.4.0/go.mod h1:jodUvKwWbYaEsadDk5Fwe5c77LiNKVO9IDvqG2KuDX0=
-github.com/golang/protobuf v1.4.1/go.mod h1:U8fpvMrcmy5pZrNK1lt4xCsGvpyWQ/VVv6QDs8UjoX8=
 github.com/golang/protobuf v1.5.0/go.mod h1:FsONVRAS9T7sI+LIUmWTfcYkHO4aIWwzhcaSAoJOfIk=
 github.com/golang/protobuf v1.5.2/go.mod h1:XVQd3VNwM+JqD3oG2Ue2ip4fOMUkwXdXDdiuN0vRsmY=
 github.com/golang/protobuf v1.5.3 h1:KhyjKVUg7Usr/dYsdSqoFveMYd5ko72D+zANwlG1mmg=
 github.com/golang/protobuf v1.5.3/go.mod h1:XVQd3VNwM+JqD3oG2Ue2ip4fOMUkwXdXDdiuN0vRsmY=
-github.com/google/gnostic v0.5.7-v3refs h1:FhTMOKj2VhjpouxvWJAV1TL304uMlb9zcDqkl6cEI54=
-github.com/google/gnostic v0.5.7-v3refs/go.mod h1:73MKFl6jIHelAJNaBGFzt3SPtZULs9dYrGFt8OiIsHQ=
+github.com/google/gnostic-models v0.6.8 h1:yo/ABAfM5IMRsS1VnXjTBvUb61tFIHozhlYvRgGre9I=
+github.com/google/gnostic-models v0.6.8/go.mod h1:5n7qKqH0f5wFt+aWF8CW6pZLLNOfYuF5OpfBSENuI8U=
 github.com/google/go-cmp v0.2.0/go.mod h1:oXzfMopK8JAjlY9xF4vHSVASa0yLyX7SntLO5aqRK0M=
-github.com/google/go-cmp v0.3.0/go.mod h1:8QqcDgzrUqlUb/G2PQTWiueGozuR1884gddMywk6iLU=
-github.com/google/go-cmp v0.3.1/go.mod h1:8QqcDgzrUqlUb/G2PQTWiueGozuR1884gddMywk6iLU=
-github.com/google/go-cmp v0.4.0/go.mod h1:v8dTdLbMG2kIc/vJvl+f65V22dbkXbowE6jgT/gNBxE=
 github.com/google/go-cmp v0.5.5/go.mod h1:v8dTdLbMG2kIc/vJvl+f65V22dbkXbowE6jgT/gNBxE=
-github.com/google/go-cmp v0.5.9 h1:O2Tfq5qg4qc4AmwVlvv0oLiVAGB7enBSJ2x2DqQFi38=
 github.com/google/go-cmp v0.5.9/go.mod h1:17dUlkBOakJ0+DkrSSNjCkIjxS6bF9zb3elmeNGIjoY=
+github.com/google/go-cmp v0.6.0 h1:ofyhxvXcZhMsU5ulbFiLKl/XBFqE1GSq7atu8tAmTRI=
+github.com/google/go-cmp v0.6.0/go.mod h1:17dUlkBOakJ0+DkrSSNjCkIjxS6bF9zb3elmeNGIjoY=
 github.com/google/gofuzz v1.0.0/go.mod h1:dBl0BpW6vV/+mYPU4Po3pmUjxk6FQPldtuIdl/M65Eg=
-github.com/google/gofuzz v1.1.0 h1:Hsa8mG0dQ46ij8Sl2AYJDUv1oA9/d6Vk+3LG99Oe02g=
v1.1.0/go.mod h1:dBl0BpW6vV/+mYPU4Po3pmUjxk6FQPldtuIdl/M65Eg= -github.com/google/uuid v1.5.0 h1:1p67kYwdtXjb0gL0BPiP1Av9wiZPo5A8z2cWkTZ+eyU= -github.com/google/uuid v1.5.0/go.mod h1:TIyPZe4MgqvfeYDBFedMoGGpEw/LqOeaOT+nhxU+yHo= +github.com/google/gofuzz v1.2.0 h1:xRy4A+RhZaiKjJ1bPfwQ8sedCA+YS2YcCHW6ec7JMi0= +github.com/google/gofuzz v1.2.0/go.mod h1:dBl0BpW6vV/+mYPU4Po3pmUjxk6FQPldtuIdl/M65Eg= +github.com/google/pprof v0.0.0-20210720184732-4bb14d4b1be1 h1:K6RDEckDVWvDI9JAJYCmNdQXq6neHJOYx3V6jnqNEec= +github.com/google/pprof v0.0.0-20210720184732-4bb14d4b1be1/go.mod h1:kpwsk12EmLew5upagYY7GY0pfYCcupk39gWOCRROcvE= +github.com/google/uuid v1.6.0 h1:NIvaJDMOsjHA8n1jAhLSgzrAzy1Hgr+hNrb57e+94F0= +github.com/google/uuid v1.6.0/go.mod h1:TIyPZe4MgqvfeYDBFedMoGGpEw/LqOeaOT+nhxU+yHo= github.com/grpc-ecosystem/go-grpc-middleware v1.4.0 h1:UH//fgunKIs4JdUbpDl1VZCDaL56wXCB/5+wF6uHfaI= github.com/grpc-ecosystem/go-grpc-middleware v1.4.0/go.mod h1:g5qyo/la0ALbONm6Vbp88Yd8NsDy6rZz+RcrMPxvld8= github.com/imdario/mergo v0.3.6 h1:xTNEAn+kxVO7dTZGu0CegyqKZmoWFI0rF8UxjlB2d28= @@ -74,7 +65,8 @@ github.com/kisielk/errcheck v1.5.0/go.mod h1:pFxgyoBC7bSaBwPgfKdkLd5X25qrDl4LWUI github.com/kisielk/gotool v1.0.0/go.mod h1:XhKaO+MFFWcvkIS/tQcRk01m1F5IRFswLeQ+oQHNcck= github.com/konsorten/go-windows-terminal-sequences v1.0.1/go.mod h1:T0+1ngSBFLxvqU3pZ+m/2kptfBszLMUkC4ZK/EgS/cQ= github.com/kr/pretty v0.1.0/go.mod h1:dAy3ld7l9f0ibDNOQOHHMYYIIbhfbHSm3C4ZsoJORNo= -github.com/kr/pretty v0.2.0/go.mod h1:ipq/a2n7PKx3OHsz4KJII5eveXtPO4qwEXGdVfWzfnI= +github.com/kr/pretty v0.3.1 h1:flRD4NNwYAUpkphVc1HcthR4KEIFJ65n8Mw5qdRn3LE= +github.com/kr/pretty v0.3.1/go.mod h1:hoEshYVHaxMs3cyo3Yncou5ZscifuDolrwPKZanG3xk= github.com/kr/pty v1.1.1/go.mod h1:pFQYn66WHrOpPYNljwOMqo10TkYh1fy3cYio2l3bCsQ= github.com/kr/text v0.1.0/go.mod h1:4Jbv+DJW3UT/LiOwJeYQe1efqtUx/iVham/4vfdArNI= github.com/kr/text v0.2.0 h1:5Nx0Ya0ZqY2ygV366QzturHI13Jq95ApcVaJBhpS+AY= @@ -82,10 +74,8 @@ github.com/kr/text v0.2.0/go.mod h1:eLer722TekiGuMkidMxC/pM04lWEeraHUUmBw8l2grE= github.com/logrusorgru/aurora/v3 v3.0.0 h1:R6zcoZZbvVcGMvDCKo45A9U/lzYyzl5NfYIvznmDfE4= github.com/logrusorgru/aurora/v3 v3.0.0/go.mod h1:vsR12bk5grlLvLXAYrBsb5Oc/N+LxAlxggSjiwMnCUc= github.com/magefile/mage v1.10.0/go.mod h1:z5UZb/iS3GoOSn0JgWuiw7dxlurVYTu+/jHXqQg881A= -github.com/mailru/easyjson v0.0.0-20190614124828-94de47d64c63/go.mod h1:C1wdFJiN94OJF2b5HbByQZoLdCWB1Yqtg26g4irojpc= -github.com/mailru/easyjson v0.0.0-20190626092158-b2ccc519800e/go.mod h1:C1wdFJiN94OJF2b5HbByQZoLdCWB1Yqtg26g4irojpc= -github.com/mailru/easyjson v0.7.6 h1:8yTIVnZgCoiM1TgqoeTl+LfU5Jg6/xL3QhGQnimLYnA= -github.com/mailru/easyjson v0.7.6/go.mod h1:xzfreul335JAWq5oZzymOObrkdz5UnU4kGfJJLY9Nlc= +github.com/mailru/easyjson v0.7.7 h1:UGYAvKxe3sBsEDzO8ZeWOSlIQfWFlxbzLZe7hwFURr0= +github.com/mailru/easyjson v0.7.7/go.mod h1:xzfreul335JAWq5oZzymOObrkdz5UnU4kGfJJLY9Nlc= github.com/modern-go/concurrent v0.0.0-20180228061459-e0a39a4cb421/go.mod h1:6dJC0mAP4ikYIbvyc7fijjWJddQyLn8Ig3JB5CqoB9Q= github.com/modern-go/concurrent v0.0.0-20180306012644-bacd9c7ef1dd h1:TRLaZ9cD/w8PVh93nsPXa1VrQ6jlwL5oN8l14QlcNfg= github.com/modern-go/concurrent v0.0.0-20180306012644-bacd9c7ef1dd/go.mod h1:6dJC0mAP4ikYIbvyc7fijjWJddQyLn8Ig3JB5CqoB9Q= @@ -93,18 +83,16 @@ github.com/modern-go/reflect2 v1.0.2 h1:xBagoLtFs94CBntxluKeaWgTMpvLxC4ur3nMaC9G github.com/modern-go/reflect2 v1.0.2/go.mod h1:yWuevngMOJpCy52FWWMvUC8ws7m/LJsjYzDa0/r8luk= github.com/munnerz/goautoneg v0.0.0-20191010083416-a7dc8b61c822 h1:C3w9PqII01/Oq1c1nUAm88MOHcQC9l5mIlSMApZMrHA= 
github.com/munnerz/goautoneg v0.0.0-20191010083416-a7dc8b61c822/go.mod h1:+n7T8mK8HuQTcFwEeznm/DIxMOiR9yIdICNftLE1DvQ= -github.com/niemeyer/pretty v0.0.0-20200227124842-a10e7caefd8e h1:fD57ERR4JtEqsWbfPhv4DMiApHyliiK5xCTNVSPiaAs= -github.com/niemeyer/pretty v0.0.0-20200227124842-a10e7caefd8e/go.mod h1:zD1mROLANZcx1PVRCS0qkT7pwLkGfwJo4zjcN/Tysno= github.com/nuclio/errors v0.0.4 h1:Uf/Kfje0VJGYeuNAhuFNaL6bm0O1WCQOg8vEjiY85oQ= github.com/nuclio/errors v0.0.4/go.mod h1:KV56dHK50bOG4+fSUvCZA9D9Ky4utc5LBGGDCpxa8dY= github.com/nuclio/logger v0.0.1 h1:e+vT/Ug65RC+u0QX2J+lq3P57ZBwJ1ZA6Q2LCEcViwE= github.com/nuclio/logger v0.0.1/go.mod h1:ttazNAqTxKjQ7XrGDZxecumGa9KCIuJh88gzFY1mRXo= github.com/nuclio/loggerus v0.0.6 h1:aQVUb216pY/oXwfX4BZ/yfOr5vhPygEqFBeK9ClIsFM= github.com/nuclio/loggerus v0.0.6/go.mod h1:0i2gclRsyy2kJrnImkgnsOGwf5NC4tbcs+TisGwFkA4= -github.com/onsi/ginkgo/v2 v2.4.0 h1:+Ig9nvqgS5OBSACXNk15PLdp0U9XPYROt9CFzVdFGIs= -github.com/onsi/ginkgo/v2 v2.4.0/go.mod h1:iHkDK1fKGcBoEHT5W7YBq4RFWaQulw+caOMkAt4OrFo= -github.com/onsi/gomega v1.23.0 h1:/oxKu9c2HVap+F3PfKort2Hw5DEU+HGlW8n+tguWsys= -github.com/onsi/gomega v1.23.0/go.mod h1:Z/NWtiqwBrwUt4/2loMmHL63EDLnYHmVbuBpDr2vQAg= +github.com/onsi/ginkgo/v2 v2.13.0 h1:0jY9lJquiL8fcf3M4LAXN5aMlS/b2BV86HFFPCPMgE4= +github.com/onsi/ginkgo/v2 v2.13.0/go.mod h1:TE309ZR8s5FsKKpuB1YAQYBzCaAfUgatB/xlT/ETL/o= +github.com/onsi/gomega v1.29.0 h1:KIA/t2t5UBzoirT4H9tsML45GEbo3ouUnBHsCfD2tVg= +github.com/onsi/gomega v1.29.0/go.mod h1:9sxs+SwGrKI0+PWe4Fxa9tFQQBG5xSsSbMXOI8PPpoQ= github.com/opentracing/opentracing-go v1.1.0/go.mod h1:UkNAQd3GIcIGf0SeVgPpRdFStlNbqXla1AfSYxPUl2o= github.com/pkg/errors v0.8.1/go.mod h1:bwawxfHBFNV+L2hUp1rHADufV3IMtnDRdf1r5NINEl0= github.com/pkg/errors v0.9.1 h1:FEBLx1zS214owpjy7qsBeixbURkuhQAwrK5UwLGTwt4= @@ -112,23 +100,22 @@ github.com/pkg/errors v0.9.1/go.mod h1:bwawxfHBFNV+L2hUp1rHADufV3IMtnDRdf1r5NINE github.com/pmezard/go-difflib v1.0.0 h1:4DBwDE0NGyQoBHbLQYPwSUPoCMWR5BEzIk/f1lZbAQM= github.com/pmezard/go-difflib v1.0.0/go.mod h1:iKH77koFhYxTK1pcRnkKkqfTogsbg7gZNVY4sRDYZ/4= github.com/prometheus/client_model v0.0.0-20190812154241-14fe0d1b01d4/go.mod h1:xMI15A0UPsDsEKsMN9yxemIoYk6Tm2C1GtYGdfGttqA= +github.com/rogpeppe/go-internal v1.11.0 h1:cWPaGQEPrBb5/AsnsZesgZZ9yb1OQ+GOISoDNXVBh4M= +github.com/rogpeppe/go-internal v1.11.0/go.mod h1:ddIwULY96R17DhadqLgMfk9H9tvdUzkipdSkR5nkCZA= github.com/sirupsen/logrus v1.4.2/go.mod h1:tLMulIdttU9McNUspp0xgXVQah82FyeX6MwdIuYE2rE= github.com/sirupsen/logrus v1.8.0/go.mod h1:4GuYW9TZmE769R5STWrRakJc4UqQ3+QQ95fyz7ENv1A= github.com/sirupsen/logrus v1.9.3 h1:dueUQJ1C2q9oE3F7wvmSGAaVtTmUizReu6fjN8uqzbQ= github.com/sirupsen/logrus v1.9.3/go.mod h1:naHLuLoDiP4jHNo9R0sCBMtWGeIprob74mVsIT4qYEQ= github.com/spf13/pflag v1.0.5 h1:iy+VFUOCP1a+8yFto/drg2CJ5u0yRoB7fZw3DKv/JXA= github.com/spf13/pflag v1.0.5/go.mod h1:McXfInJRrz4CZXVZOBLb0bTZqETkiAhM9Iw0y3An2Bg= -github.com/stoewer/go-strcase v1.2.0/go.mod h1:IBiWB2sKIp3wVVQ3Y035++gc+knqhUQag1KpM8ahLw8= github.com/stretchr/objx v0.1.0/go.mod h1:HFkY916IF+rwdDfMAkV7OtwuqBVzrE8GR6GFx+wExME= github.com/stretchr/objx v0.1.1/go.mod h1:HFkY916IF+rwdDfMAkV7OtwuqBVzrE8GR6GFx+wExME= github.com/stretchr/testify v1.2.2/go.mod h1:a8OnRcib4nhh0OaRAV+Yts87kKdq0PP7pXfy6kDkUVs= github.com/stretchr/testify v1.3.0/go.mod h1:M5WIy9Dh21IEIfnGCwXGc5bZfKNJtfHm1UVUgZn+9EI= github.com/stretchr/testify v1.4.0/go.mod h1:j7eGeouHqKxXV5pUuKE4zz7dFj8WfuZ+81PSLYec5m4= -github.com/stretchr/testify v1.5.1/go.mod h1:5W2xD1RspED5o8YsWQXVCued0rvSQ+mT+I5cxcmMvtA= -github.com/stretchr/testify 
v1.6.1/go.mod h1:6Fq8oRcR53rry900zMqJjRRixrwX3KX962/h/Wwjteg= github.com/stretchr/testify v1.7.0/go.mod h1:6Fq8oRcR53rry900zMqJjRRixrwX3KX962/h/Wwjteg= -github.com/stretchr/testify v1.8.4 h1:CcVxjf3Q8PM0mHUKJCdn+eZZtm5yQwehR5yeSVQQcUk= -github.com/stretchr/testify v1.8.4/go.mod h1:sz/lmYIOXD/1dqDmKjjqLyZ2RngseejIcXlSw2iwfAo= +github.com/stretchr/testify v1.9.0 h1:HtqpIVDClZ4nwg75+f6Lvsy/wHu+3BoSGCbBAcpTsTg= +github.com/stretchr/testify v1.9.0/go.mod h1:r2ic/lqez/lEtzL7wO/rwa5dbSLXVDPFyf8C91i36aY= github.com/yuin/goldmark v1.1.27/go.mod h1:3hX8gzYuyVAZsxl0MRgGTJEmQBFcNTphYh9decYSb74= github.com/yuin/goldmark v1.2.1/go.mod h1:3hX8gzYuyVAZsxl0MRgGTJEmQBFcNTphYh9decYSb74= github.com/yuin/goldmark v1.4.13/go.mod h1:6yULJ656Px+3vBD8DxQVa3kxgyrAnzto9xy5taEt/CY= @@ -158,11 +145,11 @@ golang.org/x/net v0.0.0-20200226121028-0de0cce0169b/go.mod h1:z5CRVTTTmAJ677TzLL golang.org/x/net v0.0.0-20201021035429-f5854403a974/go.mod h1:sp8m0HH+o8qH0wwXwYZr8TS3Oi6o0r6Gce1SSxlDquU= golang.org/x/net v0.0.0-20210226172049-e18ecbb05110/go.mod h1:m0MpNAwzfU5UDzcl9v0D8zg8gWTRqZa9RBIspLL5mdg= golang.org/x/net v0.0.0-20220722155237-a158d28d115b/go.mod h1:XRhObCWvk6IyKnWLug+ECip1KBveYUHfp+8e9klMJ9c= -golang.org/x/net v0.20.0 h1:aCL9BSgETF1k+blQaYUBx9hJ9LOGP3gAVemcZlf1Kpo= -golang.org/x/net v0.20.0/go.mod h1:z8BVo6PvndSri0LbOE3hAn0apkU+1YvI6E70E9jsnvY= +golang.org/x/net v0.21.0 h1:AQyQV4dYCvJ7vGmJyKki9+PBdyvhkSd8EIx/qb0AYv4= +golang.org/x/net v0.21.0/go.mod h1:bIjVDfnllIU7BJ2DNgfnXvpSvtn8VRwhlsaeUTyUS44= golang.org/x/oauth2 v0.0.0-20180821212333-d2e6202438be/go.mod h1:N/0e6XlmueqKjAGxoOufVs8QHGRruUQn6yWY3a++T0U= -golang.org/x/oauth2 v0.16.0 h1:aDkGMBSYxElaoP81NpoUoz2oo2R2wHdZpGToUxfyQrQ= -golang.org/x/oauth2 v0.16.0/go.mod h1:hqZ+0LWXsiVoZpeld6jVt06P3adbS2Uu911W1SsJv2o= +golang.org/x/oauth2 v0.17.0 h1:6m3ZPmLEFdVxKKWnKq4VqZ60gutO35zm+zrAHVmHyDQ= +golang.org/x/oauth2 v0.17.0/go.mod h1:OzPDGQiuQMguemayvdylqddI7qcD9lnSDb+1FiwQ5HA= golang.org/x/sync v0.0.0-20180314180146-1d60e4601c6f/go.mod h1:RxMgew5VJxzue5/jJTE5uejpjVlOe/izrB70Jof72aM= golang.org/x/sync v0.0.0-20181108010431-42b317875d0f/go.mod h1:RxMgew5VJxzue5/jJTE5uejpjVlOe/izrB70Jof72aM= golang.org/x/sync v0.0.0-20190423024810-112230192c58/go.mod h1:RxMgew5VJxzue5/jJTE5uejpjVlOe/izrB70Jof72aM= @@ -183,12 +170,12 @@ golang.org/x/sys v0.0.0-20211025201205-69cdffdb9359/go.mod h1:oPkhp1MJrh7nUepCBc golang.org/x/sys v0.0.0-20220520151302-bc2c85ada10a/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg= golang.org/x/sys v0.0.0-20220715151400-c0bba94af5f8/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg= golang.org/x/sys v0.0.0-20220722155257-8c9f86f7a55f/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg= -golang.org/x/sys v0.16.0 h1:xWw16ngr6ZMtmxDyKyIgsE93KNKz5HKmMa3b8ALHidU= -golang.org/x/sys v0.16.0/go.mod h1:/VUhepiaJMQUp4+oa/7Zr1D23ma6VTLIYjOOTFZPUcA= +golang.org/x/sys v0.17.0 h1:25cE3gD+tdBA7lp7QfhuV+rJiE9YXTcS3VG1SqssI/Y= +golang.org/x/sys v0.17.0/go.mod h1:/VUhepiaJMQUp4+oa/7Zr1D23ma6VTLIYjOOTFZPUcA= golang.org/x/term v0.0.0-20201126162022-7de9c90e9dd1/go.mod h1:bj7SfCRtBDWHUb9snDiAeCFNEtKQo2Wmx5Cou7ajbmo= golang.org/x/term v0.0.0-20210927222741-03fcf44c2211/go.mod h1:jbD1KX2456YbFQfuXm/mYQcufACuNUgVhRMnK/tPxf8= -golang.org/x/term v0.16.0 h1:m+B6fahuftsE9qjo0VWp2FW0mB3MTJvR0BaMQrq0pmE= -golang.org/x/term v0.16.0/go.mod h1:yn7UURbUtPyrVJPGPq404EukNFxcm/foM+bV/bfcDsY= +golang.org/x/term v0.17.0 h1:mkTF7LCd6WGJNL3K1Ad7kwxNfYAW6a8a8QqtMblp/4U= +golang.org/x/term v0.17.0/go.mod h1:lLRBjIVuehSbZlaOtGMbcMncT+aqLLLmKrsjNrUguwk= golang.org/x/text v0.3.0/go.mod 
h1:NqM8EUOU14njkJ3fqMW+pc6Ldnwhi/IjpwHt7yyuwOQ= golang.org/x/text v0.3.3/go.mod h1:5Zoc/QRtKVWzQhOtBMvqHzDpF6irO9z98xDceosuGiQ= golang.org/x/text v0.3.7/go.mod h1:u+2+/6zg+i71rQMx5EYifcz6MCKuco9NR6JIITiCfzQ= @@ -207,6 +194,8 @@ golang.org/x/tools v0.0.0-20191119224855-298f0cb1881e/go.mod h1:b+2E5dAYhXwXZwtn golang.org/x/tools v0.0.0-20200619180055-7c47624df98f/go.mod h1:EkVYQZoAsY45+roYkvgYkIh4xh/qjgUK9TdY2XT94GE= golang.org/x/tools v0.0.0-20210106214847-113979e3529a/go.mod h1:emZCQorbCU4vsT4fOWvOPXz4eW1wZW4PmDk9uLelYpA= golang.org/x/tools v0.1.12/go.mod h1:hNGJHUnrk76NpqgfD5Aqm5Crs+Hm0VOH/i9J2+nxYbc= +golang.org/x/tools v0.16.1 h1:TLyB3WofjdOEepBHAU20JdNC1Zbg87elYofWYAY5oZA= +golang.org/x/tools v0.16.1/go.mod h1:kYVVN6I1mBNoB1OX+noeBjbRk4IUEPa7JJ+TJMEooJ0= golang.org/x/xerrors v0.0.0-20190717185122-a985d3407aa7/go.mod h1:I/5z698sn9Ka8TeJc9MKroUUfqBBauWjQqLJ2OPfmY0= golang.org/x/xerrors v0.0.0-20191011141410-1b5146add898/go.mod h1:I/5z698sn9Ka8TeJc9MKroUUfqBBauWjQqLJ2OPfmY0= golang.org/x/xerrors v0.0.0-20191204190536-9bdfabe68543/go.mod h1:I/5z698sn9Ka8TeJc9MKroUUfqBBauWjQqLJ2OPfmY0= @@ -218,34 +207,23 @@ google.golang.org/appengine v1.6.8/go.mod h1:1jJ3jBArFh5pcgW8gCtRJnepW8FzD1V44FJ google.golang.org/genproto v0.0.0-20180817151627-c66870c02cf8/go.mod h1:JiN7NxoALGmiZfu7CAH4rXhgtRTLTxftemlI0sWmxmc= google.golang.org/genproto v0.0.0-20190819201941-24fa4b261c55/go.mod h1:DMBHOl98Agz4BDEuKkezgsaosCRResVns1a3J2ZsMNc= google.golang.org/genproto v0.0.0-20200423170343-7949de9c1215/go.mod h1:55QSHmfGQM9UVYDPBsyGGes0y52j32PQ3BqQfXhyH3c= -google.golang.org/genproto v0.0.0-20200526211855-cb27e3aa2013/go.mod h1:NbSheEEYHJ7i3ixzK3sjbqSGDJWnxyFXZblF3eUsNvo= -google.golang.org/genproto v0.0.0-20201019141844-1ed22bb0c154/go.mod h1:FWY/as6DDZQgahTzZj3fqbO1CbirC29ZNUFHwi0/+no= -google.golang.org/genproto/googleapis/rpc v0.0.0-20231002182017-d307bd883b97 h1:6GQBEOdGkX6MMTLT9V+TjtIRZCw9VPD5Z+yHY9wMgS0= -google.golang.org/genproto/googleapis/rpc v0.0.0-20231002182017-d307bd883b97/go.mod h1:v7nGkzlmW8P3n/bKmWBn2WpBjpOEx8Q6gMueudAmKfY= +google.golang.org/genproto/googleapis/rpc v0.0.0-20240123012728-ef4313101c80 h1:AjyfHzEPEFp/NpvfN5g+KDla3EMojjhRVZc1i7cj+oM= +google.golang.org/genproto/googleapis/rpc v0.0.0-20240123012728-ef4313101c80/go.mod h1:PAREbraiVEVGVdTZsVWjSbbTtSyGbAgIIvni8a8CD5s= google.golang.org/grpc v1.19.0/go.mod h1:mqu4LbDTu4XGKhr4mRzUsmM4RtVoemTSY81AxZiDr8c= google.golang.org/grpc v1.23.0/go.mod h1:Y5yQAOtifL1yxbo5wqy6BxZv8vAUGQwXBOALyacEbxg= google.golang.org/grpc v1.25.1/go.mod h1:c3i+UQWmh7LiEpx4sFZnkU36qjEYZ0imhYfXVyQciAY= google.golang.org/grpc v1.27.0/go.mod h1:qbnxyOmOxrQa7FizSgH+ReBfzJrCY1pSN7KXBS8abTk= google.golang.org/grpc v1.29.1/go.mod h1:itym6AZVZYACWQqET3MqgPpjcuV5QH3BxFS3IjizoKk= -google.golang.org/grpc v1.60.1 h1:26+wFr+cNqSGFcOXcabYC0lUVJVRa2Sb2ortSK7VrEU= -google.golang.org/grpc v1.60.1/go.mod h1:OlCHIeLYqSSsLi6i49B5QGdzaMZK9+M7LXN2FKz4eGM= -google.golang.org/protobuf v0.0.0-20200109180630-ec00e32a8dfd/go.mod h1:DFci5gLYBciE7Vtevhsrf46CRTquxDuWsQurQQe4oz8= -google.golang.org/protobuf v0.0.0-20200221191635-4d8936d0db64/go.mod h1:kwYJMbMJ01Woi6D6+Kah6886xMZcty6N08ah7+eCXa0= -google.golang.org/protobuf v0.0.0-20200228230310-ab0ca4ff8a60/go.mod h1:cfTl7dwQJ+fmap5saPgwCLgHXTUD7jkjRqWcaiX5VyM= -google.golang.org/protobuf v1.20.1-0.20200309200217-e05f789c0967/go.mod h1:A+miEFZTKqfCUM6K7xSMQL9OKL/b6hQv+e19PK+JZNE= -google.golang.org/protobuf v1.21.0/go.mod h1:47Nbq4nVaFHyn7ilMalzfO3qCViNmqZ2kzikPIcrTAo= -google.golang.org/protobuf v1.22.0/go.mod 
h1:EGpADcykh3NcUnDUJcl1+ZksZNG86OlYog2l/sGQquU= -google.golang.org/protobuf v1.23.1-0.20200526195155-81db48ad09cc/go.mod h1:EGpADcykh3NcUnDUJcl1+ZksZNG86OlYog2l/sGQquU= -google.golang.org/protobuf v1.24.0/go.mod h1:r/3tXBNzIEhYS9I1OUVjXDlt8tc493IdKGjtUeSXeh4= +google.golang.org/grpc v1.62.0 h1:HQKZ/fa1bXkX1oFOvSjmZEUL8wLSaZTjCcLAlmZRtdk= +google.golang.org/grpc v1.62.0/go.mod h1:IWTG0VlJLCh1SkC58F7np9ka9mx/WNkjl4PGJaiq+QE= google.golang.org/protobuf v1.26.0-rc.1/go.mod h1:jlhhOSvTdKEhbULTjvd4ARK9grFBp09yW+WbY/TyQbw= google.golang.org/protobuf v1.26.0/go.mod h1:9q0QmTI4eRPtz6boOQmLYwt+qCgq0jsYwAQnmE0givc= google.golang.org/protobuf v1.32.0 h1:pPC6BG5ex8PDFnkbrGU3EixyhKcQ2aDuBS36lqK/C7I= google.golang.org/protobuf v1.32.0/go.mod h1:c6P6GXX6sHbq/GpV6MGZEdwhWPcYBgnhAHhKbcUYpos= gopkg.in/check.v1 v0.0.0-20161208181325-20d25e280405/go.mod h1:Co6ibVJAznAaIkqp8huTwlJQCZ016jof/cbN4VW5Yz0= gopkg.in/check.v1 v1.0.0-20180628173108-788fd7840127/go.mod h1:Co6ibVJAznAaIkqp8huTwlJQCZ016jof/cbN4VW5Yz0= -gopkg.in/check.v1 v1.0.0-20190902080502-41f04d3bba15/go.mod h1:Co6ibVJAznAaIkqp8huTwlJQCZ016jof/cbN4VW5Yz0= -gopkg.in/check.v1 v1.0.0-20200227125254-8fa46927fb4f h1:BLraFXnmrev5lT+xlilqcH8XK9/i0At2xKjWk4p6zsU= -gopkg.in/check.v1 v1.0.0-20200227125254-8fa46927fb4f/go.mod h1:Co6ibVJAznAaIkqp8huTwlJQCZ016jof/cbN4VW5Yz0= +gopkg.in/check.v1 v1.0.0-20201130134442-10cb98267c6c h1:Hei/4ADfdWqJk1ZMxUNpqntNwaWcugrBjAiHlqqRiVk= +gopkg.in/check.v1 v1.0.0-20201130134442-10cb98267c6c/go.mod h1:JHkPIbrfpd72SG/EVd6muEfDQjcINNoR0C8j2r3qZ4Q= gopkg.in/inf.v0 v0.9.1 h1:73M5CoZyi3ZLMOyDlQh031Cx6N9NDJ2Vvfl76EDAgDc= gopkg.in/inf.v0 v0.9.1/go.mod h1:cWUDdTG/fYaXco+Dcufb5Vnc6Gp2YChqWtbxRZE0mXw= gopkg.in/yaml.v2 v2.2.2/go.mod h1:hI93XBmqTisBFMUTm0b8Fm+jr3Dg1NNxqwp+5A1VGuI= @@ -253,27 +231,26 @@ gopkg.in/yaml.v2 v2.2.8/go.mod h1:hI93XBmqTisBFMUTm0b8Fm+jr3Dg1NNxqwp+5A1VGuI= gopkg.in/yaml.v2 v2.4.0 h1:D8xgwECY7CYvx+Y2n4sBz93Jn9JRvxdiyyo8CTfuKaY= gopkg.in/yaml.v2 v2.4.0/go.mod h1:RDklbk79AGWmwhnvt/jBztapEOGDOx6ZbXqjP6csGnQ= gopkg.in/yaml.v3 v3.0.0-20200313102051-9f266ea9e77c/go.mod h1:K4uyk7z7BCEPqu6E+C64Yfv1cQ7kz7rIZviUmN+EgEM= -gopkg.in/yaml.v3 v3.0.0-20200615113413-eeeca48fe776/go.mod h1:K4uyk7z7BCEPqu6E+C64Yfv1cQ7kz7rIZviUmN+EgEM= gopkg.in/yaml.v3 v3.0.0-20210107192922-496545a6307b/go.mod h1:K4uyk7z7BCEPqu6E+C64Yfv1cQ7kz7rIZviUmN+EgEM= gopkg.in/yaml.v3 v3.0.1 h1:fxVm/GzAzEWqLHuvctI91KS9hhNmmWOoWu0XTYJS7CA= gopkg.in/yaml.v3 v3.0.1/go.mod h1:K4uyk7z7BCEPqu6E+C64Yfv1cQ7kz7rIZviUmN+EgEM= honnef.co/go/tools v0.0.0-20190102054323-c2f93a96b099/go.mod h1:rf3lG4BRIbNafJWhAfAdb/ePZxsR/4RtNHQocxwk9r4= honnef.co/go/tools v0.0.0-20190523083050-ea95bdfd59fc/go.mod h1:rf3lG4BRIbNafJWhAfAdb/ePZxsR/4RtNHQocxwk9r4= -k8s.io/api v0.26.8 h1:k2OtFmQPWfDUyAuYAwQPftVygF/vz4BMGSKnd15iddM= -k8s.io/api v0.26.8/go.mod h1:QaflR7cmG3V9lIz0VLBM+ylndNN897OAUAoJDcgwiQw= -k8s.io/apimachinery v0.26.8 h1:SzpGtRX3/j/Ylg8Eg65Iobpxi9Jz4vOvI0qcBZyPVrM= -k8s.io/apimachinery v0.26.8/go.mod h1:qYzLkrQ9lhrZRh0jNKo2cfvf/R1/kQONnSiyB7NUJU0= -k8s.io/client-go v0.26.8 h1:pPuTYaVtLlg/7n6rqs3MsKLi4XgNaJ3rTMyS37Y5CKU= -k8s.io/client-go v0.26.8/go.mod h1:1sBQqKmdy9rWZYQnoedpc0gnRXG7kU3HrKZvBe2QbGM= -k8s.io/klog/v2 v2.80.1 h1:atnLQ121W371wYYFawwYx1aEY2eUfs4l3J72wtgAwV4= -k8s.io/klog/v2 v2.80.1/go.mod h1:y1WjHnz7Dj687irZUWR/WLkLc5N1YHtjLdmgWjndZn0= -k8s.io/kube-openapi v0.0.0-20221012153701-172d655c2280 h1:+70TFaan3hfJzs+7VK2o+OGxg8HsuBr/5f6tVAjDu6E= -k8s.io/kube-openapi v0.0.0-20221012153701-172d655c2280/go.mod h1:+Axhij7bCpeqhklhUTe3xmOn6bWxolyZEeyaFpjGtl4= -k8s.io/utils 
v0.0.0-20221107191617-1a15be271d1d h1:0Smp/HP1OH4Rvhe+4B8nWGERtlqAGSftbSbbmm45oFs= -k8s.io/utils v0.0.0-20221107191617-1a15be271d1d/go.mod h1:OLgZIPagt7ERELqWJFomSt595RzquPNLL48iOWgYOg0= -sigs.k8s.io/json v0.0.0-20220713155537-f223a00ba0e2 h1:iXTIw73aPyC+oRdyqqvVJuloN1p0AC/kzH07hu3NE+k= -sigs.k8s.io/json v0.0.0-20220713155537-f223a00ba0e2/go.mod h1:B8JuhiUyNFVKdsE8h686QcCxMaH6HrOAZj4vswFpcB0= -sigs.k8s.io/structured-merge-diff/v4 v4.2.3 h1:PRbqxJClWWYMNV1dhaG4NsibJbArud9kFxnAMREiWFE= -sigs.k8s.io/structured-merge-diff/v4 v4.2.3/go.mod h1:qjx8mGObPmV2aSZepjQjbmb2ihdVs8cGKBraizNC69E= +k8s.io/api v0.29.2 h1:hBC7B9+MU+ptchxEqTNW2DkUosJpp1P+Wn6YncZ474A= +k8s.io/api v0.29.2/go.mod h1:sdIaaKuU7P44aoyyLlikSLayT6Vb7bvJNCX105xZXY0= +k8s.io/apimachinery v0.29.2 h1:EWGpfJ856oj11C52NRCHuU7rFDwxev48z+6DSlGNsV8= +k8s.io/apimachinery v0.29.2/go.mod h1:6HVkd1FwxIagpYrHSwJlQqZI3G9LfYWRPAkUvLnXTKU= +k8s.io/client-go v0.29.2 h1:FEg85el1TeZp+/vYJM7hkDlSTFZ+c5nnK44DJ4FyoRg= +k8s.io/client-go v0.29.2/go.mod h1:knlvFZE58VpqbQpJNbCbctTVXcd35mMyAAwBdpt4jrA= +k8s.io/klog/v2 v2.110.1 h1:U/Af64HJf7FcwMcXyKm2RPM22WZzyR7OSpYj5tg3cL0= +k8s.io/klog/v2 v2.110.1/go.mod h1:YGtd1984u+GgbuZ7e08/yBuAfKLSO0+uR1Fhi6ExXjo= +k8s.io/kube-openapi v0.0.0-20231010175941-2dd684a91f00 h1:aVUu9fTY98ivBPKR9Y5w/AuzbMm96cd3YHRTU83I780= +k8s.io/kube-openapi v0.0.0-20231010175941-2dd684a91f00/go.mod h1:AsvuZPBlUDVuCdzJ87iajxtXuR9oktsTctW/R9wwouA= +k8s.io/utils v0.0.0-20230726121419-3b25d923346b h1:sgn3ZU783SCgtaSJjpcVVlRqd6GSnlTLKgpAAttJvpI= +k8s.io/utils v0.0.0-20230726121419-3b25d923346b/go.mod h1:OLgZIPagt7ERELqWJFomSt595RzquPNLL48iOWgYOg0= +sigs.k8s.io/json v0.0.0-20221116044647-bc3834ca7abd h1:EDPBXCAspyGV4jQlpZSudPeMmr1bNJefnuqLsRAsHZo= +sigs.k8s.io/json v0.0.0-20221116044647-bc3834ca7abd/go.mod h1:B8JuhiUyNFVKdsE8h686QcCxMaH6HrOAZj4vswFpcB0= +sigs.k8s.io/structured-merge-diff/v4 v4.4.1 h1:150L+0vs/8DA78h1u02ooW1/fFq/Lwr+sGiqlzvrtq4= +sigs.k8s.io/structured-merge-diff/v4 v4.4.1/go.mod h1:N8hJocpFajUSSeSJ9bOZ77VzejKZaXsTtZo4/u7Io08= sigs.k8s.io/yaml v1.4.0 h1:Mk1wCc2gy/F0THH0TAp1QYyJNzRm2KCLy3o5ASXVI5E= sigs.k8s.io/yaml v1.4.0/go.mod h1:Ejl7/uTz7PSA4eKMyQCUTnhZYNmLIl+5c2lQPGR2BPY= From bd24133113f81625daadfc33ac2c503454246555 Mon Sep 17 00:00:00 2001 From: Eyal Danieli Date: Sun, 3 Mar 2024 22:15:06 +0200 Subject: [PATCH 061/119] [Model Monitoring] Fix default HTTP path when CE namespace is not `mlrun`[1.6.x] (#5232) --- mlrun/common/model_monitoring/helpers.py | 6 ++++-- mlrun/config.py | 4 ++-- mlrun/model_monitoring/batch.py | 2 +- tests/model_monitoring/test_target_path.py | 2 +- 4 files changed, 8 insertions(+), 6 deletions(-) diff --git a/mlrun/common/model_monitoring/helpers.py b/mlrun/common/model_monitoring/helpers.py index 34b32f82b7a..9b8ea3cab0b 100644 --- a/mlrun/common/model_monitoring/helpers.py +++ b/mlrun/common/model_monitoring/helpers.py @@ -82,13 +82,15 @@ def parse_monitoring_stream_path( if application_name is None: stream_uri = ( mlrun.mlconf.model_endpoint_monitoring.default_http_sink.format( - project=project + project=project, namespace=mlrun.mlconf.namespace ) ) else: stream_uri = ( mlrun.mlconf.model_endpoint_monitoring.default_http_sink_app.format( - project=project, application_name=application_name + project=project, + application_name=application_name, + namespace=mlrun.mlconf.namespace, ) ) return stream_uri diff --git a/mlrun/config.py b/mlrun/config.py index a5a01119cab..4d7021f7180 100644 --- a/mlrun/config.py +++ b/mlrun/config.py @@ -487,8 +487,8 @@ "offline_storage_path": "model-endpoints/{kind}", # 
Default http path that points to the monitoring stream nuclio function. Will be used as a stream path # when the user is working in CE environment and has not provided any stream path. - "default_http_sink": "http://nuclio-{project}-model-monitoring-stream.mlrun.svc.cluster.local:8080", - "default_http_sink_app": "http://nuclio-{project}-{application_name}.mlrun.svc.cluster.local:8080", + "default_http_sink": "http://nuclio-{project}-model-monitoring-stream.{namespace}.svc.cluster.local:8080", + "default_http_sink_app": "http://nuclio-{project}-{application_name}.{namespace}.svc.cluster.local:8080", "batch_processing_function_branch": "master", "parquet_batching_max_events": 10_000, "parquet_batching_timeout_secs": timedelta(minutes=1).total_seconds(), diff --git a/mlrun/model_monitoring/batch.py b/mlrun/model_monitoring/batch.py index 8688a96663d..86ad4120fd3 100644 --- a/mlrun/model_monitoring/batch.py +++ b/mlrun/model_monitoring/batch.py @@ -992,7 +992,7 @@ def _update_drift_in_prometheus( """ stream_http_path = ( mlrun.mlconf.model_endpoint_monitoring.default_http_sink.format( - project=self.project + project=self.project, namespace=mlrun.mlconf.namespace ) ) diff --git a/tests/model_monitoring/test_target_path.py b/tests/model_monitoring/test_target_path.py index 57ed9288864..0c03b98db36 100644 --- a/tests/model_monitoring/test_target_path.py +++ b/tests/model_monitoring/test_target_path.py @@ -67,7 +67,7 @@ def test_get_stream_path(): stream_path = mlrun.model_monitoring.get_stream_path(project=TEST_PROJECT) assert ( stream_path - == f"http://nuclio-{TEST_PROJECT}-model-monitoring-stream.mlrun.svc.cluster.local:8080" + == f"http://nuclio-{TEST_PROJECT}-model-monitoring-stream.{mlrun.mlconf.namespace}.svc.cluster.local:8080" ) # kafka stream path from env From 7fb7b85a74078b5094841dd8ebb216f819d9c060 Mon Sep 17 00:00:00 2001 From: Saar Cohen <66667568+theSaarco@users.noreply.github.com> Date: Tue, 5 Mar 2024 21:46:01 +0200 Subject: [PATCH 062/119] [K8s] Support email-like usernames [1.6.x] (#5247) --- server/api/common/runtime_handlers.py | 4 ++++ server/api/utils/singletons/k8s.py | 17 +++++++++++++++-- 2 files changed, 19 insertions(+), 2 deletions(-) diff --git a/server/api/common/runtime_handlers.py b/server/api/common/runtime_handlers.py index 54052def535..63ffe930989 100644 --- a/server/api/common/runtime_handlers.py +++ b/server/api/common/runtime_handlers.py @@ -43,5 +43,9 @@ def get_resource_labels(function, run=None, scrape_metrics=None): if run_owner: labels[mlrun_key + "owner"] = run_owner + if "@" in run_owner: + run_owner, domain = run_owner.split("@") + labels[mlrun_key + "owner"] = run_owner + labels[mlrun_key + "owner_domain"] = domain return labels diff --git a/server/api/utils/singletons/k8s.py b/server/api/utils/singletons/k8s.py index daec7dd2360..0424bc6c50e 100644 --- a/server/api/utils/singletons/k8s.py +++ b/server/api/utils/singletons/k8s.py @@ -22,6 +22,7 @@ import mlrun import mlrun.common.schemas import mlrun.common.secrets +import mlrun.common.secrets as mlsecrets import mlrun.errors import mlrun.platforms.iguazio import mlrun.runtimes @@ -68,7 +69,7 @@ class SecretTypes: v3io_fuse = "v3io/fuse" -class K8sHelper(mlrun.common.secrets.SecretProviderInterface): +class K8sHelper(mlsecrets.SecretProviderInterface): def __init__(self, namespace=None, silent=False, log=True): self.namespace = namespace or mlrun.mlconf.namespace self.config_file = mlrun.mlconf.kubernetes.kubeconfig_path or None @@ -401,7 +402,7 @@ def store_auth_secret( secret_data, namespace, 
type_=SecretTypes.v3io_fuse, - labels={"mlrun/username": username}, + labels=self._resolve_secret_labels(username), retry_on_conflict=True, ) return secret_name, action @@ -587,6 +588,18 @@ def _decode_secret_data(self, secrets_data, secret_keys=None): results[key] = base64.b64decode(secrets_data[key]).decode("utf-8") return results + def _resolve_secret_labels(self, username): + if not username: + return {} + labels = { + "mlrun/username": username, + } + if "@" in username: + username, domain = username.split("@") + labels["mlrun/username"] = username + labels["mlrun/username_domain"] = domain + return labels + class BasePod: def __init__( From 08fed1d605831f7cf401d307468f4c4df6fe59cc Mon Sep 17 00:00:00 2001 From: Liran BG Date: Tue, 5 Mar 2024 22:56:40 +0200 Subject: [PATCH 063/119] [Automation] Pass envs instead of overflowing envvar [1.6.x] (#5249) --- automation/patch_igz/patch_remote.py | 26 ++++++++++++++++---------- 1 file changed, 16 insertions(+), 10 deletions(-) diff --git a/automation/patch_igz/patch_remote.py b/automation/patch_igz/patch_remote.py index 2e1edb31ccc..ecd98425d18 100755 --- a/automation/patch_igz/patch_remote.py +++ b/automation/patch_igz/patch_remote.py @@ -17,10 +17,9 @@ import io import json import logging -import os import shlex import subprocess -from typing import List +import typing import click import coloredlogs @@ -150,10 +149,12 @@ def _get_current_version(self) -> str: def _make_mlrun(self, target, image_tag, image_name) -> str: logger.info(f"Building mlrun docker image: {target}:{image_tag}") - os.environ["MLRUN_VERSION"] = image_tag - os.environ["MLRUN_DOCKER_REPO"] = self._config["DOCKER_REGISTRY"] + env = { + "MLRUN_VERSION": image_tag, + "MLRUN_DOCKER_REPO": self._config["DOCKER_REGISTRY"], + } cmd = ["make", target] - self._exec_local(cmd, live=True) + self._exec_local(cmd, live=True, env=env) return f"{self._config['DOCKER_REGISTRY']}/{image_name}:{image_tag}" def _connect_to_node(self, node): @@ -439,9 +440,9 @@ def _get_image_tag(tag) -> str: return f"{tag}" @staticmethod - def _execute_local_proc_interactive(cmd): + def _execute_local_proc_interactive(cmd, env=None): proc = subprocess.Popen( - cmd, stdout=subprocess.PIPE, stderr=subprocess.STDOUT, text=True + cmd, stdout=subprocess.PIPE, stderr=subprocess.STDOUT, text=True, env=env ) for line in proc.stdout: yield line @@ -450,17 +451,22 @@ def _execute_local_proc_interactive(cmd): if ret_code: raise subprocess.CalledProcessError(ret_code, cmd) - def _exec_local(self, cmd: List[str], live=False) -> str: + def _exec_local( + self, + cmd: typing.List[str], + live: bool = False, + env: typing.Optional[dict] = None, + ) -> str: logger.debug("Exec local: %s", " ".join(cmd)) buf = io.StringIO() - for line in self._execute_local_proc_interactive(cmd): + for line in self._execute_local_proc_interactive(cmd, env): buf.write(line) if live: print(line, end="") output = buf.getvalue() return output - def _exec_remote(self, cmd: List[str], live=False) -> str: + def _exec_remote(self, cmd: typing.List[str], live=False) -> str: cmd_str = shlex.join(cmd) logger.debug("Exec remote: %s", cmd_str) stdin_stream, stdout_stream, stderr_stream = self._ssh_client.exec_command( From 0d6d1ddfb402d2b5c200711621d4c7d334d02aca Mon Sep 17 00:00:00 2001 From: Alon Maor <48641682+alonmr@users.noreply.github.com> Date: Tue, 5 Mar 2024 22:57:42 +0200 Subject: [PATCH 064/119] [Workflow] Support remote workflow with source on image [1.6.x] (#5251) --- docs/cheat-sheet.md | 2 +- docs/concepts/scheduled-jobs.md | 40 +++++++-- 
 mlrun/common/schemas/__init__.py | 1 +
 mlrun/common/schemas/common.py | 40 +++++++++
 mlrun/common/schemas/project.py | 2 +
 mlrun/config.py | 2 +-
 mlrun/db/base.py | 18 +++++
 mlrun/db/httpdb.py | 4 +-
 mlrun/model.py | 5 ++
 mlrun/projects/pipelines.py | 32 +++++---
 mlrun/projects/project.py | 81 ++++++++++++-------
 mlrun/runtimes/base.py | 21 ++++-
 mlrun/runtimes/kubejob.py | 8 +-
 mlrun/runtimes/local.py | 4 +-
 server/api/api/endpoints/functions.py | 9 ++-
 server/api/api/endpoints/workflows.py | 1 +
 server/api/crud/workflows.py | 83 +++++++++++++++----
 server/api/rundb/sqldb.py | 18 +++++
 server/api/runtime_handlers/kubejob.py | 4 +-
 server/api/utils/builder.py | 14 ++--
 tests/api/crud/test_workflows.py | 108 +++++++++++++++++++++++++
 tests/projects/test_project.py | 7 +-
 tests/runtimes/test_run.py | 2 +-
 tests/system/projects/test_project.py | 35 ++++++++
 24 files changed, 455 insertions(+), 86 deletions(-)
 create mode 100644 mlrun/common/schemas/common.py
 create mode 100644 tests/api/crud/test_workflows.py

diff --git a/docs/cheat-sheet.md b/docs/cheat-sheet.md
index 873a00f1ff7..5d7915c39fa 100644
--- a/docs/cheat-sheet.md
+++ b/docs/cheat-sheet.md
@@ -633,7 +633,7 @@ csv_df = csv_source.to_dataframe()
 from pyspark.sql import SparkSession
 
 session = SparkSession.builder.master("local").getOrCreate()
-parquet_source = ParquetSource(name="read", path="v3io://users/admin/getting_started/examples/userdata1.parquet")
+parquet_source = ParquetSource(name="read", path="v3io:///users/admin/getting_started/examples/userdata1.parquet")
 spark_df = parquet_source.to_spark_df(session=session)
 
 # BigQuery
diff --git a/docs/concepts/scheduled-jobs.md b/docs/concepts/scheduled-jobs.md
index 18c48b0716e..4e0e9a2990d 100644
--- a/docs/concepts/scheduled-jobs.md
+++ b/docs/concepts/scheduled-jobs.md
@@ -44,11 +44,41 @@ After loading the project (`load_project`), run the project with the scheduled w
 project.run("main", schedule='0 * * * *')
 ```
 
-```{admonition} Note
-1. Remote workflows can only be performed by a project with a **remote** source (git://github.com/mlrun/something.git, http://some/url/file.zip or http://some/url/file.tar.gz). So you need to either put your code in Git or archive it and then set a source to it.
-    * To set project source use the `project.set_source` method.
-    * To set workflow use the `project.set_workflow` method.
-2. Example for a remote GitHub project - https://github.com/mlrun/project-demo
+Remote/Scheduled workflows can be performed by a project with a remote source, or with a source that is contained in the image.
+A remote source is pulled each time the workflow runs, while a local source is loaded from the image.
+To use a remote source you can either put your code in Git or archive it and then set a source to it (e.g. git://github.com/mlrun/something.git, http://some/url/file.zip, s3://some/url/file.tar.gz etc.). By default, the defined project source will be used.
+* To set the project source use the `project.set_source` method.
+* To set the workflow use the `project.set_workflow` method.
+
+To use a different remote source, specify the source URL when running the workflow with the `project.run(source=...)` method.
+You can also use a context path to load the project from a local directory contained in the image used for execution:
+* To set the project source use the `project.set_source` method (make sure `pull_at_runtime` is set to `False`).
+* To build the image with the project yaml and code use the `project.build_image` method. Optionally specify a `target_dir` for the project content.
+* Create the workflow e.g. `project.set_workflow(name="my-workflow", workflow_path="./src/workflow.py")`.
+* The default workflow image is `project.spec.default_image`, which is enriched and built by `project.build_image`, unless specified otherwise.
+* Run the workflow with the context path e.g. `project.run("my-workflow", source="./", engine="remote")`. The `source` can be an absolute path, or a relative path starting with `"."` or `"./"`.
+
Example for a remote GitHub project - https://github.com/mlrun/project-demo

```
import mlrun
project_name = "remote-workflow-example"
source_url = "git://github.com/mlrun/project-demo.git"
source_code_target_dir = "./project"  # Optional, relative to "/home/mlrun_code". A different absolute path can be specified.

# Create a new project
project = mlrun.load_project(context=f"./{project_name}", url=source_url, name=project_name)

# Set the project source and workflow
project.set_source(source_url)
project.set_workflow(name="main", workflow_path="kflow.py")

# Build the image, load the source to the target dir and save the project
project.build_image(target_dir=source_code_target_dir)
project.save()

# Run the workflow, load the project from the target dir on the image
project.run("main", source="./", engine="remote", dirty=True)
```

You can delete a scheduled workflow in the MLRun UI. To update a scheduled workflow, re-define the schedule in the workflow, for example:
diff --git a/mlrun/common/schemas/__init__.py b/mlrun/common/schemas/__init__.py
index d046e566140..c8575a3fe65 100644
--- a/mlrun/common/schemas/__init__.py
+++ b/mlrun/common/schemas/__init__.py
@@ -43,6 +43,7 @@
     ClusterizationSpec,
     WaitForChiefToReachOnlineStateFeatureFlag,
 )
+from .common import ImageBuilder
 from .constants import (
     APIStates,
     ClusterizationRole,
diff --git a/mlrun/common/schemas/common.py b/mlrun/common/schemas/common.py
new file mode 100644
index 00000000000..f4844ad1f46
--- /dev/null
+++ b/mlrun/common/schemas/common.py
@@ -0,0 +1,40 @@
+# Copyright 2023 Iguazio
+#
+# Licensed under the Apache License, Version 2.0 (the "License");
+# you may not use this file except in compliance with the License.
+# You may obtain a copy of the License at
+#
+# http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS,
+# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+# See the License for the specific language governing permissions and
+# limitations under the License.
+# +import typing + +import pydantic + + +class ImageBuilder(pydantic.BaseModel): + functionSourceCode: typing.Optional[str] = None + codeEntryType: typing.Optional[str] = None + codeEntryAttributes: typing.Optional[str] = None + source: typing.Optional[str] = None + code_origin: typing.Optional[str] = None + origin_filename: typing.Optional[str] = None + image: typing.Optional[str] = None + base_image: typing.Optional[str] = None + commands: typing.Optional[list] = None + extra: typing.Optional[str] = None + extra_args: typing.Optional[dict] = None + builder_env: typing.Optional[dict] = None + secret: typing.Optional[str] = None + registry: typing.Optional[str] = None + load_source_on_run: typing.Optional[bool] = None + with_mlrun: typing.Optional[bool] = None + auto_build: typing.Optional[bool] = None + build_pod: typing.Optional[str] = None + requirements: typing.Optional[list] = None + source_code_target_dir: typing.Optional[str] = None diff --git a/mlrun/common/schemas/project.py b/mlrun/common/schemas/project.py index 00dbe0e52eb..6f978629558 100644 --- a/mlrun/common/schemas/project.py +++ b/mlrun/common/schemas/project.py @@ -19,6 +19,7 @@ import mlrun.common.types +from .common import ImageBuilder from .object import ObjectKind, ObjectStatus @@ -85,6 +86,7 @@ class ProjectSpec(pydantic.BaseModel): desired_state: typing.Optional[ProjectDesiredState] = ProjectDesiredState.online custom_packagers: typing.Optional[typing.List[typing.Tuple[str, bool]]] = None default_image: typing.Optional[str] = None + build: typing.Optional[ImageBuilder] = None class Config: extra = pydantic.Extra.allow diff --git a/mlrun/config.py b/mlrun/config.py index 4d7021f7180..260fc694d2e 100644 --- a/mlrun/config.py +++ b/mlrun/config.py @@ -608,7 +608,7 @@ "workflows": { "default_workflow_runner_name": "workflow-runner-{}", # Default timeout seconds for retrieving workflow id after execution: - "timeouts": {"local": 120, "kfp": 30, "remote": 30}, + "timeouts": {"local": 120, "kfp": 30, "remote": 90}, }, "log_collector": { "address": "localhost:8282", diff --git a/mlrun/db/base.py b/mlrun/db/base.py index a761a959c62..2a9ef604ade 100644 --- a/mlrun/db/base.py +++ b/mlrun/db/base.py @@ -677,3 +677,21 @@ def start_function( self, func_url: str = None, function: "mlrun.runtimes.BaseRuntime" = None ): pass + + def submit_workflow( + self, + project: str, + name: str, + workflow_spec: Union[ + "mlrun.projects.pipelines.WorkflowSpec", + "mlrun.common.schemas.WorkflowSpec", + dict, + ], + arguments: Optional[dict] = None, + artifact_path: Optional[str] = None, + source: Optional[str] = None, + run_name: Optional[str] = None, + namespace: Optional[str] = None, + notifications: list["mlrun.model.Notification"] = None, + ) -> "mlrun.common.schemas.WorkflowResponse": + pass diff --git a/mlrun/db/httpdb.py b/mlrun/db/httpdb.py index 28182a131ea..cab2a95bc13 100644 --- a/mlrun/db/httpdb.py +++ b/mlrun/db/httpdb.py @@ -3453,8 +3453,8 @@ def submit_workflow( source: Optional[str] = None, run_name: Optional[str] = None, namespace: Optional[str] = None, - notifications: typing.List[mlrun.model.Notification] = None, - ): + notifications: list[mlrun.model.Notification] = None, + ) -> mlrun.common.schemas.WorkflowResponse: """ Submitting workflow for a remote execution. 
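
The `submit_workflow` signature above now returns a `mlrun.common.schemas.WorkflowResponse`. Below is a minimal client-side sketch of the call, assuming a reachable MLRun API (via `MLRUN_DBPATH`) and illustrative names — `my-project`, `main`, and the dict contents are not taken from this patch; the `Union` type in `db/base.py` above permits passing the workflow spec as a plain dict:

```
import mlrun

# Standard accessor for the MLRun run DB client;
# assumes MLRUN_DBPATH points at a running API server.
db = mlrun.get_run_db()

# Submit the "main" workflow of "my-project" for remote execution.
# Per this patch, `source` may be a remote URL or a context path on the
# image; a relative path is resolved on the server side against
# project.spec.build.source_code_target_dir.
response = db.submit_workflow(
    project="my-project",  # illustrative project name
    name="main",           # illustrative workflow name
    workflow_spec={"name": "main", "engine": "remote"},
    source="./",           # context path baked into the image
)

# The WorkflowResponse carries the workflow-runner's run metadata.
print(response.name)
```

Whether `source` is treated as a remote URL or as an in-image path is decided by `_validate_source` in `server/api/crud/workflows.py`, further down in this patch.
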
diff --git a/mlrun/model.py b/mlrun/model.py index 1b9d3f3843c..d2d616c9805 100644 --- a/mlrun/model.py +++ b/mlrun/model.py @@ -359,6 +359,7 @@ def __init__( requirements: list = None, extra_args=None, builder_env=None, + source_code_target_dir=None, ): self.functionSourceCode = functionSourceCode #: functionSourceCode self.codeEntryType = "" #: codeEntryType @@ -379,6 +380,7 @@ def __init__( self.auto_build = auto_build #: auto_build self.build_pod = None self.requirements = requirements or [] #: pip requirements + self.source_code_target_dir = source_code_target_dir or None @property def source(self): @@ -415,6 +417,7 @@ def build_config( overwrite=False, builder_env=None, extra_args=None, + source_code_target_dir=None, ): if image: self.image = image @@ -440,6 +443,8 @@ def build_config( self.builder_env = builder_env if extra_args: self.extra_args = extra_args + if source_code_target_dir: + self.source_code_target_dir = source_code_target_dir def with_commands( self, diff --git a/mlrun/projects/pipelines.py b/mlrun/projects/pipelines.py index c11e1d2e884..429b343a75f 100644 --- a/mlrun/projects/pipelines.py +++ b/mlrun/projects/pipelines.py @@ -69,16 +69,16 @@ class WorkflowSpec(mlrun.model.ModelObj): def __init__( self, - engine=None, - code=None, - path=None, - args=None, - name=None, - handler=None, - args_schema: dict = None, + engine: typing.Optional[str] = None, + code: typing.Optional[str] = None, + path: typing.Optional[str] = None, + args: typing.Optional[dict] = None, + name: typing.Optional[str] = None, + handler: typing.Optional[str] = None, + args_schema: typing.Optional[dict] = None, schedule: typing.Union[str, mlrun.common.schemas.ScheduleCronTrigger] = None, - cleanup_ttl: int = None, - image: str = None, + cleanup_ttl: typing.Optional[int] = None, + image: typing.Optional[str] = None, ): self.engine = engine self.code = code @@ -401,6 +401,9 @@ def enrich_function_object( else: f.spec.build.source = project.spec.source f.spec.build.load_source_on_run = project.spec.load_source_on_run + f.spec.build.source_code_target_dir = ( + project.spec.build.source_code_target_dir + ) f.spec.workdir = project.spec.workdir or project.spec.subpath f.prepare_image_for_deploy() @@ -862,6 +865,11 @@ def run( ) return + logger.debug( + "Workflow submitted, waiting for pipeline run to start", + workflow_name=workflow_response.name, + ) + # Getting workflow id from run: response = retry_until_successful( 1, @@ -988,6 +996,7 @@ def load_and_run( cleanup_ttl: int = None, load_only: bool = False, wait_for_completion: bool = False, + project_context: str = None, ): """ Auxiliary function that the RemoteRunner run once or run every schedule. @@ -1018,10 +1027,11 @@ def load_and_run( workflow and all its resources are deleted) :param load_only: for just loading the project, inner use. 
:param wait_for_completion: wait for workflow completion before returning + :param project_context: project context path (used for loading the project) """ try: project = mlrun.load_project( - context=f"./{project_name}", + context=project_context or f"./{project_name}", url=url, name=project_name, init_git=init_git, @@ -1053,7 +1063,7 @@ def load_and_run( raise error - context.logger.info(f"Loaded project {project.name} from remote successfully") + context.logger.info(f"Loaded project {project.name} successfully") if load_only: return diff --git a/mlrun/projects/project.py b/mlrun/projects/project.py index 7ecf5ac5a12..3e398bd1d12 100644 --- a/mlrun/projects/project.py +++ b/mlrun/projects/project.py @@ -605,9 +605,14 @@ def _load_project_dir(context, name="", subpath=""): # If there is a setup script do not force having project.yaml file project = MlrunProject() else: - raise mlrun.errors.MLRunNotFoundError( - "project or function YAML not found in path" + message = "Project or function YAML not found in path" + logger.error( + message, + context=context, + name=name, + subpath=subpath, ) + raise mlrun.errors.MLRunNotFoundError(message) project.spec.context = context project.metadata.name = name or project.metadata.name @@ -1235,20 +1240,20 @@ def set_workflow( self, name, workflow_path: str, - embed=False, - engine=None, - args_schema: typing.List[EntrypointParam] = None, - handler=None, + embed: bool = False, + engine: Optional[str] = None, + args_schema: list[EntrypointParam] = None, + handler: Optional[str] = None, schedule: typing.Union[str, mlrun.common.schemas.ScheduleCronTrigger] = None, - ttl=None, - image: str = None, + ttl: Optional[int] = None, + image: Optional[str] = None, **args, ): """Add or update a workflow, specify a name and the code path :param name: Name of the workflow :param workflow_path: URL (remote) / Path (absolute or relative to the project code path i.e. - /) for the workflow file. + /) for the workflow file. :param embed: Add the workflow code into the project.yaml :param engine: Workflow processing engine ("kfp", "local", "remote" or "remote:local") :param args_schema: List of arg schema definitions (:py:class`~mlrun.model.EntrypointParam`) @@ -2595,40 +2600,45 @@ def run( cleanup_ttl: int = None, notifications: typing.List[mlrun.model.Notification] = None, ) -> _PipelineRunStatus: - """run a workflow using kubeflow pipelines + """Run a workflow using kubeflow pipelines - :param name: name of the workflow + :param name: Name of the workflow :param workflow_path: - url to a workflow file, if not a project workflow + URL to a workflow file, if not a project workflow :param arguments: - kubeflow pipelines arguments (parameters) + Kubeflow pipelines arguments (parameters) :param artifact_path: - target path/url for workflow artifacts, the string + Target path/url for workflow artifacts, the string '{{workflow.uid}}' will be replaced by workflow id :param workflow_handler: - workflow function handler (for running workflow function directly) - :param namespace: kubernetes namespace if other than default - :param sync: force functions sync before run - :param watch: wait for pipeline completion - :param dirty: allow running the workflow when the git repo is dirty - :param engine: workflow engine running the workflow. - supported values are 'kfp' (default), 'local' or 'remote'. - for setting engine for remote running use 'remote:local' or 'remote:kfp'. 
-        :param local: run local pipeline with local functions (set local=True in function.run())
+            Workflow function handler (for running workflow function directly)
+        :param namespace: Kubernetes namespace if other than default
+        :param sync: Force functions sync before run
+        :param watch: Wait for pipeline completion
+        :param dirty: Allow running the workflow when the git repo is dirty
+        :param engine: Workflow engine running the workflow.
+            Supported values are 'kfp' (default), 'local' or 'remote'.
+            For setting engine for remote running use 'remote:local' or 'remote:kfp'.
+        :param local: Run local pipeline with local functions (set local=True in function.run())
         :param schedule: ScheduleCronTrigger class instance or a standard crontab expression string
                          (which will be converted to the class using its `from_crontab` constructor),
                          see this link for help:
                          https://apscheduler.readthedocs.io/en/3.x/modules/triggers/cron.html#module-apscheduler.triggers.cron
                          for using the pre-defined workflow's schedule, set `schedule=True`
-        :param timeout: timeout in seconds to wait for pipeline completion (watch will be activated)
-        :param source: remote source to use instead of the actual `project.spec.source` (used when engine is remote).
-                       for other engines the source is to validate that the code is up-to-date
+        :param timeout: Timeout in seconds to wait for pipeline completion (watch will be activated)
+        :param source: Source to use instead of the actual `project.spec.source` (used when engine is remote).
+            Can be one of:
+            1. Remote URL which is loaded dynamically to the workflow runner.
+            2. A path to the project's context on the workflow runner's image.
+            Path can be absolute or relative to `project.spec.build.source_code_target_dir` if defined
+            (enriched when building a project image with source, see `MlrunProject.build_image`).
+            For other engines the source is used to validate that the code is up-to-date.
         :param cleanup_ttl:
-            pipeline cleanup ttl in secs (time to wait after workflow completion, at which point the
-            workflow and all its resources are deleted)
+            Pipeline cleanup ttl in secs (time to wait after workflow completion, at which point the
+            workflow and all its resources are deleted)
         :param notifications:
-            list of notifications to send for workflow completion
-        :returns: run id
+            List of notifications to send for workflow completion
+        :returns: Run id
         """
 
         arguments = arguments or {}
@@ -3027,6 +3037,7 @@ def build_config(
         requirements_file: str = None,
         builder_env: dict = None,
         extra_args: str = None,
+        source_code_target_dir: str = None,
     ):
         """specify builder configuration for the project
 
@@ -3047,6 +3058,8 @@
            e.g. builder_env={"GIT_TOKEN": token}, does not work yet in KFP
         :param extra_args: A string containing additional builder arguments in the format of command-line options,
             e.g. 
extra_args="--skip-tls-verify --build-arg A=val" + :param source_code_target_dir: Path on the image where source code would be extracted + (by default `/home/mlrun_code`) """ if not overwrite_build_params: # TODO: change overwrite_build_params default to True in 1.8.0 @@ -3070,6 +3083,7 @@ def build_config( overwrite=overwrite_build_params, builder_env=builder_env, extra_args=extra_args, + source_code_target_dir=source_code_target_dir, ) if set_as_default and image != self.default_image: @@ -3116,7 +3130,7 @@ def build_image( * False: The new params are merged with the existing * True: The existing params are replaced by the new ones :param extra_args: A string containing additional builder arguments in the format of command-line options, - e.g. extra_args="--skip-tls-verify --build-arg A=val"r + e.g. extra_args="--skip-tls-verify --build-arg A=val" :param target_dir: Path on the image where source code would be extracted (by default `/home/mlrun_code`) """ if not base_image: @@ -3184,6 +3198,11 @@ def build_image( force_build=True, ) + # Get the enriched target dir from the function + self.spec.build.source_code_target_dir = ( + function.spec.build.source_code_target_dir + ) + try: mlrun.db.get_run_db(secrets=self._secrets).delete_function( name=function.metadata.name diff --git a/mlrun/runtimes/base.py b/mlrun/runtimes/base.py index 865703a1f66..fe1ac650626 100644 --- a/mlrun/runtimes/base.py +++ b/mlrun/runtimes/base.py @@ -15,6 +15,7 @@ import http import re import typing +import warnings from base64 import b64encode from os import environ from typing import Callable, Dict, List, Optional, Union @@ -124,7 +125,7 @@ def __init__( self.allow_empty_resources = None # the build.source is cloned/extracted to the specified clone_target_dir # if a relative path is specified, it will be enriched with a temp dir path - self.clone_target_dir = clone_target_dir or "" + self._clone_target_dir = clone_target_dir or None @property def build(self) -> ImageBuilder: @@ -134,6 +135,24 @@ def build(self) -> ImageBuilder: def build(self, build): self._build = self._verify_dict(build, "build", ImageBuilder) + @property + def clone_target_dir(self): + warnings.warn( + "The clone_target_dir attribute is deprecated in 1.6.2 and will be removed in 1.8.0. " + "Use spec.build.source_code_target_dir instead.", + FutureWarning, + ) + return self.build.source_code_target_dir + + @clone_target_dir.setter + def clone_target_dir(self, clone_target_dir): + warnings.warn( + "The clone_target_dir attribute is deprecated in 1.6.2 and will be removed in 1.8.0. 
" + "Use spec.build.source_code_target_dir instead.", + FutureWarning, + ) + self.build.source_code_target_dir = clone_target_dir + def enrich_function_preemption_spec(self): pass diff --git a/mlrun/runtimes/kubejob.py b/mlrun/runtimes/kubejob.py index 36d8875425c..9035ef82d8a 100644 --- a/mlrun/runtimes/kubejob.py +++ b/mlrun/runtimes/kubejob.py @@ -73,7 +73,7 @@ def with_source_archive( if workdir: self.spec.workdir = workdir if target_dir: - self.spec.clone_target_dir = target_dir + self.spec.build.source_code_target_dir = target_dir self.spec.build.load_source_on_run = pull_at_runtime if ( @@ -232,8 +232,10 @@ def deploy( self.spec.build.base_image = self.spec.build.base_image or get_in( data, "data.spec.build.base_image" ) - # get the clone target dir in case it was enriched due to loading source - self.spec.clone_target_dir = get_in(data, "data.spec.clone_target_dir") + # Get the source target dir in case it was enriched due to loading source + self.spec.build.source_code_target_dir = get_in( + data, "data.spec.build.source_code_target_dir" + ) or get_in(data, "data.spec.clone_target_dir") ready = data.get("ready", False) if not ready: logger.info( diff --git a/mlrun/runtimes/local.py b/mlrun/runtimes/local.py index 474f3880825..260dc25bf03 100644 --- a/mlrun/runtimes/local.py +++ b/mlrun/runtimes/local.py @@ -218,7 +218,7 @@ def with_source_archive(self, source, workdir=None, handler=None, target_dir=Non if workdir: self.spec.workdir = workdir if target_dir: - self.spec.clone_target_dir = target_dir + self.spec.build.source_code_target_dir = target_dir def is_deployed(self): return True @@ -240,7 +240,7 @@ def _pre_run(self, runobj: RunObject, execution: MLClientCtx): if self.spec.build.source and not hasattr(self, "_is_run_local"): target_dir = extract_source( self.spec.build.source, - self.spec.clone_target_dir, + self.spec.build.source_code_target_dir, secrets=execution._secrets_manager, ) if workdir and not workdir.startswith("/"): diff --git a/server/api/api/endpoints/functions.py b/server/api/api/endpoints/functions.py index cb771d73a8c..a9f80043004 100644 --- a/server/api/api/endpoints/functions.py +++ b/server/api/api/endpoints/functions.py @@ -312,8 +312,15 @@ async def build_function( client_python_version, force_build, ) + + # clone_target_dir is deprecated but needs to remain for backward compatibility + func_dict = fn.to_dict() + func_dict["spec"]["clone_target_dir"] = get_in( + data, "data.spec.build.source_code_target_dir" + ) + return { - "data": fn.to_dict(), + "data": func_dict, "ready": ready, } diff --git a/server/api/api/endpoints/workflows.py b/server/api/api/endpoints/workflows.py index d3e57bccf62..ea02e191dfc 100644 --- a/server/api/api/endpoints/workflows.py +++ b/server/api/api/endpoints/workflows.py @@ -215,6 +215,7 @@ async def submit_workflow( runner=workflow_runner, project=project, workflow_request=updated_request, + auth_info=auth_info, ) status = mlrun.run.RunStatuses.running run_uid = run.uid() diff --git a/server/api/crud/workflows.py b/server/api/crud/workflows.py index 6db32879f23..8e6794f789c 100644 --- a/server/api/crud/workflows.py +++ b/server/api/crud/workflows.py @@ -12,6 +12,7 @@ # See the License for the specific language governing permissions and # limitations under the License. 
# +import os import uuid from typing import Dict @@ -141,7 +142,9 @@ def _prepare_run_object_for_scheduling( """ meta_uid = uuid.uuid4().hex - save = self._set_source(project, workflow_request.source) + source, save, is_context = self._validate_source( + project, workflow_request.source + ) workflow_spec = workflow_request.spec run_object = RunObject( spec=RunSpec( @@ -179,6 +182,12 @@ def _prepare_run_object_for_scheduling( ), ) + if is_context: + # The source is a context (local path contained in the image), + # load the project from the context instead of remote URL + run_object.spec.parameters["project_context"] = source + run_object.spec.parameters.pop("url", None) + # Setting labels: return self._label_run_object(run_object, labels) @@ -188,6 +197,7 @@ def run( project: mlrun.common.schemas.Project, workflow_request: mlrun.common.schemas.WorkflowRequest = None, load_only: bool = False, + auth_info: mlrun.common.schemas.AuthInfo = None, ) -> RunObject: """ Run workflow runner. @@ -196,6 +206,7 @@ def run( :param project: MLRun project :param workflow_request: contains the workflow spec, that will be executed :param load_only: If True, will only load the project remotely (without running workflow) + :param auth_info: auth info of the request :returns: run context object (RunObject) with run metadata, results and status """ @@ -225,12 +236,17 @@ def run( ] artifact_path = workflow_request.artifact_path if workflow_request else "" + + # TODO: Passing auth_info is required for server side launcher, but the runner is already enriched with the + # auth_info when it was created in create_runner. We should move the enrichment to the launcher and need to + # make sure it is safe for scheduling and project load endpoint. return runner.run( runspec=run_spec, artifact_path=artifact_path, notifications=notifications, local=False, watch=False, + auth_info=auth_info, ) @staticmethod @@ -290,11 +306,11 @@ def _prepare_run_object_for_single_run( :returns: RunObject ready for execution. """ source = workflow_request.source if workflow_request else "" - save = self._set_source(project, source, load_only) + source, save, is_context = self._validate_source(project, source, load_only) run_object = RunObject( spec=RunSpec( parameters=dict( - url=project.spec.source, + url=source, project_name=project.metadata.name, load_only=load_only, save=save, @@ -310,6 +326,12 @@ def _prepare_run_object_for_single_run( metadata=RunMetadata(name=run_name), ) + if is_context: + # The source is a context (local path contained in the image), + # load the project from the context instead of remote URL + run_object.spec.parameters["project_context"] = source + run_object.spec.parameters.pop("url", None) + if not load_only: workflow_spec = workflow_request.spec run_object.spec.parameters.update( @@ -331,36 +353,63 @@ def _prepare_run_object_for_single_run( return self._label_run_object(run_object, labels) @staticmethod - def _set_source( + def _validate_source( project: mlrun.common.schemas.Project, source: str, load_only: bool = False - ) -> bool: + ) -> tuple[str, bool, bool]: """ - Setting the project source. In case the user provided a source we want to load the project from the source (like from a specific commit/branch from git repo) without changing the source of the project (save=False). :param project: MLRun project - :param source: the source of the project, needs to be a remote URL that contains the project yaml file. 
-        :param load_only: if we only load the project, it must be saved to ensure we are not running a pipeline
+        :param source: The source of the project, remote URL or context on image that contains the
+            project yaml file.
+        :param load_only: If we only load the project, it must be saved to ensure we are not running a pipeline
             without a project as it's not supported.
 
-        :returns: True if the project need to be saved afterward.
+        :returns: A tuple of:
+            [0] = The source string.
+            [1] = Bool if the project needs to be saved afterward.
+            [2] = Bool if the source is a path.
         """
+        source = source or project.spec.source
         save = True
 
-        if source and not load_only:
+        if not source:
+            raise mlrun.errors.MLRunInvalidArgumentError(
+                "Project source is required. Either specify the source in the project or provide it in the request."
+            )
+
+        if not load_only:
             save = False
-            project.spec.source = source
 
-        if "://" not in project.spec.source:
+        # Path-like source is not supported for load_only since it uses the mlrun default image
+        if source.startswith("/"):
+            return source, save, True
+
+        if source.startswith("./") or source == ".":
+            # When the source is relative, it is relative to the project's source_code_target_dir
+            # If the project's source_code_target_dir is not set, the source is relative to the cwd
+            if project.spec.build and project.spec.build.source_code_target_dir:
+                source = os.path.normpath(
+                    os.path.join(project.spec.build.source_code_target_dir, source)
+                )
+            return source, save, True
+
+        if "://" not in source:
+            if load_only:
+                raise mlrun.errors.MLRunInvalidArgumentError(
+                    f"Invalid URL '{source}' for loading project '{project.metadata.name}'. "
+                    f"Expected to be in format: <scheme>://<netloc>/<path>;<params>?<query>#<fragment>."
+                )
+
             raise mlrun.errors.MLRunInvalidArgumentError(
-                f"Remote workflows can only be performed by a project with remote source (e.g git:// or http://),"
-                f" but the specified source '{project.spec.source}' is not remote. "
-                f"Either put your code in Git, or archive it and then set a source to it."
-                f" For more details, read"
+                f"Invalid source '{source}' for remote workflow. "
+                f"Expected to be a remote URL or a path to the project context on image."
+ f" For more details, see" f" https://docs.mlrun.org/en/latest/concepts/scheduled-jobs.html#scheduling-a-workflow" ) - return save + + return source, save, False @staticmethod def _label_run_object( diff --git a/server/api/rundb/sqldb.py b/server/api/rundb/sqldb.py index 463099dbcef..4e7bd72ee78 100644 --- a/server/api/rundb/sqldb.py +++ b/server/api/rundb/sqldb.py @@ -939,6 +939,24 @@ def store_datastore_profile( ): raise NotImplementedError() + def submit_workflow( + self, + project: str, + name: str, + workflow_spec: Union[ + mlrun.projects.pipelines.WorkflowSpec, + mlrun.common.schemas.WorkflowSpec, + dict, + ], + arguments: Optional[dict] = None, + artifact_path: Optional[str] = None, + source: Optional[str] = None, + run_name: Optional[str] = None, + namespace: Optional[str] = None, + notifications: list[mlrun.model.Notification] = None, + ) -> "mlrun.common.schemas.WorkflowResponse": + raise NotImplementedError() + def _transform_db_error(self, func, *args, **kwargs): try: return func(*args, **kwargs) diff --git a/server/api/runtime_handlers/kubejob.py b/server/api/runtime_handlers/kubejob.py index d87610049d2..c9f6ef7d9aa 100644 --- a/server/api/runtime_handlers/kubejob.py +++ b/server/api/runtime_handlers/kubejob.py @@ -162,11 +162,11 @@ def _resolve_workdir(runtime: mlrun.runtimes.KubejobRuntime): if workdir and os.path.isabs(workdir): return workdir - if runtime.spec.clone_target_dir: + if runtime.spec.build.source_code_target_dir: workdir = workdir or "" workdir = workdir.removeprefix("./") - return os.path.join(runtime.spec.clone_target_dir, workdir) + return os.path.join(runtime.spec.build.source_code_target_dir, workdir) return workdir diff --git a/server/api/utils/builder.py b/server/api/utils/builder.py index 09798afce9c..aa978eb9f23 100644 --- a/server/api/utils/builder.py +++ b/server/api/utils/builder.py @@ -487,14 +487,18 @@ def build_image( user_unix_id = runtime.spec.security_context.run_as_user enriched_group_id = runtime.spec.security_context.run_as_group + source_code_target_dir = ( + runtime.spec.build.source_code_target_dir or runtime.spec.clone_target_dir + ) if source_to_copy and ( - not runtime.spec.clone_target_dir - or not os.path.isabs(runtime.spec.clone_target_dir) + not source_code_target_dir or not os.path.isabs(source_code_target_dir) ): - relative_workdir = runtime.spec.clone_target_dir or "" + relative_workdir = source_code_target_dir or "" relative_workdir = relative_workdir.removeprefix("./") - runtime.spec.clone_target_dir = path.join("/home/mlrun_code", relative_workdir) + runtime.spec.build.source_code_target_dir = path.join( + "/home/mlrun_code", relative_workdir + ) dock = make_dockerfile( base_image, @@ -504,7 +508,7 @@ def build_image( extra=extra, user_unix_id=user_unix_id, enriched_group_id=enriched_group_id, - target_dir=runtime.spec.clone_target_dir, + target_dir=runtime.spec.build.source_code_target_dir, builder_env=builder_env_list, project_secrets=project_secrets, extra_args=extra_args, diff --git a/tests/api/crud/test_workflows.py b/tests/api/crud/test_workflows.py new file mode 100644 index 00000000000..d34bff6a02a --- /dev/null +++ b/tests/api/crud/test_workflows.py @@ -0,0 +1,108 @@ +# Copyright 2023 Iguazio +# +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. 
+# You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. +# +import os.path + +import pytest +import sqlalchemy.orm + +import mlrun.common.schemas +import server.api.crud +import tests.api.conftest + + +class TestWorkflows(tests.api.conftest.MockedK8sHelper): + @pytest.mark.parametrize( + "source_code_target_dir", + [ + "/home/mlrun_code", + None, + ], + ) + @pytest.mark.parametrize( + "source", + [ + "/home/mlrun/project-name/", + "./project-name", + "git://github.com/mlrun/project-name.git", + ], + ) + def test_run_workflow_with_local_source( + self, + db: sqlalchemy.orm.Session, + k8s_secrets_mock, + source_code_target_dir: str, + source: str, + ): + project = mlrun.common.schemas.Project( + metadata=mlrun.common.schemas.ProjectMetadata(name="project-name"), + spec=mlrun.common.schemas.ProjectSpec(), + ) + if source_code_target_dir: + project.spec.build = mlrun.common.schemas.common.ImageBuilder( + source_code_target_dir=source_code_target_dir + ) + + server.api.crud.Projects().create_project(db, project) + + run_name = "run-name" + runner = server.api.crud.WorkflowRunners().create_runner( + run_name=run_name, + project=project.metadata.name, + db_session=db, + auth_info=mlrun.common.schemas.AuthInfo(), + image="mlrun/mlrun", + ) + + run = server.api.crud.WorkflowRunners().run( + runner=runner, + project=project, + workflow_request=mlrun.common.schemas.WorkflowRequest( + spec=mlrun.common.schemas.WorkflowSpec( + name=run_name, + engine="remote", + code=None, + path=None, + args=None, + handler=None, + ttl=None, + args_schema=None, + schedule=None, + run_local=None, + image="mlrun/mlrun", + ), + source=source, + artifact_path="/home/mlrun/artifacts", + ), + auth_info=mlrun.common.schemas.AuthInfo(), + ) + + assert run.metadata.name == run_name + assert run.metadata.project == project.metadata.name + if "://" in source: + assert run.spec.parameters["url"] == source + assert "project_context" not in run.spec.parameters + else: + if source_code_target_dir and source.startswith("."): + expected_project_context = os.path.normpath( + os.path.join(source_code_target_dir, source) + ) + assert ( + run.spec.parameters["project_context"] == expected_project_context + ) + else: + assert run.spec.parameters["project_context"] == source + assert "url" not in run.spec.parameters + + assert run.spec.handler == "mlrun.projects.load_and_run" diff --git a/tests/projects/test_project.py b/tests/projects/test_project.py index 21355b7b1c1..e2a6150da04 100644 --- a/tests/projects/test_project.py +++ b/tests/projects/test_project.py @@ -1442,6 +1442,7 @@ def test_init_function_from_dict_function_in_spec(): "commands": [], "load_source_on_run": False, "requirements": ["pyspark==3.2.3"], + "source_code_target_dir": "/home/mlrun_code/", }, "description": "", "disable_auto_mount": False, @@ -1552,8 +1553,8 @@ def test_project_create_remote(): @pytest.mark.parametrize( "source_url, pull_at_runtime, base_image, image_name, target_dir", [ - (None, None, "aaa/bbb", "ccc/ddd", ""), - ("git://some/repo", False, None, ".some-image", ""), + (None, None, "aaa/bbb", "ccc/ddd", None), + ("git://some/repo", False, None, ".some-image", None), ( "git://some/other/repo", False, 
@@ -1586,7 +1587,7 @@ def test_project_build_image(
     if pull_at_runtime:
         assert build_config.load_source_on_run is None
         assert build_config.source is None
-        assert clone_target_dir == ""
+        assert clone_target_dir is None
     else:
         assert not build_config.load_source_on_run
         assert build_config.source == source_url
diff --git a/tests/runtimes/test_run.py b/tests/runtimes/test_run.py
index 3475fed25f2..29871f42ab4 100644
--- a/tests/runtimes/test_run.py
+++ b/tests/runtimes/test_run.py
@@ -46,7 +46,7 @@ def _get_runtime():
         "volume_mounts": [],
         "env": [],
         "description": "",
-        "build": {"commands": [], "requirements": []},
+        "build": {"commands": [], "requirements": [], "source_code_target_dir": ""},
         "affinity": None,
         "disable_auto_mount": False,
         "priority_class_name": "",
diff --git a/tests/system/projects/test_project.py b/tests/system/projects/test_project.py
index 6574d874d46..8f344b81b93 100644
--- a/tests/system/projects/test_project.py
+++ b/tests/system/projects/test_project.py
@@ -1351,3 +1351,38 @@ def test_load_project_remotely_with_secrets_failed(self):
         assert state == "error"
         with pytest.raises(mlrun.errors.MLRunNotFoundError):
             db.get_project(name)
+
+    def test_remote_workflow_source_on_image(self):
+        name = "source-project"
+        self.custom_project_names_to_delete.append(name)
+
+        project_dir = f"{projects_dir}/{name}"
+        source = "git://github.com/mlrun/project-demo.git"
+        source_code_target_dir = (
+            "./project"  # Optional, resolves to /home/mlrun_code/project
+        )
+        artifact_path = f"v3io:///projects/{name}"
+
+        project = mlrun.load_project(
+            project_dir,
+            source,
+            name=name,
+        )
+        project.set_source(source)
+
+        # Build the image, load the source to the target dir and save the project
+        project.build_image(target_dir=source_code_target_dir)
+        project.save()
+
+        run = project.run(
+            "main",
+            engine="remote",
+            source="./",  # Relative to project.spec.build.source_code_target_dir
+            artifact_path=artifact_path,
+            dirty=True,
+        )
+        assert run.state == mlrun.run.RunStatuses.succeeded
+
+        # Ensuring that the project's source has not changed in the db:
+        project_from_db = self._run_db.get_project(name)
+        assert project_from_db.source == source

From e45c26b287ce245a693c74417026f85b2557a6c7 Mon Sep 17 00:00:00 2001
From: Liran BG
Date: Wed, 6 Mar 2024 08:31:09 +0200
Subject: [PATCH 065/119] [K8s] Update label verification flow [1.6.x] (#5252)

---
 mlrun/k8s_utils.py | 15 ++++++++++-----
 1 file changed, 10 insertions(+), 5 deletions(-)

diff --git a/mlrun/k8s_utils.py b/mlrun/k8s_utils.py
index 7d440c096fc..4c14c742294 100644
--- a/mlrun/k8s_utils.py
+++ b/mlrun/k8s_utils.py
@@ -134,13 +134,13 @@ def sanitize_label_value(value: str) -> str:
     return re.sub(r"([^a-zA-Z0-9_.-]|^[^a-zA-Z0-9]|[^a-zA-Z0-9]$)", "-", value[:63])


-def verify_label_key(key):
+def verify_label_key(key: str):
+    """
+    Verify that the label key is valid for Kubernetes.
+ Refer to https://kubernetes.io/docs/concepts/overview/working-with-objects/labels/#syntax-and-character-set + """ if not key: raise mlrun.errors.MLRunInvalidArgumentError("label key cannot be empty") - if key.startswith("k8s.io") or key.startswith("kubernetes.io"): - raise mlrun.errors.MLRunInvalidArgumentError( - "Labels cannot start with 'k8s.io' or 'kubernetes.io'" - ) mlrun.utils.helpers.verify_field_regex( f"project.metadata.labels.'{key}'", @@ -148,6 +148,11 @@ def verify_label_key(key): mlrun.utils.regex.k8s_character_limit, ) + if key.startswith("k8s.io/") or key.startswith("kubernetes.io/"): + raise mlrun.errors.MLRunInvalidArgumentError( + "Labels cannot start with 'k8s.io/' or 'kubernetes.io/'" + ) + parts = key.split("/") if len(parts) == 1: name = parts[0] From e1db48cb1b552aeefc480da0e3f7fc2605cacada Mon Sep 17 00:00:00 2001 From: Jonathan Daniel <36337649+jond01@users.noreply.github.com> Date: Wed, 6 Mar 2024 12:10:35 +0200 Subject: [PATCH 066/119] [Linting] Backport Ruff upgrade [1.6.x] (#5253) --- automation/system_test/prepare.py | 6 +- dev-requirements.txt | 2 +- mlrun/config.py | 8 +- mlrun/datastore/azure_blob.py | 18 +-- mlrun/datastore/google_cloud_storage.py | 12 +- mlrun/db/httpdb.py | 18 +-- mlrun/execution.py | 6 +- .../tf_keras/callbacks/logging_callback.py | 6 +- mlrun/frameworks/tf_keras/model_handler.py | 14 +- mlrun/kfpops.py | 6 +- mlrun/model_monitoring/api.py | 16 +-- .../stores/kv_model_endpoint_store.py | 26 ++-- .../stores/sql_model_endpoint_store.py | 1 - mlrun/package/packagers/pandas_packagers.py | 6 +- mlrun/runtimes/function.py | 18 +-- mlrun/runtimes/mpijob/abstract.py | 12 +- mlrun/runtimes/pod.py | 6 +- mlrun/runtimes/serving.py | 6 +- mlrun/runtimes/sparkjob/spark3job.py | 6 +- mlrun/utils/async_http.py | 6 +- mlrun/utils/http.py | 6 +- .../notifications/notification_pusher.py | 12 +- pyproject.toml | 13 +- server/api/api/endpoints/runtime_resources.py | 4 +- server/api/api/endpoints/workflows.py | 6 +- server/api/api/utils.py | 8 +- server/api/apiuvicorn.py | 6 +- .../api/crud/model_monitoring/deployment.py | 6 +- server/api/db/sqldb/db.py | 6 +- server/api/main.py | 6 +- ...487_altering_table_datastore_profiles_2.py | 1 + .../28383af526f3_market_place_to_hub.py | 1 + ...29c_increase_timestamp_fields_precision.py | 1 + ...03aef6a91d_tag_foreign_key_and_cascades.py | 1 + .../59061f6e2a87_add_index_migration.py | 1 + ...351c88a19_adding_background_tasks_table.py | 1 + ...add_requested_logs_column_and_index_to_.py | 1 + ...9d16de5f03a7_adding_data_versions_table.py | 1 + ...7ab5dec_adding_table_datastore_profiles.py | 1 + .../b268044fa2f7_adding_artifacts_v2_table.py | 1 + ...7_adding_name_and_updated_to_runs_table.py | 1 + ...9cbf87203_background_task_error_message.py | 1 + .../c0e342d73bd0_indexing_artifact_v2_key.py | 1 + .../versions/c4af40b0bf61_init.py | 1 + .../versions/c905d15bd91d_notifications.py | 1 + ...dding_next_run_time_column_to_schedule_.py | 1 + ...3_notifications_params_to_secret_params.py | 1 + ...487_altering_table_datastore_profiles_2.py | 1 + .../0b224a1b4e0d_indexing_artifact_v2_key.py | 1 + ...f_notifications_params_to_secret_params.py | 1 + .../versions/11f8dd2dc9fe_init.py | 1 + .../1c954f8cb32d_schedule_last_run_uri.py | 1 + .../2b6d23c715aa_adding_feature_sets.py | 1 + ...fb7e1274d_background_task_error_message.py | 1 + .../4acd9430b093_market_place_to_hub.py | 1 + ...dding_next_run_time_column_to_schedule_.py | 1 + ...90a1a69bc_adding_background_tasks_table.py | 1 + ...531edc7_adding_table_datastore_profiles.py | 
1 + ...cd005_add_requested_logs_column_to_runs.py | 1 + .../863114f0c659_refactoring_feature_set.py | 1 + .../versions/959ae00528ad_notifications.py | 1 + ...accf9fc83d38_adding_data_versions_table.py | 1 + .../versions/b68e8e897a28_schedule_labels.py | 1 + .../bcd0c1f9720c_adding_project_labels.py | 1 + .../bf91ff18513b_add_index_migration.py | 1 + .../versions/cf21882f938e_schedule_id.py | 1 + .../d781f58f607f_tag_object_name_string.py | 1 + ...871ace_adding_marketplace_sources_table.py | 1 + ...e1dd5983c06b_schedule_concurrency_limit.py | 1 + ...3_adding_name_and_updated_to_runs_table.py | 1 + .../f4249b4ba6fa_adding_feature_vectors.py | 1 + .../f7b5a1a03629_adding_feature_labels.py | 1 + .../fa3009d9787f_adding_artifacts_v2_table.py | 1 + server/api/runtime_handlers/__init__.py | 6 +- server/api/runtime_handlers/base.py | 14 +- server/api/utils/builder.py | 6 +- server/api/utils/clients/iguazio.py | 28 ++-- server/api/utils/projects/leader.py | 6 +- tests/api/api/test_runtime_resources.py | 6 +- tests/api/api/test_utils.py | 16 +-- tests/api/crud/test_runs.py | 132 ++++++++++-------- tests/api/db/test_sqldb.py | 1 + tests/api/runtimes/test_kubejob.py | 18 +-- tests/api/utils/clients/test_iguazio.py | 24 ++-- .../automation/release_notes/test_generate.py | 16 ++- tests/integration/aws_s3/test_aws_s3.py | 18 ++- .../integration/azure_blob/test_azure_blob.py | 13 +- .../test_google_cloud_storage.py | 13 +- tests/integration/sdk_api/run/test_main.py | 6 +- tests/rundb/test_httpdb.py | 6 +- tests/runtimes/test_run.py | 36 ++--- .../model_monitoring/test_model_monitoring.py | 18 +-- tests/utils/test_get_secrets.py | 6 +- 93 files changed, 385 insertions(+), 319 deletions(-) diff --git a/automation/system_test/prepare.py b/automation/system_test/prepare.py index 79f6d5eabbf..478f3dddd39 100644 --- a/automation/system_test/prepare.py +++ b/automation/system_test/prepare.py @@ -394,9 +394,9 @@ def _enrich_env(self): spark_service_name = self._get_service_name("app=spark,component=spark-master") self._env_config["MLRUN_IGUAZIO_API_URL"] = f"https://{api_url_host}" self._env_config["V3IO_FRAMESD"] = f"https://{framesd_host}" - self._env_config[ - "MLRUN_SYSTEM_TESTS_DEFAULT_SPARK_SERVICE" - ] = spark_service_name + self._env_config["MLRUN_SYSTEM_TESTS_DEFAULT_SPARK_SERVICE"] = ( + spark_service_name + ) self._env_config["V3IO_API"] = f"https://{v3io_api_host}" self._env_config["MLRUN_DBPATH"] = f"https://{mlrun_api_url}" diff --git a/dev-requirements.txt b/dev-requirements.txt index 2c8140f91a6..e9b6d80d7d5 100644 --- a/dev-requirements.txt +++ b/dev-requirements.txt @@ -3,7 +3,7 @@ twine~=3.1 build~=1.0 # formatting & linting -ruff~=0.1.8 +ruff~=0.3.0 import-linter~=1.8 # testing diff --git a/mlrun/config.py b/mlrun/config.py index 260fc694d2e..f46534be10e 100644 --- a/mlrun/config.py +++ b/mlrun/config.py @@ -960,10 +960,10 @@ def get_default_function_pod_resources( with_gpu = ( with_gpu_requests if requirement == "requests" else with_gpu_limits ) - resources[ - requirement - ] = self.get_default_function_pod_requirement_resources( - requirement, with_gpu + resources[requirement] = ( + self.get_default_function_pod_requirement_resources( + requirement, with_gpu + ) ) return resources diff --git a/mlrun/datastore/azure_blob.py b/mlrun/datastore/azure_blob.py index 920aa08e5bf..0da6e0d2b2f 100644 --- a/mlrun/datastore/azure_blob.py +++ b/mlrun/datastore/azure_blob.py @@ -175,9 +175,9 @@ def get_spark_options(self): if "client_secret" in st or "client_id" in st or "tenant_id" in st: 
res[f"spark.hadoop.fs.azure.account.auth.type.{host}"] = "OAuth" - res[ - f"spark.hadoop.fs.azure.account.oauth.provider.type.{host}" - ] = "org.apache.hadoop.fs.azurebfs.oauth2.ClientCredsTokenProvider" + res[f"spark.hadoop.fs.azure.account.oauth.provider.type.{host}"] = ( + "org.apache.hadoop.fs.azurebfs.oauth2.ClientCredsTokenProvider" + ) if "client_id" in st: res[f"spark.hadoop.fs.azure.account.oauth2.client.id.{host}"] = st[ "client_id" @@ -188,14 +188,14 @@ def get_spark_options(self): ] if "tenant_id" in st: tenant_id = st["tenant_id"] - res[ - f"spark.hadoop.fs.azure.account.oauth2.client.endpoint.{host}" - ] = f"https://login.microsoftonline.com/{tenant_id}/oauth2/token" + res[f"spark.hadoop.fs.azure.account.oauth2.client.endpoint.{host}"] = ( + f"https://login.microsoftonline.com/{tenant_id}/oauth2/token" + ) if "sas_token" in st: res[f"spark.hadoop.fs.azure.account.auth.type.{host}"] = "SAS" - res[ - f"spark.hadoop.fs.azure.sas.token.provider.type.{host}" - ] = "org.apache.hadoop.fs.azurebfs.sas.FixedSASTokenProvider" + res[f"spark.hadoop.fs.azure.sas.token.provider.type.{host}"] = ( + "org.apache.hadoop.fs.azurebfs.sas.FixedSASTokenProvider" + ) res[f"spark.hadoop.fs.azure.sas.fixed.token.{host}"] = st["sas_token"] return res diff --git a/mlrun/datastore/google_cloud_storage.py b/mlrun/datastore/google_cloud_storage.py index 04f7bc2a6ba..980babcf3c6 100644 --- a/mlrun/datastore/google_cloud_storage.py +++ b/mlrun/datastore/google_cloud_storage.py @@ -147,13 +147,13 @@ def get_spark_options(self): if "project_id" in credentials: res["spark.hadoop.fs.gs.project.id"] = credentials["project_id"] if "private_key_id" in credentials: - res[ - "spark.hadoop.fs.gs.auth.service.account.private.key.id" - ] = credentials["private_key_id"] + res["spark.hadoop.fs.gs.auth.service.account.private.key.id"] = ( + credentials["private_key_id"] + ) if "private_key" in credentials: - res[ - "spark.hadoop.fs.gs.auth.service.account.private.key" - ] = credentials["private_key"] + res["spark.hadoop.fs.gs.auth.service.account.private.key"] = ( + credentials["private_key"] + ) if "client_email" in credentials: res["spark.hadoop.fs.gs.auth.service.account.email"] = credentials[ "client_email" diff --git a/mlrun/db/httpdb.py b/mlrun/db/httpdb.py index cab2a95bc13..7afe7ea0b0e 100644 --- a/mlrun/db/httpdb.py +++ b/mlrun/db/httpdb.py @@ -1137,17 +1137,17 @@ def list_runtime_resources( structured_dict = {} for project, job_runtime_resources_map in response.json().items(): for job_id, runtime_resources in job_runtime_resources_map.items(): - structured_dict.setdefault(project, {})[ - job_id - ] = mlrun.common.schemas.RuntimeResources(**runtime_resources) + structured_dict.setdefault(project, {})[job_id] = ( + mlrun.common.schemas.RuntimeResources(**runtime_resources) + ) return structured_dict elif group_by == mlrun.common.schemas.ListRuntimeResourcesGroupByField.project: structured_dict = {} for project, kind_runtime_resources_map in response.json().items(): for kind, runtime_resources in kind_runtime_resources_map.items(): - structured_dict.setdefault(project, {})[ - kind - ] = mlrun.common.schemas.RuntimeResources(**runtime_resources) + structured_dict.setdefault(project, {})[kind] = ( + mlrun.common.schemas.RuntimeResources(**runtime_resources) + ) return structured_dict else: raise NotImplementedError( @@ -1206,9 +1206,9 @@ def delete_runtime_resources( structured_dict = {} for project, kind_runtime_resources_map in response.json().items(): for kind, runtime_resources in 
kind_runtime_resources_map.items(): - structured_dict.setdefault(project, {})[ - kind - ] = mlrun.common.schemas.RuntimeResources(**runtime_resources) + structured_dict.setdefault(project, {})[kind] = ( + mlrun.common.schemas.RuntimeResources(**runtime_resources) + ) return structured_dict def create_schedule( diff --git a/mlrun/execution.py b/mlrun/execution.py index 3121121b776..60767c3462a 100644 --- a/mlrun/execution.py +++ b/mlrun/execution.py @@ -559,9 +559,9 @@ def log_iteration_results(self, best, summary: list, task: dict, commit=False): for k, v in get_in(task, ["status", "results"], {}).items(): self._results[k] = v for artifact in get_in(task, ["status", run_keys.artifacts], []): - self._artifacts_manager.artifacts[ - artifact["metadata"]["key"] - ] = artifact + self._artifacts_manager.artifacts[artifact["metadata"]["key"]] = ( + artifact + ) self._artifacts_manager.link_artifact( self.project, self.name, diff --git a/mlrun/frameworks/tf_keras/callbacks/logging_callback.py b/mlrun/frameworks/tf_keras/callbacks/logging_callback.py index f1811ec538b..c540410e288 100644 --- a/mlrun/frameworks/tf_keras/callbacks/logging_callback.py +++ b/mlrun/frameworks/tf_keras/callbacks/logging_callback.py @@ -389,9 +389,9 @@ def _add_auto_hyperparameters(self): ): try: self._get_hyperparameter(key_chain=learning_rate_key_chain) - self._dynamic_hyperparameters_keys[ - learning_rate_key - ] = learning_rate_key_chain + self._dynamic_hyperparameters_keys[learning_rate_key] = ( + learning_rate_key_chain + ) except (KeyError, IndexError, ValueError): pass diff --git a/mlrun/frameworks/tf_keras/model_handler.py b/mlrun/frameworks/tf_keras/model_handler.py index eaacc9b2572..039d36f5682 100644 --- a/mlrun/frameworks/tf_keras/model_handler.py +++ b/mlrun/frameworks/tf_keras/model_handler.py @@ -263,13 +263,13 @@ def save( # Update the paths and log artifacts if context is available: if self._weights_file is not None: if self._context is not None: - artifacts[ - self._get_weights_file_artifact_name() - ] = self._context.log_artifact( - self._weights_file, - local_path=self._weights_file, - artifact_path=output_path, - db_key=False, + artifacts[self._get_weights_file_artifact_name()] = ( + self._context.log_artifact( + self._weights_file, + local_path=self._weights_file, + artifact_path=output_path, + db_key=False, + ) ) return artifacts if self._context is not None else None diff --git a/mlrun/kfpops.py b/mlrun/kfpops.py index 7b81f147693..8e11e203939 100644 --- a/mlrun/kfpops.py +++ b/mlrun/kfpops.py @@ -408,9 +408,9 @@ def mlrun_pipeline( cmd += ["--label", f"{label}={val}"] for output in outputs: cmd += ["-o", str(output)] - file_outputs[ - output.replace(".", "_") - ] = f"/tmp/{output}" # not using path.join to avoid windows "\" + file_outputs[output.replace(".", "_")] = ( + f"/tmp/{output}" # not using path.join to avoid windows "\" + ) if project: cmd += ["--project", project] if handler: diff --git a/mlrun/model_monitoring/api.py b/mlrun/model_monitoring/api.py index e7157dcd387..19b9e3e2f96 100644 --- a/mlrun/model_monitoring/api.py +++ b/mlrun/model_monitoring/api.py @@ -436,9 +436,9 @@ def _generate_model_endpoint( ] = possible_drift_threshold model_endpoint.spec.monitoring_mode = monitoring_mode - model_endpoint.status.first_request = ( - model_endpoint.status.last_request - ) = datetime_now().isoformat() + model_endpoint.status.first_request = model_endpoint.status.last_request = ( + datetime_now().isoformat() + ) if sample_set_statistics: model_endpoint.status.feature_stats = 
sample_set_statistics @@ -476,11 +476,11 @@ def trigger_drift_batch_job( db_session = mlrun.get_run_db() # Register the monitoring batch job (do nothing if already exist) and get the job function as a dictionary - batch_function_dict: typing.Dict[ - str, typing.Any - ] = db_session.deploy_monitoring_batch_job( - project=project, - default_batch_image=default_batch_image, + batch_function_dict: typing.Dict[str, typing.Any] = ( + db_session.deploy_monitoring_batch_job( + project=project, + default_batch_image=default_batch_image, + ) ) # Prepare current run params diff --git a/mlrun/model_monitoring/stores/kv_model_endpoint_store.py b/mlrun/model_monitoring/stores/kv_model_endpoint_store.py index 37602277c32..dbffca1fb19 100644 --- a/mlrun/model_monitoring/stores/kv_model_endpoint_store.py +++ b/mlrun/model_monitoring/stores/kv_model_endpoint_store.py @@ -540,24 +540,24 @@ def validate_old_schema_fields(endpoint: dict): and endpoint[mlrun.common.schemas.model_monitoring.EventFieldType.METRICS] == "null" ): - endpoint[ - mlrun.common.schemas.model_monitoring.EventFieldType.METRICS - ] = json.dumps( - { - mlrun.common.schemas.model_monitoring.EventKeyMetrics.GENERIC: { - mlrun.common.schemas.model_monitoring.EventLiveStats.LATENCY_AVG_1H: 0, - mlrun.common.schemas.model_monitoring.EventLiveStats.PREDICTIONS_PER_SECOND: 0, + endpoint[mlrun.common.schemas.model_monitoring.EventFieldType.METRICS] = ( + json.dumps( + { + mlrun.common.schemas.model_monitoring.EventKeyMetrics.GENERIC: { + mlrun.common.schemas.model_monitoring.EventLiveStats.LATENCY_AVG_1H: 0, + mlrun.common.schemas.model_monitoring.EventLiveStats.PREDICTIONS_PER_SECOND: 0, + } } - } + ) ) # Validate key `uid` instead of `endpoint_id` # For backwards compatibility reasons, we replace the `endpoint_id` with `uid` which is the updated key name if mlrun.common.schemas.model_monitoring.EventFieldType.ENDPOINT_ID in endpoint: - endpoint[ - mlrun.common.schemas.model_monitoring.EventFieldType.UID - ] = endpoint[ - mlrun.common.schemas.model_monitoring.EventFieldType.ENDPOINT_ID - ] + endpoint[mlrun.common.schemas.model_monitoring.EventFieldType.UID] = ( + endpoint[ + mlrun.common.schemas.model_monitoring.EventFieldType.ENDPOINT_ID + ] + ) @staticmethod def _encode_field(field: typing.Union[str, bytes]) -> bytes: diff --git a/mlrun/model_monitoring/stores/sql_model_endpoint_store.py b/mlrun/model_monitoring/stores/sql_model_endpoint_store.py index e9cc5194eaf..82aa7070f52 100644 --- a/mlrun/model_monitoring/stores/sql_model_endpoint_store.py +++ b/mlrun/model_monitoring/stores/sql_model_endpoint_store.py @@ -31,7 +31,6 @@ class SQLModelEndpointStore(ModelEndpointStore): - """ Handles the DB operations when the DB target is from type SQL. For the SQL operations, we use SQLAlchemy, a Python SQL toolkit that handles the communication with the database. 
When using SQL for storing the model endpoints diff --git a/mlrun/package/packagers/pandas_packagers.py b/mlrun/package/packagers/pandas_packagers.py index ddf1e1c26ab..ea14384aca8 100644 --- a/mlrun/package/packagers/pandas_packagers.py +++ b/mlrun/package/packagers/pandas_packagers.py @@ -838,9 +838,9 @@ def _prepare_result(obj: Union[list, dict, tuple]) -> Any: """ if isinstance(obj, dict): for key, value in obj.items(): - obj[ - PandasDataFramePackager._prepare_result(obj=key) - ] = PandasDataFramePackager._prepare_result(obj=value) + obj[PandasDataFramePackager._prepare_result(obj=key)] = ( + PandasDataFramePackager._prepare_result(obj=value) + ) elif isinstance(obj, list): for i, value in enumerate(obj): obj[i] = PandasDataFramePackager._prepare_result(obj=value) diff --git a/mlrun/runtimes/function.py b/mlrun/runtimes/function.py index 7a7acfaf145..78e58a06f5b 100644 --- a/mlrun/runtimes/function.py +++ b/mlrun/runtimes/function.py @@ -432,15 +432,15 @@ def with_http( raise ValueError( "gateway timeout must be greater than the worker timeout" ) - annotations[ - "nginx.ingress.kubernetes.io/proxy-connect-timeout" - ] = f"{gateway_timeout}" - annotations[ - "nginx.ingress.kubernetes.io/proxy-read-timeout" - ] = f"{gateway_timeout}" - annotations[ - "nginx.ingress.kubernetes.io/proxy-send-timeout" - ] = f"{gateway_timeout}" + annotations["nginx.ingress.kubernetes.io/proxy-connect-timeout"] = ( + f"{gateway_timeout}" + ) + annotations["nginx.ingress.kubernetes.io/proxy-read-timeout"] = ( + f"{gateway_timeout}" + ) + annotations["nginx.ingress.kubernetes.io/proxy-send-timeout"] = ( + f"{gateway_timeout}" + ) trigger = nuclio.HttpTrigger( workers=workers, diff --git a/mlrun/runtimes/mpijob/abstract.py b/mlrun/runtimes/mpijob/abstract.py index f053eee5254..4571c2faceb 100644 --- a/mlrun/runtimes/mpijob/abstract.py +++ b/mlrun/runtimes/mpijob/abstract.py @@ -196,13 +196,13 @@ def with_autotune( if steps_per_sample is not None: horovod_autotune_settings["autotune-steps-per-sample"] = steps_per_sample if bayes_opt_max_samples is not None: - horovod_autotune_settings[ - "autotune-bayes-opt-max-samples" - ] = bayes_opt_max_samples + horovod_autotune_settings["autotune-bayes-opt-max-samples"] = ( + bayes_opt_max_samples + ) if gaussian_process_noise is not None: - horovod_autotune_settings[ - "autotune-gaussian-process-noise" - ] = gaussian_process_noise + horovod_autotune_settings["autotune-gaussian-process-noise"] = ( + gaussian_process_noise + ) self.set_envs(horovod_autotune_settings) diff --git a/mlrun/runtimes/pod.py b/mlrun/runtimes/pod.py index 77dfee1f3cd..d162ebbef4c 100644 --- a/mlrun/runtimes/pod.py +++ b/mlrun/runtimes/pod.py @@ -430,9 +430,9 @@ def enrich_resources_with_default_pod_resources( ) is None ): - resources[resource_requirement][ - resource_type - ] = default_resources[resource_requirement][resource_type] + resources[resource_requirement][resource_type] = ( + default_resources[resource_requirement][resource_type] + ) # This enables the user to define that no defaults would be applied on the resources elif resources == {}: return resources diff --git a/mlrun/runtimes/serving.py b/mlrun/runtimes/serving.py index 10da62b5d78..8d067a14bba 100644 --- a/mlrun/runtimes/serving.py +++ b/mlrun/runtimes/serving.py @@ -523,9 +523,9 @@ def _deploy_function_refs(self, builder_env: dict = None): function_object.metadata.tag = self.metadata.tag function_object.metadata.labels = function_object.metadata.labels or {} - function_object.metadata.labels[ - "mlrun/parent-function" - ] = 
self.metadata.name + function_object.metadata.labels["mlrun/parent-function"] = ( + self.metadata.name + ) function_object._is_child_function = True if not function_object.spec.graph: # copy the current graph only if the child doesnt have a graph of his own diff --git a/mlrun/runtimes/sparkjob/spark3job.py b/mlrun/runtimes/sparkjob/spark3job.py index 6d72bc649a5..4a9051e1922 100644 --- a/mlrun/runtimes/sparkjob/spark3job.py +++ b/mlrun/runtimes/sparkjob/spark3job.py @@ -345,9 +345,9 @@ def enrich_resources_with_default_pod_resources( ) is None ): - resources[resource_requirement][ - resource_type - ] = default_resources[resource_requirement][resource_type] + resources[resource_requirement][resource_type] = ( + default_resources[resource_requirement][resource_type] + ) else: resources = default_resources diff --git a/mlrun/utils/async_http.py b/mlrun/utils/async_http.py index 346fcc9423e..b43cd0e4a4e 100644 --- a/mlrun/utils/async_http.py +++ b/mlrun/utils/async_http.py @@ -139,9 +139,9 @@ async def _do_request(self) -> aiohttp.ClientResponse: # enrich user agent # will help traceability and debugging - headers[ - aiohttp.hdrs.USER_AGENT - ] = f"{aiohttp.http.SERVER_SOFTWARE} mlrun/{config.version}" + headers[aiohttp.hdrs.USER_AGENT] = ( + f"{aiohttp.http.SERVER_SOFTWARE} mlrun/{config.version}" + ) response: typing.Optional[ aiohttp.ClientResponse diff --git a/mlrun/utils/http.py b/mlrun/utils/http.py index 86a18b3f4be..7959ccca326 100644 --- a/mlrun/utils/http.py +++ b/mlrun/utils/http.py @@ -110,9 +110,9 @@ def __init__( def request(self, method, url, **kwargs): retry_count = 0 kwargs.setdefault("headers", {}) - kwargs["headers"][ - "User-Agent" - ] = f"{requests.utils.default_user_agent()} mlrun/{config.version}" + kwargs["headers"]["User-Agent"] = ( + f"{requests.utils.default_user_agent()} mlrun/{config.version}" + ) while True: try: response = super().request(method, url, **kwargs) diff --git a/mlrun/utils/notifications/notification_pusher.py b/mlrun/utils/notifications/notification_pusher.py index db3c329b30e..afc8430d2db 100644 --- a/mlrun/utils/notifications/notification_pusher.py +++ b/mlrun/utils/notifications/notification_pusher.py @@ -307,9 +307,9 @@ def _push_notification_sync( traceback=traceback.format_exc(), ) update_notification_status_kwargs["reason"] = f"Exception error: {str(exc)}" - update_notification_status_kwargs[ - "status" - ] = mlrun.common.schemas.NotificationStatus.ERROR + update_notification_status_kwargs["status"] = ( + mlrun.common.schemas.NotificationStatus.ERROR + ) raise exc finally: self._update_notification_status( @@ -356,9 +356,9 @@ async def _push_notification_async( traceback=traceback.format_exc(), ) update_notification_status_kwargs["reason"] = f"Exception error: {str(exc)}" - update_notification_status_kwargs[ - "status" - ] = mlrun.common.schemas.NotificationStatus.ERROR + update_notification_status_kwargs["status"] = ( + mlrun.common.schemas.NotificationStatus.ERROR + ) raise exc finally: await mlrun.utils.helpers.run_in_threadpool( diff --git a/pyproject.toml b/pyproject.toml index ff5b3bf7fc6..fb0bbfcc850 100644 --- a/pyproject.toml +++ b/pyproject.toml @@ -1,23 +1,20 @@ [tool.ruff] +extend-include = ["*.ipynb"] +target-version = "py39" + +[tool.ruff.lint] select = [ "F", # pyflakes "W", # pycodestyle "E", # pycodestyle "I", # isort ] -extend-exclude = [ - "server/api/proto", - "playground", -] -extend-include = ["*.ipynb"] - -[tool.ruff.lint] exclude = ["*.ipynb"] [tool.ruff.lint.pycodestyle] max-line-length = 120 
-[tool.ruff.per-file-ignores] +[tool.ruff.lint.per-file-ignores] "__init__.py" = ["F401"] [tool.pytest.ini_options] diff --git a/server/api/api/endpoints/runtime_resources.py b/server/api/api/endpoints/runtime_resources.py index a6bede026e2..259457e2192 100644 --- a/server/api/api/endpoints/runtime_resources.py +++ b/server/api/api/endpoints/runtime_resources.py @@ -215,7 +215,9 @@ async def _get_runtime_resources_allowed_projects( mlrun.common.schemas.AuthorizationAction.read, auth_info, ) - grouped_by_project_runtime_resources_output: mlrun.common.schemas.GroupedByProjectRuntimeResourcesOutput + grouped_by_project_runtime_resources_output: ( + mlrun.common.schemas.GroupedByProjectRuntimeResourcesOutput + ) grouped_by_project_runtime_resources_output = await run_in_threadpool( server.api.crud.RuntimeResources().list_runtime_resources, project, diff --git a/server/api/api/endpoints/workflows.py b/server/api/api/endpoints/workflows.py index ea02e191dfc..5c07c058e40 100644 --- a/server/api/api/endpoints/workflows.py +++ b/server/api/api/endpoints/workflows.py @@ -194,9 +194,9 @@ async def submit_workflow( client_version ) if client_python_version is not None: - workflow_runner.metadata.labels[ - "mlrun/client_python_version" - ] = sanitize_label_value(client_python_version) + workflow_runner.metadata.labels["mlrun/client_python_version"] = ( + sanitize_label_value(client_python_version) + ) try: if workflow_spec.schedule: await run_in_threadpool( diff --git a/server/api/api/utils.py b/server/api/api/utils.py index f7de17f55df..bbe937bf259 100644 --- a/server/api/api/utils.py +++ b/server/api/api/utils.py @@ -514,10 +514,10 @@ def _mask_v3io_volume_credentials( if isinstance( volume["flexVolume"], kubernetes.client.V1FlexVolumeSource ): - volume[ - "flexVolume" - ] = k8s_api_client.sanitize_for_serialization( - volume["flexVolume"] + volume["flexVolume"] = ( + k8s_api_client.sanitize_for_serialization( + volume["flexVolume"] + ) ) else: raise mlrun.errors.MLRunInvalidArgumentError( diff --git a/server/api/apiuvicorn.py b/server/api/apiuvicorn.py index 1c14fc5136d..e5a3ef51c8d 100644 --- a/server/api/apiuvicorn.py +++ b/server/api/apiuvicorn.py @@ -28,9 +28,9 @@ class UvicornMLRunLoggerMixin( def _get_uvicorn_log_config(): base_log_config = uvicorn.config.LOGGING_CONFIG - base_log_config["formatters"]["default"][ - "()" - ] = "server.api.apiuvicorn.UvicornMLRunLoggerMixin" + base_log_config["formatters"]["default"]["()"] = ( + "server.api.apiuvicorn.UvicornMLRunLoggerMixin" + ) return base_log_config diff --git a/server/api/crud/model_monitoring/deployment.py b/server/api/crud/model_monitoring/deployment.py index 259edf3d72e..b91428ed7a1 100644 --- a/server/api/crud/model_monitoring/deployment.py +++ b/server/api/crud/model_monitoring/deployment.py @@ -554,9 +554,9 @@ def _submit_schedule_batch_job( tracking_offset=tracking_offset, ) - task.spec.parameters[ - mm_constants.EventFieldType.BATCH_INTERVALS_DICT - ] = batch_dict + task.spec.parameters[mm_constants.EventFieldType.BATCH_INTERVALS_DICT] = ( + batch_dict + ) data = { "task": task.to_dict(), diff --git a/server/api/db/sqldb/db.py b/server/api/db/sqldb/db.py index 37de1b00297..2b2e1c0b369 100644 --- a/server/api/db/sqldb/db.py +++ b/server/api/db/sqldb/db.py @@ -400,9 +400,9 @@ def list_runs( notification ) run_struct["spec"]["notifications"].append(notification_spec) - run_struct["status"]["notifications"][ - notification.name - ] = notification_status + run_struct["status"]["notifications"][notification.name] = ( + notification_status 
+ ) runs.append(run_struct) return runs diff --git a/server/api/main.py b/server/api/main.py index c5ffa26dec8..95b15c6e826 100644 --- a/server/api/main.py +++ b/server/api/main.py @@ -712,9 +712,9 @@ async def abort_run(stale_run): # Using semaphore to limit the chunk we get from the thread pool for run aborting async with semaphore: # mark abort as internal, it doesn't have a background task - stale_run[ - "new_background_task_id" - ] = server.api.constants.internal_abort_task_id + stale_run["new_background_task_id"] = ( + server.api.constants.internal_abort_task_id + ) await fastapi.concurrency.run_in_threadpool( server.api.db.session.run_function_with_new_db_session, server.api.crud.Runs().abort_run, diff --git a/server/api/migrations_mysql/versions/026c947c4487_altering_table_datastore_profiles_2.py b/server/api/migrations_mysql/versions/026c947c4487_altering_table_datastore_profiles_2.py index 2af81e5bcac..2df42df2a7f 100644 --- a/server/api/migrations_mysql/versions/026c947c4487_altering_table_datastore_profiles_2.py +++ b/server/api/migrations_mysql/versions/026c947c4487_altering_table_datastore_profiles_2.py @@ -20,6 +20,7 @@ Create Date: 2023-08-10 14:15:30.523729 """ + import sqlalchemy as sa from alembic import op diff --git a/server/api/migrations_mysql/versions/28383af526f3_market_place_to_hub.py b/server/api/migrations_mysql/versions/28383af526f3_market_place_to_hub.py index 1a3d9f3265c..dde2ceead38 100644 --- a/server/api/migrations_mysql/versions/28383af526f3_market_place_to_hub.py +++ b/server/api/migrations_mysql/versions/28383af526f3_market_place_to_hub.py @@ -19,6 +19,7 @@ Create Date: 2023-04-24 11:06:36.177314 """ + from alembic import op # revision identifiers, used by Alembic. diff --git a/server/api/migrations_mysql/versions/32bae1b0e29c_increase_timestamp_fields_precision.py b/server/api/migrations_mysql/versions/32bae1b0e29c_increase_timestamp_fields_precision.py index 443ef380b2a..c089ef20d42 100644 --- a/server/api/migrations_mysql/versions/32bae1b0e29c_increase_timestamp_fields_precision.py +++ b/server/api/migrations_mysql/versions/32bae1b0e29c_increase_timestamp_fields_precision.py @@ -19,6 +19,7 @@ Create Date: 2022-01-16 19:32:08.676120 """ + import sqlalchemy.dialects.mysql from alembic import op diff --git a/server/api/migrations_mysql/versions/4903aef6a91d_tag_foreign_key_and_cascades.py b/server/api/migrations_mysql/versions/4903aef6a91d_tag_foreign_key_and_cascades.py index 219ba1ec4b9..24244fe5f76 100644 --- a/server/api/migrations_mysql/versions/4903aef6a91d_tag_foreign_key_and_cascades.py +++ b/server/api/migrations_mysql/versions/4903aef6a91d_tag_foreign_key_and_cascades.py @@ -19,6 +19,7 @@ Create Date: 2021-11-24 17:38:11.753522 """ + from alembic import op # revision identifiers, used by Alembic. diff --git a/server/api/migrations_mysql/versions/59061f6e2a87_add_index_migration.py b/server/api/migrations_mysql/versions/59061f6e2a87_add_index_migration.py index f6e44242515..fd6d5e29910 100644 --- a/server/api/migrations_mysql/versions/59061f6e2a87_add_index_migration.py +++ b/server/api/migrations_mysql/versions/59061f6e2a87_add_index_migration.py @@ -19,6 +19,7 @@ Create Date: 2023-11-05 12:43:53.787957 """ + from alembic import op # revision identifiers, used by Alembic. 
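
The long run of near-identical migration hunks above and below is mechanical formatter output rather than hand edits: Ruff 0.3 follows the 2024 formatting style, which appears to drive both repeated patterns in this patch: exactly one blank line between a module docstring and the first statement, and parenthesizing a long assignment value instead of splitting its subscript across lines. A minimal stdlib-only sketch of both conventions (a hypothetical module, not a file from this repository):

    """Example module.

    Create Date: 2024-01-01 00:00:00.000000
    """

    import collections

    # One blank line now separates the module docstring from the first
    # import, as in the migration hunks in this patch.
    grouped = collections.defaultdict(dict)

    # Long right-hand sides are wrapped in parentheses rather than breaking
    # the subscript chain, as in the mlrun/ and server/ hunks above.
    grouped["project-name"]["job-id"] = (
        "a value long enough that the formatter would otherwise split the line"
    )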
diff --git a/server/api/migrations_mysql/versions/5f1351c88a19_adding_background_tasks_table.py b/server/api/migrations_mysql/versions/5f1351c88a19_adding_background_tasks_table.py index 3d4569fb05e..7d74d29ba97 100644 --- a/server/api/migrations_mysql/versions/5f1351c88a19_adding_background_tasks_table.py +++ b/server/api/migrations_mysql/versions/5f1351c88a19_adding_background_tasks_table.py @@ -19,6 +19,7 @@ Create Date: 2022-06-12 19:59:29.618366 """ + import sqlalchemy as sa from alembic import op from sqlalchemy.dialects import mysql diff --git a/server/api/migrations_mysql/versions/88e656800d6a_add_requested_logs_column_and_index_to_.py b/server/api/migrations_mysql/versions/88e656800d6a_add_requested_logs_column_and_index_to_.py index d3ae248d813..efbb8036650 100644 --- a/server/api/migrations_mysql/versions/88e656800d6a_add_requested_logs_column_and_index_to_.py +++ b/server/api/migrations_mysql/versions/88e656800d6a_add_requested_logs_column_and_index_to_.py @@ -19,6 +19,7 @@ Create Date: 2023-01-11 11:21:46.882374 """ + import sqlalchemy as sa from alembic import op diff --git a/server/api/migrations_mysql/versions/9d16de5f03a7_adding_data_versions_table.py b/server/api/migrations_mysql/versions/9d16de5f03a7_adding_data_versions_table.py index fa6fd3d81b1..69cbf6dbe37 100644 --- a/server/api/migrations_mysql/versions/9d16de5f03a7_adding_data_versions_table.py +++ b/server/api/migrations_mysql/versions/9d16de5f03a7_adding_data_versions_table.py @@ -19,6 +19,7 @@ Create Date: 2021-10-04 16:08:05.267113 """ + import sqlalchemy as sa from alembic import op diff --git a/server/api/migrations_mysql/versions/b1d1e7ab5dec_adding_table_datastore_profiles.py b/server/api/migrations_mysql/versions/b1d1e7ab5dec_adding_table_datastore_profiles.py index 74cecee781b..9b9c8107ae4 100644 --- a/server/api/migrations_mysql/versions/b1d1e7ab5dec_adding_table_datastore_profiles.py +++ b/server/api/migrations_mysql/versions/b1d1e7ab5dec_adding_table_datastore_profiles.py @@ -20,6 +20,7 @@ Create Date: 2023-06-30 13:42:19.974990 """ + import sqlalchemy as sa from alembic import op diff --git a/server/api/migrations_mysql/versions/b268044fa2f7_adding_artifacts_v2_table.py b/server/api/migrations_mysql/versions/b268044fa2f7_adding_artifacts_v2_table.py index 4708a453c52..cdf47a25187 100644 --- a/server/api/migrations_mysql/versions/b268044fa2f7_adding_artifacts_v2_table.py +++ b/server/api/migrations_mysql/versions/b268044fa2f7_adding_artifacts_v2_table.py @@ -19,6 +19,7 @@ Create Date: 2023-11-22 20:04:18.402025 """ + import sqlalchemy as sa from alembic import op from sqlalchemy.dialects import mysql diff --git a/server/api/migrations_mysql/versions/b86f5b53f3d7_adding_name_and_updated_to_runs_table.py b/server/api/migrations_mysql/versions/b86f5b53f3d7_adding_name_and_updated_to_runs_table.py index c6f31ed0ed5..b2bd2e764b1 100644 --- a/server/api/migrations_mysql/versions/b86f5b53f3d7_adding_name_and_updated_to_runs_table.py +++ b/server/api/migrations_mysql/versions/b86f5b53f3d7_adding_name_and_updated_to_runs_table.py @@ -19,6 +19,7 @@ Create Date: 2022-01-08 19:28:45.141873 """ + import datetime import sqlalchemy as sa diff --git a/server/api/migrations_mysql/versions/b899cbf87203_background_task_error_message.py b/server/api/migrations_mysql/versions/b899cbf87203_background_task_error_message.py index af31e5ee324..070f3b0487d 100644 --- a/server/api/migrations_mysql/versions/b899cbf87203_background_task_error_message.py +++ 
b/server/api/migrations_mysql/versions/b899cbf87203_background_task_error_message.py @@ -19,6 +19,7 @@ Create Date: 2023-11-08 10:59:06.391117 """ + import sqlalchemy as sa from alembic import op diff --git a/server/api/migrations_mysql/versions/c0e342d73bd0_indexing_artifact_v2_key.py b/server/api/migrations_mysql/versions/c0e342d73bd0_indexing_artifact_v2_key.py index ebfdbcafa94..5a5536fe892 100644 --- a/server/api/migrations_mysql/versions/c0e342d73bd0_indexing_artifact_v2_key.py +++ b/server/api/migrations_mysql/versions/c0e342d73bd0_indexing_artifact_v2_key.py @@ -19,6 +19,7 @@ Create Date: 2024-02-07 14:46:55.639228 """ + from alembic import op # revision identifiers, used by Alembic. diff --git a/server/api/migrations_mysql/versions/c4af40b0bf61_init.py b/server/api/migrations_mysql/versions/c4af40b0bf61_init.py index c11ee7880f0..b6f3f563659 100644 --- a/server/api/migrations_mysql/versions/c4af40b0bf61_init.py +++ b/server/api/migrations_mysql/versions/c4af40b0bf61_init.py @@ -19,6 +19,7 @@ Create Date: 2021-09-30 10:55:51.956636 """ + import sqlalchemy as sa import sqlalchemy.dialects.mysql from alembic import op diff --git a/server/api/migrations_mysql/versions/c905d15bd91d_notifications.py b/server/api/migrations_mysql/versions/c905d15bd91d_notifications.py index b67b8f7e446..c4b044c8938 100644 --- a/server/api/migrations_mysql/versions/c905d15bd91d_notifications.py +++ b/server/api/migrations_mysql/versions/c905d15bd91d_notifications.py @@ -19,6 +19,7 @@ Create Date: 2022-09-20 10:44:41.727488 """ + import sqlalchemy as sa from alembic import op from sqlalchemy.dialects import mysql diff --git a/server/api/migrations_mysql/versions/ee041e8fdaa0_adding_next_run_time_column_to_schedule_.py b/server/api/migrations_mysql/versions/ee041e8fdaa0_adding_next_run_time_column_to_schedule_.py index 5adbe05d0b6..0b53256393d 100644 --- a/server/api/migrations_mysql/versions/ee041e8fdaa0_adding_next_run_time_column_to_schedule_.py +++ b/server/api/migrations_mysql/versions/ee041e8fdaa0_adding_next_run_time_column_to_schedule_.py @@ -19,6 +19,7 @@ Create Date: 2022-08-16 17:56:47.826661 """ + import sqlalchemy as sa from alembic import op from sqlalchemy.dialects import mysql diff --git a/server/api/migrations_mysql/versions/eefc169f7633_notifications_params_to_secret_params.py b/server/api/migrations_mysql/versions/eefc169f7633_notifications_params_to_secret_params.py index 84f6349b29e..6583b2b72fb 100644 --- a/server/api/migrations_mysql/versions/eefc169f7633_notifications_params_to_secret_params.py +++ b/server/api/migrations_mysql/versions/eefc169f7633_notifications_params_to_secret_params.py @@ -19,6 +19,7 @@ Create Date: 2023-08-29 10:30:57.901466 """ + import sqlalchemy as sa from alembic import op diff --git a/server/api/migrations_sqlite/versions/026c947c4487_altering_table_datastore_profiles_2.py b/server/api/migrations_sqlite/versions/026c947c4487_altering_table_datastore_profiles_2.py index 19658ecbc1f..68fb10ca6c0 100644 --- a/server/api/migrations_sqlite/versions/026c947c4487_altering_table_datastore_profiles_2.py +++ b/server/api/migrations_sqlite/versions/026c947c4487_altering_table_datastore_profiles_2.py @@ -20,6 +20,7 @@ Create Date: 2023-08-10 14:15:30.523729 """ + import sqlalchemy as sa from alembic import op diff --git a/server/api/migrations_sqlite/versions/0b224a1b4e0d_indexing_artifact_v2_key.py b/server/api/migrations_sqlite/versions/0b224a1b4e0d_indexing_artifact_v2_key.py index a2763d5da98..c5d0cdefb4b 100644 --- 
a/server/api/migrations_sqlite/versions/0b224a1b4e0d_indexing_artifact_v2_key.py +++ b/server/api/migrations_sqlite/versions/0b224a1b4e0d_indexing_artifact_v2_key.py @@ -19,6 +19,7 @@ Create Date: 2024-02-07 14:47:10.021608 """ + from alembic import op # revision identifiers, used by Alembic. diff --git a/server/api/migrations_sqlite/versions/114b2c80710f_notifications_params_to_secret_params.py b/server/api/migrations_sqlite/versions/114b2c80710f_notifications_params_to_secret_params.py index da853a01410..3ede6121496 100644 --- a/server/api/migrations_sqlite/versions/114b2c80710f_notifications_params_to_secret_params.py +++ b/server/api/migrations_sqlite/versions/114b2c80710f_notifications_params_to_secret_params.py @@ -19,6 +19,7 @@ Create Date: 2023-08-29 10:52:00.586301 """ + import sqlalchemy as sa from alembic import op diff --git a/server/api/migrations_sqlite/versions/11f8dd2dc9fe_init.py b/server/api/migrations_sqlite/versions/11f8dd2dc9fe_init.py index 0ef27d3a54c..4ab3d711d9a 100644 --- a/server/api/migrations_sqlite/versions/11f8dd2dc9fe_init.py +++ b/server/api/migrations_sqlite/versions/11f8dd2dc9fe_init.py @@ -19,6 +19,7 @@ Create Date: 2020-10-06 15:50:35.588592 """ + import sqlalchemy as sa from alembic import op diff --git a/server/api/migrations_sqlite/versions/1c954f8cb32d_schedule_last_run_uri.py b/server/api/migrations_sqlite/versions/1c954f8cb32d_schedule_last_run_uri.py index e767536d8eb..d1de1f1a74a 100644 --- a/server/api/migrations_sqlite/versions/1c954f8cb32d_schedule_last_run_uri.py +++ b/server/api/migrations_sqlite/versions/1c954f8cb32d_schedule_last_run_uri.py @@ -19,6 +19,7 @@ Create Date: 2020-11-11 09:39:09.551025 """ + import sqlalchemy as sa from alembic import op diff --git a/server/api/migrations_sqlite/versions/2b6d23c715aa_adding_feature_sets.py b/server/api/migrations_sqlite/versions/2b6d23c715aa_adding_feature_sets.py index 2c8d8aa7413..48f5243af63 100644 --- a/server/api/migrations_sqlite/versions/2b6d23c715aa_adding_feature_sets.py +++ b/server/api/migrations_sqlite/versions/2b6d23c715aa_adding_feature_sets.py @@ -19,6 +19,7 @@ Create Date: 2020-11-05 01:42:53.395810 """ + import sqlalchemy as sa from alembic import op diff --git a/server/api/migrations_sqlite/versions/332fb7e1274d_background_task_error_message.py b/server/api/migrations_sqlite/versions/332fb7e1274d_background_task_error_message.py index 039e54f7c08..c8a1266c263 100644 --- a/server/api/migrations_sqlite/versions/332fb7e1274d_background_task_error_message.py +++ b/server/api/migrations_sqlite/versions/332fb7e1274d_background_task_error_message.py @@ -19,6 +19,7 @@ Create Date: 2023-11-08 10:56:54.339846 """ + import sqlalchemy as sa from alembic import op diff --git a/server/api/migrations_sqlite/versions/4acd9430b093_market_place_to_hub.py b/server/api/migrations_sqlite/versions/4acd9430b093_market_place_to_hub.py index b901709fd38..773faa35541 100644 --- a/server/api/migrations_sqlite/versions/4acd9430b093_market_place_to_hub.py +++ b/server/api/migrations_sqlite/versions/4acd9430b093_market_place_to_hub.py @@ -19,6 +19,7 @@ Create Date: 2023-04-26 22:41:59.726305 """ + import sqlalchemy as sa from alembic import op diff --git a/server/api/migrations_sqlite/versions/6401142f2d7c_adding_next_run_time_column_to_schedule_.py b/server/api/migrations_sqlite/versions/6401142f2d7c_adding_next_run_time_column_to_schedule_.py index 979aeb6e8cd..368b69dcb06 100644 --- a/server/api/migrations_sqlite/versions/6401142f2d7c_adding_next_run_time_column_to_schedule_.py +++ 
b/server/api/migrations_sqlite/versions/6401142f2d7c_adding_next_run_time_column_to_schedule_.py @@ -19,6 +19,7 @@ Create Date: 2022-08-16 17:51:41.624145 """ + import sqlalchemy as sa from alembic import op diff --git a/server/api/migrations_sqlite/versions/64d90a1a69bc_adding_background_tasks_table.py b/server/api/migrations_sqlite/versions/64d90a1a69bc_adding_background_tasks_table.py index 6143594058e..fc535f1fd27 100644 --- a/server/api/migrations_sqlite/versions/64d90a1a69bc_adding_background_tasks_table.py +++ b/server/api/migrations_sqlite/versions/64d90a1a69bc_adding_background_tasks_table.py @@ -19,6 +19,7 @@ Create Date: 2022-06-12 20:00:38.183341 """ + import sqlalchemy as sa from alembic import op diff --git a/server/api/migrations_sqlite/versions/6e0c9531edc7_adding_table_datastore_profiles.py b/server/api/migrations_sqlite/versions/6e0c9531edc7_adding_table_datastore_profiles.py index 928ceed65de..19bb018a3d0 100644 --- a/server/api/migrations_sqlite/versions/6e0c9531edc7_adding_table_datastore_profiles.py +++ b/server/api/migrations_sqlite/versions/6e0c9531edc7_adding_table_datastore_profiles.py @@ -20,6 +20,7 @@ Create Date: 2023-06-30 13:44:34.141769 """ + import sqlalchemy as sa from alembic import op diff --git a/server/api/migrations_sqlite/versions/803438ecd005_add_requested_logs_column_to_runs.py b/server/api/migrations_sqlite/versions/803438ecd005_add_requested_logs_column_to_runs.py index 895d3deceb8..ffaf35bed5d 100644 --- a/server/api/migrations_sqlite/versions/803438ecd005_add_requested_logs_column_to_runs.py +++ b/server/api/migrations_sqlite/versions/803438ecd005_add_requested_logs_column_to_runs.py @@ -19,6 +19,7 @@ Create Date: 2023-01-11 10:31:18.505231 """ + import sqlalchemy as sa from alembic import op diff --git a/server/api/migrations_sqlite/versions/863114f0c659_refactoring_feature_set.py b/server/api/migrations_sqlite/versions/863114f0c659_refactoring_feature_set.py index 4449032d0aa..9aaa0d864f6 100644 --- a/server/api/migrations_sqlite/versions/863114f0c659_refactoring_feature_set.py +++ b/server/api/migrations_sqlite/versions/863114f0c659_refactoring_feature_set.py @@ -19,6 +19,7 @@ Create Date: 2020-11-11 11:22:36.653049 """ + import sqlalchemy as sa from alembic import op diff --git a/server/api/migrations_sqlite/versions/959ae00528ad_notifications.py b/server/api/migrations_sqlite/versions/959ae00528ad_notifications.py index 98f399c7974..0593b2d7f8a 100644 --- a/server/api/migrations_sqlite/versions/959ae00528ad_notifications.py +++ b/server/api/migrations_sqlite/versions/959ae00528ad_notifications.py @@ -19,6 +19,7 @@ Create Date: 2022-09-20 10:40:41.354209 """ + import sqlalchemy as sa from alembic import op diff --git a/server/api/migrations_sqlite/versions/accf9fc83d38_adding_data_versions_table.py b/server/api/migrations_sqlite/versions/accf9fc83d38_adding_data_versions_table.py index fcb7a78e28c..a420a1c9104 100644 --- a/server/api/migrations_sqlite/versions/accf9fc83d38_adding_data_versions_table.py +++ b/server/api/migrations_sqlite/versions/accf9fc83d38_adding_data_versions_table.py @@ -19,6 +19,7 @@ Create Date: 2021-10-04 16:05:42.095290 """ + import sqlalchemy as sa from alembic import op diff --git a/server/api/migrations_sqlite/versions/b68e8e897a28_schedule_labels.py b/server/api/migrations_sqlite/versions/b68e8e897a28_schedule_labels.py index 71066ee5772..537df628d7a 100644 --- a/server/api/migrations_sqlite/versions/b68e8e897a28_schedule_labels.py +++ b/server/api/migrations_sqlite/versions/b68e8e897a28_schedule_labels.py @@ 
-19,6 +19,7 @@ Create Date: 2020-10-07 11:30:41.810844 """ + import sqlalchemy as sa from alembic import op diff --git a/server/api/migrations_sqlite/versions/bcd0c1f9720c_adding_project_labels.py b/server/api/migrations_sqlite/versions/bcd0c1f9720c_adding_project_labels.py index 27f82c6e436..b87759861c8 100644 --- a/server/api/migrations_sqlite/versions/bcd0c1f9720c_adding_project_labels.py +++ b/server/api/migrations_sqlite/versions/bcd0c1f9720c_adding_project_labels.py @@ -19,6 +19,7 @@ Create Date: 2020-12-20 03:42:02.763802 """ + import sqlalchemy as sa from alembic import op diff --git a/server/api/migrations_sqlite/versions/bf91ff18513b_add_index_migration.py b/server/api/migrations_sqlite/versions/bf91ff18513b_add_index_migration.py index 63596356b16..3a1e46e7048 100644 --- a/server/api/migrations_sqlite/versions/bf91ff18513b_add_index_migration.py +++ b/server/api/migrations_sqlite/versions/bf91ff18513b_add_index_migration.py @@ -19,6 +19,7 @@ Create Date: 2023-11-05 12:43:39.286669 """ + from alembic import op # revision identifiers, used by Alembic. diff --git a/server/api/migrations_sqlite/versions/cf21882f938e_schedule_id.py b/server/api/migrations_sqlite/versions/cf21882f938e_schedule_id.py index 31ccadc3b05..818116ff7f3 100644 --- a/server/api/migrations_sqlite/versions/cf21882f938e_schedule_id.py +++ b/server/api/migrations_sqlite/versions/cf21882f938e_schedule_id.py @@ -19,6 +19,7 @@ Create Date: 2020-10-07 11:21:49.223077 """ + import sqlalchemy as sa from alembic import op diff --git a/server/api/migrations_sqlite/versions/d781f58f607f_tag_object_name_string.py b/server/api/migrations_sqlite/versions/d781f58f607f_tag_object_name_string.py index bca2ed5b661..ed7c53db480 100644 --- a/server/api/migrations_sqlite/versions/d781f58f607f_tag_object_name_string.py +++ b/server/api/migrations_sqlite/versions/d781f58f607f_tag_object_name_string.py @@ -19,6 +19,7 @@ Create Date: 2021-07-29 16:06:45.555323 """ + import sqlalchemy as sa from alembic import op diff --git a/server/api/migrations_sqlite/versions/deac06871ace_adding_marketplace_sources_table.py b/server/api/migrations_sqlite/versions/deac06871ace_adding_marketplace_sources_table.py index 768c87a902b..e06e844669a 100644 --- a/server/api/migrations_sqlite/versions/deac06871ace_adding_marketplace_sources_table.py +++ b/server/api/migrations_sqlite/versions/deac06871ace_adding_marketplace_sources_table.py @@ -19,6 +19,7 @@ Create Date: 2021-06-30 15:56:09.543139 """ + import sqlalchemy as sa from alembic import op diff --git a/server/api/migrations_sqlite/versions/e1dd5983c06b_schedule_concurrency_limit.py b/server/api/migrations_sqlite/versions/e1dd5983c06b_schedule_concurrency_limit.py index 8f03f52218e..5d137f5f32a 100644 --- a/server/api/migrations_sqlite/versions/e1dd5983c06b_schedule_concurrency_limit.py +++ b/server/api/migrations_sqlite/versions/e1dd5983c06b_schedule_concurrency_limit.py @@ -19,6 +19,7 @@ Create Date: 2021-03-15 13:36:18.703619 """ + import sqlalchemy as sa from alembic import op diff --git a/server/api/migrations_sqlite/versions/e5594ed3ab53_adding_name_and_updated_to_runs_table.py b/server/api/migrations_sqlite/versions/e5594ed3ab53_adding_name_and_updated_to_runs_table.py index ee89a85e7f7..8269536e25d 100644 --- a/server/api/migrations_sqlite/versions/e5594ed3ab53_adding_name_and_updated_to_runs_table.py +++ b/server/api/migrations_sqlite/versions/e5594ed3ab53_adding_name_and_updated_to_runs_table.py @@ -19,6 +19,7 @@ Create Date: 2022-01-08 12:33:59.070265 """ + import datetime import 
sqlalchemy as sa diff --git a/server/api/migrations_sqlite/versions/f4249b4ba6fa_adding_feature_vectors.py b/server/api/migrations_sqlite/versions/f4249b4ba6fa_adding_feature_vectors.py index 1ac20d67ce9..d4e5383ea5c 100644 --- a/server/api/migrations_sqlite/versions/f4249b4ba6fa_adding_feature_vectors.py +++ b/server/api/migrations_sqlite/versions/f4249b4ba6fa_adding_feature_vectors.py @@ -19,6 +19,7 @@ Create Date: 2020-11-24 14:43:08.789873 """ + import sqlalchemy as sa from alembic import op diff --git a/server/api/migrations_sqlite/versions/f7b5a1a03629_adding_feature_labels.py b/server/api/migrations_sqlite/versions/f7b5a1a03629_adding_feature_labels.py index 6d5b2c0799d..1a9014b4c99 100644 --- a/server/api/migrations_sqlite/versions/f7b5a1a03629_adding_feature_labels.py +++ b/server/api/migrations_sqlite/versions/f7b5a1a03629_adding_feature_labels.py @@ -19,6 +19,7 @@ Create Date: 2020-11-09 11:19:51.472174 """ + import sqlalchemy as sa from alembic import op diff --git a/server/api/migrations_sqlite/versions/fa3009d9787f_adding_artifacts_v2_table.py b/server/api/migrations_sqlite/versions/fa3009d9787f_adding_artifacts_v2_table.py index 8701a4f6346..db32a7da683 100644 --- a/server/api/migrations_sqlite/versions/fa3009d9787f_adding_artifacts_v2_table.py +++ b/server/api/migrations_sqlite/versions/fa3009d9787f_adding_artifacts_v2_table.py @@ -19,6 +19,7 @@ Create Date: 2023-11-22 20:01:50.197379 """ + import sqlalchemy as sa from alembic import op diff --git a/server/api/runtime_handlers/__init__.py b/server/api/runtime_handlers/__init__.py index 0a34c8dc3c5..c730dfcc843 100644 --- a/server/api/runtime_handlers/__init__.py +++ b/server/api/runtime_handlers/__init__.py @@ -38,9 +38,9 @@ def get_runtime_handler(kind: str) -> BaseRuntimeHandler: if not runtime_handler_instances_cache.setdefault(RuntimeKinds.mpijob, {}).get( mpijob_crd_version ): - runtime_handler_instances_cache[RuntimeKinds.mpijob][ - mpijob_crd_version - ] = runtime_handler_class() + runtime_handler_instances_cache[RuntimeKinds.mpijob][mpijob_crd_version] = ( + runtime_handler_class() + ) return runtime_handler_instances_cache[RuntimeKinds.mpijob][mpijob_crd_version] kind_runtime_handler_map = { diff --git a/server/api/runtime_handlers/base.py b/server/api/runtime_handlers/base.py index b5c8374941f..1e64010c894 100644 --- a/server/api/runtime_handlers/base.py +++ b/server/api/runtime_handlers/base.py @@ -594,9 +594,9 @@ def _ensure_run_not_stuck_on_non_terminal_state( "Updating run state", run_uid=run_uid, run_state=RunStates.error ) run.setdefault("status", {})["state"] = RunStates.error - run.setdefault("status", {})[ - "reason" - ] = "A runtime resource related to this run could not be found" + run.setdefault("status", {})["reason"] = ( + "A runtime resource related to this run could not be found" + ) run.setdefault("status", {})["last_update"] = now.isoformat() db.store_run(db_session, run, run_uid, project) @@ -1513,10 +1513,10 @@ def _add_resource_to_grouped_by_field_resources_response( if first_field_value not in resources: resources[first_field_value] = {} if second_field_value not in resources[first_field_value]: - resources[first_field_value][ - second_field_value - ] = mlrun.common.schemas.RuntimeResources( - pod_resources=[], crd_resources=[] + resources[first_field_value][second_field_value] = ( + mlrun.common.schemas.RuntimeResources( + pod_resources=[], crd_resources=[] + ) ) if not getattr( resources[first_field_value][second_field_value], resource_field_name diff --git a/server/api/utils/builder.py 
b/server/api/utils/builder.py index aa978eb9f23..426f5004454 100644 --- a/server/api/utils/builder.py +++ b/server/api/utils/builder.py @@ -350,9 +350,9 @@ def configure_kaniko_ecr_init_container( aws_credentials_file_env_value = "/tmp/aws/credentials" # set the credentials file location in the init container - init_container_env[ - aws_credentials_file_env_key - ] = aws_credentials_file_env_value + init_container_env[aws_credentials_file_env_key] = ( + aws_credentials_file_env_value + ) # set the kaniko container AWS credentials location to the mount's path kpod.env.append( diff --git a/server/api/utils/clients/iguazio.py b/server/api/utils/clients/iguazio.py index ce78999224a..9e62332a5a5 100644 --- a/server/api/utils/clients/iguazio.py +++ b/server/api/utils/clients/iguazio.py @@ -742,20 +742,20 @@ def _transform_mlrun_project_to_iguazio_project( } } if project.metadata.created: - body["data"]["attributes"][ - "created_at" - ] = project.metadata.created.isoformat() + body["data"]["attributes"]["created_at"] = ( + project.metadata.created.isoformat() + ) if project.metadata.labels is not None: - body["data"]["attributes"][ - "labels" - ] = Client._transform_mlrun_labels_to_iguazio_labels( - project.metadata.labels + body["data"]["attributes"]["labels"] = ( + Client._transform_mlrun_labels_to_iguazio_labels( + project.metadata.labels + ) ) if project.metadata.annotations is not None: - body["data"]["attributes"][ - "annotations" - ] = Client._transform_mlrun_labels_to_iguazio_labels( - project.metadata.annotations + body["data"]["attributes"]["annotations"] = ( + Client._transform_mlrun_labels_to_iguazio_labels( + project.metadata.annotations + ) ) if project.spec.owner: body["data"]["attributes"]["owner_username"] = project.spec.owner @@ -802,9 +802,9 @@ def _transform_iguazio_project_to_mlrun_project( iguazio_project["attributes"].get("mlrun_project", "{}") ) # name is mandatory in the mlrun schema, without adding it the schema initialization will fail - mlrun_project_without_common_fields.setdefault("metadata", {})[ - "name" - ] = iguazio_project["attributes"]["name"] + mlrun_project_without_common_fields.setdefault("metadata", {})["name"] = ( + iguazio_project["attributes"]["name"] + ) mlrun_project = mlrun.common.schemas.Project( **mlrun_project_without_common_fields ) diff --git a/server/api/utils/projects/leader.py b/server/api/utils/projects/leader.py index 8097f046e35..148393a1071 100644 --- a/server/api/utils/projects/leader.py +++ b/server/api/utils/projects/leader.py @@ -214,9 +214,9 @@ def _sync_projects(self): followers_projects_map = collections.defaultdict(dict) for _follower_name, follower_projects in follower_projects_map.items(): for project in follower_projects.projects: - followers_projects_map[_follower_name][ - project.metadata.name - ] = project + followers_projects_map[_follower_name][project.metadata.name] = ( + project + ) # create map - leader project name -> leader project for easier searches leader_projects_map = {} diff --git a/tests/api/api/test_runtime_resources.py b/tests/api/api/test_runtime_resources.py index d5cb00907c6..a6645a2a396 100644 --- a/tests/api/api/test_runtime_resources.py +++ b/tests/api/api/test_runtime_resources.py @@ -694,9 +694,9 @@ def _filter_kind_from_grouped_by_project_runtime_resources_output( ) in grouped_by_project_runtime_resources_output.items(): for kind, runtime_resources in kind_runtime_resources_map.items(): if kind == filter_kind: - filtered_output.setdefault(project, {})[ - kind - ] = 
grouped_by_project_runtime_resources_output[project][kind] + filtered_output.setdefault(project, {})[kind] = ( + grouped_by_project_runtime_resources_output[project][kind] + ) return filtered_output diff --git a/tests/api/api/test_utils.py b/tests/api/api/test_utils.py index 273d4282f24..f37934a98c4 100644 --- a/tests/api/api/test_utils.py +++ b/tests/api/api/test_utils.py @@ -839,18 +839,18 @@ def test_mask_v3io_volume_credentials( v3io_volume["flexVolume"] = k8s_api_client.sanitize_for_serialization( v3io_volume["flexVolume"] ) - no_access_key_v3io_volume[ - "flexVolume" - ] = k8s_api_client.sanitize_for_serialization( - no_access_key_v3io_volume["flexVolume"] + no_access_key_v3io_volume["flexVolume"] = ( + k8s_api_client.sanitize_for_serialization( + no_access_key_v3io_volume["flexVolume"] + ) ) no_name_v3io_volume["flexVolume"] = k8s_api_client.sanitize_for_serialization( no_name_v3io_volume["flexVolume"] ) - no_matching_mount_v3io_volume[ - "flexVolume" - ] = k8s_api_client.sanitize_for_serialization( - no_matching_mount_v3io_volume["flexVolume"] + no_matching_mount_v3io_volume["flexVolume"] = ( + k8s_api_client.sanitize_for_serialization( + no_matching_mount_v3io_volume["flexVolume"] + ) ) v3io_volume_mount = k8s_api_client.sanitize_for_serialization(v3io_volume_mount) conflicting_v3io_volume_mount = k8s_api_client.sanitize_for_serialization( diff --git a/tests/api/crud/test_runs.py b/tests/api/crud/test_runs.py index 885fc052b7b..7717bd119e2 100644 --- a/tests/api/crud/test_runs.py +++ b/tests/api/crud/test_runs.py @@ -51,35 +51,41 @@ async def test_delete_runs_with_resources(self, db: sqlalchemy.orm.Session): assert run["metadata"]["name"] == "run-name" k8s_helper = server.api.utils.singletons.k8s.get_k8s_helper() - with unittest.mock.patch.object( - k8s_helper.v1api, "delete_namespaced_pod" - ) as delete_namespaced_pod_mock, unittest.mock.patch.object( - k8s_helper.v1api, - "list_namespaced_pod", - side_effect=[ - k8s_client.V1PodList( - items=[ - k8s_client.V1Pod( - metadata=k8s_client.V1ObjectMeta( - name="pod-name", - labels={ - "mlrun/class": "job", - "mlrun/project": project, - "mlrun/uid": "uid", - }, - ), - status=k8s_client.V1PodStatus(phase="Running"), - ) - ] - ), - # 2nd time for waiting for pod to be deleted - k8s_client.V1PodList(items=[]), - ], - ), unittest.mock.patch.object( - server.api.runtime_handlers.BaseRuntimeHandler, "_ensure_run_logs_collected" - ), unittest.mock.patch.object( - server.api.utils.clients.log_collector.LogCollectorClient, "delete_logs" - ) as delete_logs_mock: + with ( + unittest.mock.patch.object( + k8s_helper.v1api, "delete_namespaced_pod" + ) as delete_namespaced_pod_mock, + unittest.mock.patch.object( + k8s_helper.v1api, + "list_namespaced_pod", + side_effect=[ + k8s_client.V1PodList( + items=[ + k8s_client.V1Pod( + metadata=k8s_client.V1ObjectMeta( + name="pod-name", + labels={ + "mlrun/class": "job", + "mlrun/project": project, + "mlrun/uid": "uid", + }, + ), + status=k8s_client.V1PodStatus(phase="Running"), + ) + ] + ), + # 2nd time for waiting for pod to be deleted + k8s_client.V1PodList(items=[]), + ], + ), + unittest.mock.patch.object( + server.api.runtime_handlers.BaseRuntimeHandler, + "_ensure_run_logs_collected", + ), + unittest.mock.patch.object( + server.api.utils.clients.log_collector.LogCollectorClient, "delete_logs" + ) as delete_logs_mock, + ): await server.api.crud.Runs().delete_run(db, "uid", 0, project) delete_namespaced_pod_mock.assert_called_once() delete_logs_mock.assert_called_once() @@ -114,17 +120,23 @@ async def 
test_delete_runs(self, db: sqlalchemy.orm.Session): assert len(runs) == 20 k8s_helper = server.api.utils.singletons.k8s.get_k8s_helper() - with unittest.mock.patch.object( - k8s_helper.v1api, "delete_namespaced_pod" - ) as delete_namespaced_pod_mock, unittest.mock.patch.object( - k8s_helper.v1api, - "list_namespaced_pod", - return_value=k8s_client.V1PodList(items=[]), - ), unittest.mock.patch.object( - server.api.runtime_handlers.BaseRuntimeHandler, "_ensure_run_logs_collected" - ), unittest.mock.patch.object( - server.api.utils.clients.log_collector.LogCollectorClient, "delete_logs" - ) as delete_logs_mock: + with ( + unittest.mock.patch.object( + k8s_helper.v1api, "delete_namespaced_pod" + ) as delete_namespaced_pod_mock, + unittest.mock.patch.object( + k8s_helper.v1api, + "list_namespaced_pod", + return_value=k8s_client.V1PodList(items=[]), + ), + unittest.mock.patch.object( + server.api.runtime_handlers.BaseRuntimeHandler, + "_ensure_run_logs_collected", + ), + unittest.mock.patch.object( + server.api.utils.clients.log_collector.LogCollectorClient, "delete_logs" + ) as delete_logs_mock, + ): await server.api.crud.Runs().delete_runs(db, name=run_name, project=project) runs = server.api.crud.Runs().list_runs(db, run_name, project=project) assert len(runs) == 0 @@ -160,18 +172,21 @@ async def test_delete_runs_failure(self, db: sqlalchemy.orm.Session): assert len(runs) == 3 k8s_helper = server.api.utils.singletons.k8s.get_k8s_helper() - with unittest.mock.patch.object( - k8s_helper.v1api, "delete_namespaced_pod" - ), unittest.mock.patch.object( - k8s_helper.v1api, - "list_namespaced_pod", - side_effect=[ - k8s_client.V1PodList(items=[]), - Exception("Boom!"), - k8s_client.V1PodList(items=[]), - ], - ), unittest.mock.patch.object( - server.api.runtime_handlers.BaseRuntimeHandler, "_ensure_run_logs_collected" + with ( + unittest.mock.patch.object(k8s_helper.v1api, "delete_namespaced_pod"), + unittest.mock.patch.object( + k8s_helper.v1api, + "list_namespaced_pod", + side_effect=[ + k8s_client.V1PodList(items=[]), + Exception("Boom!"), + k8s_client.V1PodList(items=[]), + ], + ), + unittest.mock.patch.object( + server.api.runtime_handlers.BaseRuntimeHandler, + "_ensure_run_logs_collected", + ), ): with pytest.raises(mlrun.errors.MLRunBadRequestError) as exc: await server.api.crud.Runs().delete_runs( @@ -230,11 +245,14 @@ def test_run_abortion_failure(self, db: sqlalchemy.orm.Session): run_uid, project=project, ) - with unittest.mock.patch.object( - server.api.crud.RuntimeResources(), - "delete_runtime_resources", - side_effect=mlrun.errors.MLRunInternalServerError("BOOM"), - ), pytest.raises(mlrun.errors.MLRunInternalServerError) as exc: + with ( + unittest.mock.patch.object( + server.api.crud.RuntimeResources(), + "delete_runtime_resources", + side_effect=mlrun.errors.MLRunInternalServerError("BOOM"), + ), + pytest.raises(mlrun.errors.MLRunInternalServerError) as exc, + ): server.api.crud.Runs().abort_run(db, project, run_uid, 0) assert "BOOM" == str(exc.value) diff --git a/tests/api/db/test_sqldb.py b/tests/api/db/test_sqldb.py index 6a85507a07c..df30d9a3759 100644 --- a/tests/api/db/test_sqldb.py +++ b/tests/api/db/test_sqldb.py @@ -12,6 +12,7 @@ # See the License for the specific language governing permissions and # limitations under the License. 
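# --- Editor's sketch (not part of the patch): the test refactors above switch
# multi-context `with` statements from backslash/comma chaining to the
# parenthesized form (supported since Python 3.10 and emitted by recent
# formatter versions). A minimal standalone example with hypothetical targets:
import os
import unittest.mock


def load_config() -> str:
    return os.environ.get("MODE", "real")


# Old style: context managers chained with line continuations.
with unittest.mock.patch.dict("os.environ", {"MODE": "fake"}), \
        unittest.mock.patch("builtins.print") as print_mock:
    assert load_config() == "fake"

# New style: one parenthesized group, one context manager per line,
# trailing comma allowed -- the runtime semantics are identical.
with (
    unittest.mock.patch.dict("os.environ", {"MODE": "fake"}),
    unittest.mock.patch("builtins.print") as print_mock,
):
    assert load_config() == "fake"
# ---------------------------------------------------------------------------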
"""SQLDB specific tests, common tests should be in test_dbs.py""" + import copy from contextlib import contextmanager from datetime import datetime, timedelta diff --git a/tests/api/runtimes/test_kubejob.py b/tests/api/runtimes/test_kubejob.py index 5fcf1e1f78d..5d1c4a0ae0d 100644 --- a/tests/api/runtimes/test_kubejob.py +++ b/tests/api/runtimes/test_kubejob.py @@ -1042,12 +1042,12 @@ def test_set_state_thresholds_success(self, db: Session, k8s_secrets_mock): self.execute_function(runtime) run = get_db().list_runs(db, project=self.project)[0] expected_state_thresholds = override_state_thresholds - expected_state_thresholds[ - "image_pull_backoff" - ] = mlconf.function.spec.state_thresholds.default.image_pull_backoff - expected_state_thresholds[ - "pending_scheduled" - ] = mlconf.function.spec.state_thresholds.default.pending_scheduled + expected_state_thresholds["image_pull_backoff"] = ( + mlconf.function.spec.state_thresholds.default.image_pull_backoff + ) + expected_state_thresholds["pending_scheduled"] = ( + mlconf.function.spec.state_thresholds.default.pending_scheduled + ) assert run["spec"]["state_thresholds"] == expected_state_thresholds patch_state_thresholds = { @@ -1061,9 +1061,9 @@ def test_set_state_thresholds_success(self, db: Session, k8s_secrets_mock): run = get_db().list_runs(db, project=self.project)[0] expected_state_thresholds = patch_state_thresholds expected_state_thresholds["executing"] = override_state_thresholds["executing"] - expected_state_thresholds[ - "pending_scheduled" - ] = mlconf.function.spec.state_thresholds.default.pending_scheduled + expected_state_thresholds["pending_scheduled"] = ( + mlconf.function.spec.state_thresholds.default.pending_scheduled + ) assert run["spec"]["state_thresholds"] == expected_state_thresholds @staticmethod diff --git a/tests/api/utils/clients/test_iguazio.py b/tests/api/utils/clients/test_iguazio.py index d9eb7cf1209..3e22043ddf2 100644 --- a/tests/api/utils/clients/test_iguazio.py +++ b/tests/api/utils/clients/test_iguazio.py @@ -1177,10 +1177,10 @@ def _build_project_response( }, } if with_mlrun_project: - body["attributes"][ - "mlrun_project" - ] = iguazio_client._transform_mlrun_project_to_iguazio_mlrun_project_attribute( - project + body["attributes"]["mlrun_project"] = ( + iguazio_client._transform_mlrun_project_to_iguazio_mlrun_project_attribute( + project + ) ) if project.spec.description: body["attributes"]["description"] = project.spec.description @@ -1189,16 +1189,16 @@ def _build_project_response( if owner_access_key: body["attributes"]["owner_access_key"] = owner_access_key if project.metadata.labels: - body["attributes"][ - "labels" - ] = iguazio_client._transform_mlrun_labels_to_iguazio_labels( - project.metadata.labels + body["attributes"]["labels"] = ( + iguazio_client._transform_mlrun_labels_to_iguazio_labels( + project.metadata.labels + ) ) if project.metadata.annotations: - body["attributes"][ - "annotations" - ] = iguazio_client._transform_mlrun_labels_to_iguazio_labels( - project.metadata.annotations + body["attributes"]["annotations"] = ( + iguazio_client._transform_mlrun_labels_to_iguazio_labels( + project.metadata.annotations + ) ) body["attributes"]["operational_status"] = ( operational_status.value diff --git a/tests/automation/release_notes/test_generate.py b/tests/automation/release_notes/test_generate.py index 274ccb64a8a..68ac5b9319b 100644 --- a/tests/automation/release_notes/test_generate.py +++ b/tests/automation/release_notes/test_generate.py @@ -192,13 +192,15 @@ def 
test_generate_release_notes(): ] automation.release_notes.generate.tempfile = unittest.mock.MagicMock() for case in cases: - with unittest.mock.patch( - "automation.release_notes.generate.ReleaseNotesGenerator._run_command" - ) as _run_command_mock, unittest.mock.patch( - "automation.release_notes.generate.ReleaseNotesGenerator._resolve_github_username" - ) as _resolve_github_user_mock, unittest.mock.patch( - "sys.stdout", new=io.StringIO() - ) as stdout_mock: + with ( + unittest.mock.patch( + "automation.release_notes.generate.ReleaseNotesGenerator._run_command" + ) as _run_command_mock, + unittest.mock.patch( + "automation.release_notes.generate.ReleaseNotesGenerator._resolve_github_username" + ) as _resolve_github_user_mock, + unittest.mock.patch("sys.stdout", new=io.StringIO()) as stdout_mock, + ): _run_command_mock.side_effect = case["_run_command"] _resolve_github_user_mock.side_effect = case["_resolve_github_username"] try: diff --git a/tests/integration/aws_s3/test_aws_s3.py b/tests/integration/aws_s3/test_aws_s3.py index 2cb935e5139..e38c520cde0 100644 --- a/tests/integration/aws_s3/test_aws_s3.py +++ b/tests/integration/aws_s3/test_aws_s3.py @@ -219,11 +219,10 @@ def test_directory(self, use_datastore_profile): # Create the DataFrames df1 = pd.DataFrame(data1) df2 = pd.DataFrame(data2) - with tempfile.NamedTemporaryFile( - suffix=".parquet", delete=True - ) as temp_file1, tempfile.NamedTemporaryFile( - suffix=".parquet", delete=True - ) as temp_file2: + with ( + tempfile.NamedTemporaryFile(suffix=".parquet", delete=True) as temp_file1, + tempfile.NamedTemporaryFile(suffix=".parquet", delete=True) as temp_file2, + ): # Save DataFrames as Parquet files df1.to_parquet(temp_file1.name, index=False) df2.to_parquet(temp_file2.name, index=False) @@ -255,11 +254,10 @@ def test_directory_csv(self, use_datastore_profile): # Create the DataFrames df1 = pd.DataFrame(data1) df2 = pd.DataFrame(data2) - with tempfile.NamedTemporaryFile( - suffix=".csv", delete=True - ) as temp_file1, tempfile.NamedTemporaryFile( - suffix=".csv", delete=True - ) as temp_file2: + with ( + tempfile.NamedTemporaryFile(suffix=".csv", delete=True) as temp_file1, + tempfile.NamedTemporaryFile(suffix=".csv", delete=True) as temp_file2, + ): # Save DataFrames as csv files df1.to_csv(temp_file1.name, index=False) df2.to_csv(temp_file2.name, index=False) diff --git a/tests/integration/azure_blob/test_azure_blob.py b/tests/integration/azure_blob/test_azure_blob.py index 2363f582f7e..905c31ed26a 100644 --- a/tests/integration/azure_blob/test_azure_blob.py +++ b/tests/integration/azure_blob/test_azure_blob.py @@ -276,11 +276,14 @@ def test_read_df_dir( # Create the DataFrames df1 = pd.DataFrame(data1) df2 = pd.DataFrame(data2) - with tempfile.NamedTemporaryFile( - suffix=f".{file_extension}", delete=True - ) as temp_file1, tempfile.NamedTemporaryFile( - suffix=f".{file_extension}", delete=True - ) as temp_file2: + with ( + tempfile.NamedTemporaryFile( + suffix=f".{file_extension}", delete=True + ) as temp_file1, + tempfile.NamedTemporaryFile( + suffix=f".{file_extension}", delete=True + ) as temp_file2, + ): first_file_path = temp_file1.name second_file_path = temp_file2.name writer(df1, temp_file1.name, index=False) diff --git a/tests/integration/google_cloud_storage/test_google_cloud_storage.py b/tests/integration/google_cloud_storage/test_google_cloud_storage.py index bbaaf1ebf88..26a33c7d84e 100644 --- a/tests/integration/google_cloud_storage/test_google_cloud_storage.py +++ 
b/tests/integration/google_cloud_storage/test_google_cloud_storage.py @@ -241,11 +241,14 @@ def test_directory(self, use_datastore_profile, file_format, write_method): # Create the DataFrames df1 = pd.DataFrame(data1) df2 = pd.DataFrame(data2) - with tempfile.NamedTemporaryFile( - suffix=f".{file_format}", delete=True - ) as temp_file1, tempfile.NamedTemporaryFile( - suffix=f".{file_format}", delete=True - ) as temp_file2: + with ( + tempfile.NamedTemporaryFile( + suffix=f".{file_format}", delete=True + ) as temp_file1, + tempfile.NamedTemporaryFile( + suffix=f".{file_format}", delete=True + ) as temp_file2, + ): # Save DataFrames as files write_method(df1, temp_file1.name, index=False) write_method(df2, temp_file2.name, index=False) diff --git a/tests/integration/sdk_api/run/test_main.py b/tests/integration/sdk_api/run/test_main.py index 298079db8de..40bcfd949a0 100644 --- a/tests/integration/sdk_api/run/test_main.py +++ b/tests/integration/sdk_api/run/test_main.py @@ -296,9 +296,9 @@ def test_main_run_nonpy_from_env(self): os.environ["MLRUN_EXEC_CODE"] = b64encode(nonpy_code.encode("utf-8")).decode( "utf-8" ) - os.environ[ - "MLRUN_EXEC_CONFIG" - ] = '{"spec":{},"metadata":{"uid":"123411", "name":"tst", "labels": {"kind": "job"}}}' + os.environ["MLRUN_EXEC_CONFIG"] = ( + '{"spec":{},"metadata":{"uid":"123411", "name":"tst", "labels": {"kind": "job"}}}' + ) # --kfp flag will force the logs to print (for the assert) out = self._exec_run( diff --git a/tests/rundb/test_httpdb.py b/tests/rundb/test_httpdb.py index 3be890ae949..d5f24cef678 100644 --- a/tests/rundb/test_httpdb.py +++ b/tests/rundb/test_httpdb.py @@ -64,9 +64,9 @@ def start_server(workdir, env_config: dict): port = free_port() env = environ.copy() env["MLRUN_httpdb__port"] = str(port) - env[ - "MLRUN_httpdb__dsn" - ] = f"sqlite:///{workdir}/mlrun.sqlite3?check_same_thread=false" + env["MLRUN_httpdb__dsn"] = ( + f"sqlite:///{workdir}/mlrun.sqlite3?check_same_thread=false" + ) env["MLRUN_httpdb__logs_path"] = workdir env.update(env_config or {}) cmd = [ diff --git a/tests/runtimes/test_run.py b/tests/runtimes/test_run.py index 29871f42ab4..c3b73dad1cf 100644 --- a/tests/runtimes/test_run.py +++ b/tests/runtimes/test_run.py @@ -64,9 +64,9 @@ def test_new_function_from_runtime(): runtime = _get_runtime() function = mlrun.new_function(runtime=runtime) expected_runtime = runtime - expected_runtime["spec"][ - "preemption_mode" - ] = mlrun.mlconf.function_defaults.preemption_mode + expected_runtime["spec"]["preemption_mode"] = ( + mlrun.mlconf.function_defaults.preemption_mode + ) assert ( DeepDiff( function.to_dict(), @@ -82,9 +82,9 @@ def test_new_function_args_without_command(): runtime["spec"]["command"] = "" function = mlrun.new_function(runtime=runtime) expected_runtime = runtime - expected_runtime["spec"][ - "preemption_mode" - ] = mlrun.mlconf.function_defaults.preemption_mode + expected_runtime["spec"]["preemption_mode"] = ( + mlrun.mlconf.function_defaults.preemption_mode + ) assert ( DeepDiff( function.to_dict(), @@ -137,9 +137,9 @@ def test_new_function_with_resources(): ]: expected_runtime = copy.deepcopy(runtime) expected_runtime["spec"]["resources"] = test_case.get("expected_resources") - expected_runtime["spec"][ - "preemption_mode" - ] = mlrun.mlconf.function_defaults.preemption_mode + expected_runtime["spec"]["preemption_mode"] = ( + mlrun.mlconf.function_defaults.preemption_mode + ) runtime["spec"]["resources"] = test_case.get("resources", None) mlrun.mlconf.default_function_pod_resources = 
test_case.get("default_resources") function = mlrun.new_function(runtime=runtime) @@ -273,12 +273,12 @@ def test_new_function_args_with_default_image_pull_secret(): runtime = _get_runtime() function = mlrun.new_function(runtime=runtime) expected_runtime = runtime - expected_runtime["spec"][ - "image_pull_secret" - ] = mlrun.mlconf.function.spec.image_pull_secret.default - expected_runtime["spec"][ - "preemption_mode" - ] = mlrun.mlconf.function_defaults.preemption_mode + expected_runtime["spec"]["image_pull_secret"] = ( + mlrun.mlconf.function.spec.image_pull_secret.default + ) + expected_runtime["spec"]["preemption_mode"] = ( + mlrun.mlconf.function_defaults.preemption_mode + ) assert ( DeepDiff( function.to_dict(), @@ -297,9 +297,9 @@ def test_new_function_override_default_image_pull_secret(): function = mlrun.new_function(runtime=runtime) expected_runtime = runtime expected_runtime["spec"]["image_pull_secret"] = new_secret - expected_runtime["spec"][ - "preemption_mode" - ] = mlrun.mlconf.function_defaults.preemption_mode + expected_runtime["spec"]["preemption_mode"] = ( + mlrun.mlconf.function_defaults.preemption_mode + ) assert ( DeepDiff( function.to_dict(), diff --git a/tests/system/model_monitoring/test_model_monitoring.py b/tests/system/model_monitoring/test_model_monitoring.py index 92f5ff61662..2cbaa2c45f4 100644 --- a/tests/system/model_monitoring/test_model_monitoring.py +++ b/tests/system/model_monitoring/test_model_monitoring.py @@ -824,9 +824,9 @@ def test_batch_drift(self): "p0": [0, 0], } ) - infer_results_df[ - mlrun.common.schemas.EventFieldType.TIMESTAMP - ] = datetime.utcnow() + infer_results_df[mlrun.common.schemas.EventFieldType.TIMESTAMP] = ( + datetime.utcnow() + ) # Record results and trigger the monitoring batch job endpoint_id = "123123123123" @@ -990,9 +990,9 @@ def custom_setup_class(cls) -> None: cls.training_set = cls.x_train.join(cls.y_train) cls.test_set = cls.x_test.join(cls.y_test) cls.infer_results_df = cls.test_set - cls.infer_results_df[ - mlrun.common.schemas.EventFieldType.TIMESTAMP - ] = datetime.utcnow() + cls.infer_results_df[mlrun.common.schemas.EventFieldType.TIMESTAMP] = ( + datetime.utcnow() + ) cls.endpoint_id = "5d6ce0e704442c0ac59a933cb4d238baba83bb5d" cls.function_name = f"{cls.name_prefix}-function" cls._train() @@ -1085,9 +1085,9 @@ def custom_setup_class(cls) -> None: cls.model_name = "clf_model" cls.infer_results_df = cls.train_set.copy() - cls.infer_results_df[ - mlrun.common.schemas.EventFieldType.TIMESTAMP - ] = datetime.utcnow() + cls.infer_results_df[mlrun.common.schemas.EventFieldType.TIMESTAMP] = ( + datetime.utcnow() + ) def custom_setup(self): mlrun.runtimes.utils.global_context.set(None) diff --git a/tests/utils/test_get_secrets.py b/tests/utils/test_get_secrets.py index 097717e3c9b..7e37acdce0e 100644 --- a/tests/utils/test_get_secrets.py +++ b/tests/utils/test_get_secrets.py @@ -32,9 +32,9 @@ def test_get_secret_from_env(): os.environ[key] = value assert mlrun.get_secret_or_env(key) == value - os.environ[ - SecretsStore.k8s_env_variable_name_for_secret(key) - ] = project_secret_value + os.environ[SecretsStore.k8s_env_variable_name_for_secret(key)] = ( + project_secret_value + ) # Project secrets should not override directly set env variables assert mlrun.get_secret_or_env(key) == value From cba3c087c376de16e681a2ff335e3723446843ec Mon Sep 17 00:00:00 2001 From: Jonathan Daniel <36337649+jond01@users.noreply.github.com> Date: Thu, 7 Mar 2024 16:40:08 +0200 Subject: [PATCH 067/119] [Linting] Use a fixed Ruff version [1.6.x] 
(#5259) --- dev-requirements.txt | 4 ++-- 1 file changed, 2 insertions(+), 2 deletions(-) diff --git a/dev-requirements.txt b/dev-requirements.txt index e9b6d80d7d5..6f39b8781c9 100644 --- a/dev-requirements.txt +++ b/dev-requirements.txt @@ -3,8 +3,8 @@ twine~=3.1 build~=1.0 # formatting & linting -ruff~=0.3.0 -import-linter~=1.8 +ruff==0.3.0 +import-linter~=2.0 # testing pytest~=7.4 From 55b8072a30fc93e5e25eedd01314e481580a6321 Mon Sep 17 00:00:00 2001 From: Alon Maor <48641682+alonmr@users.noreply.github.com> Date: Thu, 7 Mar 2024 23:41:20 +0200 Subject: [PATCH 068/119] [Project] Fail project deletion if doesn't exist in leader [1.6.x] (#5261) --- server/api/api/endpoints/projects.py | 44 +++++++--------------------- server/api/api/utils.py | 43 +++++++-------------------- tests/api/api/test_projects.py | 21 ++++++++----- 3 files changed, 35 insertions(+), 73 deletions(-) diff --git a/server/api/api/endpoints/projects.py b/server/api/api/endpoints/projects.py index a2e72e1f609..0b16bcacc82 100644 --- a/server/api/api/endpoints/projects.py +++ b/server/api/api/endpoints/projects.py @@ -227,38 +227,16 @@ async def delete_project( background_tasks.add_task(task) return fastapi.Response(status_code=http.HTTPStatus.ACCEPTED.value) - is_running_in_background = False - force_delete = False - try: - is_running_in_background = await run_in_threadpool( - get_project_member().delete_project, - db_session, - name, - deletion_strategy, - auth_info.projects_role, - auth_info, - wait_for_completion=wait_for_completion, - ) - except mlrun.errors.MLRunNotFoundError as exc: - if not server.api.utils.helpers.is_request_from_leader(auth_info.projects_role): - logger.debug( - "Project not found in leader, ensuring project deleted in mlrun", - err=mlrun.errors.err_to_str(exc), - ) - force_delete = True - - if force_delete: - # In this case the wrapper delete project request is the one deleting the project because it - # doesn't exist in the leader. 
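# --- Editor's sketch (not part of the patch, hypothetical names): the hunk
# above removes the fallback that force-deleted a project locally when the
# leader reported it missing. After the change, the leader's "not found"
# propagates and the deletion request fails fast. Schematically:
class NotFoundError(Exception):
    pass


def delete_project(name: str, leader, local_db) -> None:
    # Before: a NotFoundError from the leader was caught and followed by a
    # local force-delete. Now the leader is the source of truth, so its
    # error aborts the whole operation -- nothing is caught here.
    leader.delete_project(name)  # may raise NotFoundError
    local_db.delete_project(name)
# ---------------------------------------------------------------------------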
- await run_in_threadpool( - server.api.crud.Projects().delete_project, - db_session, - name, - deletion_strategy, - auth_info, - ) - - elif is_running_in_background: + is_running_in_background = await run_in_threadpool( + get_project_member().delete_project, + db_session, + name, + deletion_strategy, + auth_info.projects_role, + auth_info, + wait_for_completion=wait_for_completion, + ) + if is_running_in_background: return fastapi.Response(status_code=http.HTTPStatus.ACCEPTED.value) else: @@ -269,8 +247,6 @@ async def delete_project( ) await get_project_member().post_delete_project(name) - if force_delete: - return fastapi.Response(status_code=http.HTTPStatus.ACCEPTED.value) return fastapi.Response(status_code=http.HTTPStatus.NO_CONTENT.value) diff --git a/server/api/api/utils.py b/server/api/api/utils.py index bbe937bf259..44f0f42e810 100644 --- a/server/api/api/utils.py +++ b/server/api/api/utils.py @@ -1149,39 +1149,18 @@ async def _delete_project( wait_for_project_deletion: bool, background_task_name: str, ): - force_deleted = False - try: - await run_in_threadpool( - get_project_member().delete_project, - db_session, - project_name, - deletion_strategy, - auth_info.projects_role, - auth_info, - wait_for_completion=True, - background_task_name=background_task_name, - ) - except mlrun.errors.MLRunNotFoundError as exc: - if not server.api.utils.helpers.is_request_from_leader(auth_info.projects_role): - logger.warning( - "Project not found in leader, ensuring project is deleted in mlrun", - project_name=project_name, - exc=err_to_str(exc), - ) - force_deleted = True - - if force_deleted: - # In this case the wrapper delete project job is the one deleting the project because it - # doesn't exist in the leader. - await run_in_threadpool( - server.api.crud.Projects().delete_project, - db_session, - project_name, - deletion_strategy, - auth_info, - ) + await run_in_threadpool( + get_project_member().delete_project, + db_session, + project_name, + deletion_strategy, + auth_info.projects_role, + auth_info, + wait_for_completion=True, + background_task_name=background_task_name, + ) - elif wait_for_project_deletion: + if wait_for_project_deletion: await run_in_threadpool( verify_project_is_deleted, project_name, diff --git a/tests/api/api/test_projects.py b/tests/api/api/test_projects.py index cfe75d25ed0..cb1530f8951 100644 --- a/tests/api/api/test_projects.py +++ b/tests/api/api/test_projects.py @@ -980,17 +980,24 @@ def test_delete_project_not_found_in_leader( with unittest.mock.patch.object( mock_project_follower_iguazio_client, "delete_project", - side_effect=mlrun.errors.MLRunNotFoundError("Project not found"), + side_effect=mlrun.errors.MLRunNotFoundError("Project not found in Iguazio"), ): response = unversioned_client.delete( f"{delete_api_version}/projects/{project.metadata.name}", ) - assert response.status_code == HTTPStatus.ACCEPTED.value - - response = unversioned_client.get( - f"v1/projects/{project.metadata.name}", - ) - assert response.status_code == HTTPStatus.NOT_FOUND.value + if delete_api_version == "v1": + assert response.status_code == HTTPStatus.NOT_FOUND.value + assert "Project not found in Iguazio" in response.json()["detail"] + else: + background_task = mlrun.common.schemas.BackgroundTask(**response.json()) + background_task = server.api.utils.background_tasks.InternalBackgroundTasksHandler().get_background_task( + background_task.metadata.name + ) + assert ( + background_task.status.state + == mlrun.common.schemas.BackgroundTaskState.failed + ) + assert "Project not 
found in Iguazio" in background_task.status.error # Test should not run more than a few seconds because we test that if the background task fails, From d9bd3d0743c29667b33f527f8f49f98f75f68316 Mon Sep 17 00:00:00 2001 From: Liran BG Date: Sun, 10 Mar 2024 14:39:56 +0200 Subject: [PATCH 069/119] [Logger] Fix populating log formatter [1.6.x] (#5264) --- mlrun/config.py | 13 +++++++++++-- mlrun/utils/logger.py | 4 ++-- 2 files changed, 13 insertions(+), 4 deletions(-) diff --git a/mlrun/config.py b/mlrun/config.py index f46534be10e..7d30cfa260a 100644 --- a/mlrun/config.py +++ b/mlrun/config.py @@ -1346,12 +1346,21 @@ def read_env(env=None, prefix=env_prefix): if igz_domain: config["ui_url"] = f"https://mlrun-ui.{igz_domain}" - if config.get("log_level"): + if log_level := config.get("log_level"): import mlrun.utils.logger # logger created (because of imports mess) before the config is loaded (in tests), therefore we're changing its # level manually - mlrun.utils.logger.set_logger_level(config["log_level"]) + mlrun.utils.logger.set_logger_level(log_level) + + if log_formatter_name := config.get("log_formatter"): + import mlrun.utils.logger + + log_formatter = mlrun.utils.create_formatter_instance( + mlrun.utils.FormatterKinds(log_formatter_name) + ) + mlrun.utils.logger.get_handler("default").setFormatter(log_formatter) + # The default function pod resource values are of type str; however, when reading from environment variable numbers, # it converts them to type int if contains only number, so we want to convert them to str. _convert_resources_to_str(config) diff --git a/mlrun/utils/logger.py b/mlrun/utils/logger.py index e3b786a8a2d..3060b7cdbee 100644 --- a/mlrun/utils/logger.py +++ b/mlrun/utils/logger.py @@ -186,7 +186,7 @@ class FormatterKinds(Enum): JSON = "json" -def _create_formatter_instance(formatter_kind: FormatterKinds) -> logging.Formatter: +def create_formatter_instance(formatter_kind: FormatterKinds) -> logging.Formatter: return { FormatterKinds.HUMAN: HumanReadableFormatter(), FormatterKinds.HUMAN_EXTENDED: HumanReadableExtendedFormatter(), @@ -208,7 +208,7 @@ def create_logger( logger_instance = Logger(level, name=name, propagate=False) # resolve formatter - formatter_instance = _create_formatter_instance( + formatter_instance = create_formatter_instance( FormatterKinds(formatter_kind.lower()) ) From 3cb7f01d34365e410432dc001060090cb12dfc33 Mon Sep 17 00:00:00 2001 From: TomerShor <90552140+TomerShor@users.noreply.github.com> Date: Mon, 11 Mar 2024 10:05:23 +0200 Subject: [PATCH 070/119] [Pipelines] Omit `sort_by` and `filter` query params in paginated requests [1.6.x] (#5265) --- server/api/crud/pipelines.py | 31 ++++++++++++++++++++----------- 1 file changed, 20 insertions(+), 11 deletions(-) diff --git a/server/api/crud/pipelines.py b/server/api/crud/pipelines.py index 881a8679a29..8414ba03a95 100644 --- a/server/api/crud/pipelines.py +++ b/server/api/crud/pipelines.py @@ -66,13 +66,13 @@ def list_pipelines( if project != "*": run_dicts = [] while page_token is not None: - # kfp doesn't allow us to pass both a page_token and the filter. When we have a token from previous - # call, we will strip out the filter and use the token to continue (the token contains the details of - # the filter that was used to create it) + # kfp doesn't allow us to pass both a page_token and the `filter` and `sort_by` params. 
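# --- Editor's sketch (not part of the patch): the constraint described in
# this comment, distilled into a standalone loop. `api` stands in for kfp's
# run API client; only the first request may carry `filter`/`sort_by`, and
# every follow-up request sends the page token alone:
def list_all_runs(api, filter_: str = "", sort_by: str = "", page_size: int = 200):
    runs, page_token = [], ""
    while page_token is not None:
        response = api.list_runs(
            page_token=page_token,
            page_size=page_size,
            sort_by=sort_by if page_token == "" else "",
            filter=filter_ if page_token == "" else "",
        )
        runs.extend(response.runs or [])
        # assumed: the client returns None for next_page_token on the last page
        page_token = response.next_page_token
    return runs
# ---------------------------------------------------------------------------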
+ # When we have a token from previous call, we will strip out the filter and use the token to continue + # (the token contains the details of the filter that was used to create it) response = kfp_client._run_api.list_runs( page_token=page_token, page_size=mlrun.common.schemas.PipelinesPagination.max_page_size, - sort_by=sort_by, + sort_by=sort_by if page_token == "" else "", filter=filter_ if page_token == "" else "", ) run_dicts.extend([run.to_dict() for run in response.runs or []]) @@ -86,13 +86,22 @@ def list_pipelines( total_size = len(runs) next_page_token = None else: - response = kfp_client._run_api.list_runs( - page_token=page_token, - page_size=page_size - or mlrun.common.schemas.PipelinesPagination.default_page_size, - sort_by=sort_by, - filter=filter_, - ) + try: + response = kfp_client._run_api.list_runs( + page_token=page_token, + page_size=page_size + or mlrun.common.schemas.PipelinesPagination.default_page_size, + sort_by=sort_by, + filter=filter_, + ) + except kfp_server_api.ApiException as exc: + # extract the summary of the error message from the exception + error_message = exc.body or exc.reason or exc + if "message" in error_message: + error_message = error_message["message"] + raise mlrun.errors.err_for_status_code( + exc.status, err_to_str(error_message) + ) from exc runs = [run.to_dict() for run in response.runs or []] runs = self._filter_runs_by_name(runs, name_contains) next_page_token = response.next_page_token From f7b5b7273ae0258aefbb4740628a974c4203b4aa Mon Sep 17 00:00:00 2001 From: tomer-mamia <125267619+tomerm-iguazio@users.noreply.github.com> Date: Mon, 11 Mar 2024 10:54:16 +0200 Subject: [PATCH 071/119] [Datastore] Fix v3io datastore - update to v3io-py api [1.6.x] (#5246) --- mlrun/datastore/base.py | 28 --- mlrun/datastore/v3io.py | 116 ++++++---- requirements.txt | 4 +- tests/common_fixtures.py | 32 +++ .../test_dbfs_store/test_dbfs_store.py | 17 +- .../datastore/assets/additional_data.csv | 4 + .../datastore/assets/additional_data.parquet | Bin 0 -> 4336 bytes tests/system/datastore/assets/test.txt | 2 + .../datastore/assets/testdata_short.json | 3 + .../datastore/assets/testdata_short.parquet | Bin 3891 -> 4317 bytes tests/system/datastore/test_dbfs.py | 5 +- tests/system/datastore/test_v3io.py | 211 +++++++++++++++--- tests/test_requirements.py | 2 +- 13 files changed, 310 insertions(+), 114 deletions(-) create mode 100644 tests/system/datastore/assets/additional_data.csv create mode 100644 tests/system/datastore/assets/additional_data.parquet create mode 100644 tests/system/datastore/assets/test.txt create mode 100644 tests/system/datastore/assets/testdata_short.json diff --git a/mlrun/datastore/base.py b/mlrun/datastore/base.py index 0328db47a8e..b7ebb7ac34f 100644 --- a/mlrun/datastore/base.py +++ b/mlrun/datastore/base.py @@ -654,34 +654,6 @@ def http_get(url, headers=None, auth=None): return response.content -def http_head(url, headers=None, auth=None): - try: - response = requests.head(url, headers=headers, auth=auth, verify=verify_ssl) - except OSError as exc: - raise OSError(f"error: cannot connect to {url}: {err_to_str(exc)}") - - mlrun.errors.raise_for_status(response) - - return response.headers - - -def http_put(url, data, headers=None, auth=None, session=None): - try: - put_api = session.put if session else requests.put - response = put_api( - url, data=data, headers=headers, auth=auth, verify=verify_ssl - ) - except OSError as exc: - raise OSError(f"error: cannot connect to {url}: {err_to_str(exc)}") from exc - - 
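# --- Editor's sketch (not part of the patch): the raw HTTP helpers removed
# above are superseded by calls through the v3io SDK client; SDK failures are
# translated centrally by the `_do_object_request` wrapper added to v3io.py
# later in this patch. The pattern, assuming v3io-py's HttpResponseError
# exposes `status_code`:
import mlrun.errors
from v3io.dataplane.response import HttpResponseError


def do_object_request(function: callable, *args, **kwargs):
    try:
        return function(*args, **kwargs)
    except HttpResponseError as http_error:
        # map the v3io HTTP status onto the matching MLRun error class
        raise mlrun.errors.err_for_status_code(
            status_code=http_error.status_code,
            message=mlrun.errors.err_to_str(http_error),
        )
# ---------------------------------------------------------------------------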
mlrun.errors.raise_for_status(response) - - -def http_upload(url, file_path, headers=None, auth=None): - with open(file_path, "rb") as data: - http_put(url, data, headers, auth) - - class HttpStore(DataStore): def __init__(self, parent, schema, name, endpoint="", secrets: dict = None): super().__init__(parent, name, schema, endpoint, secrets) diff --git a/mlrun/datastore/v3io.py b/mlrun/datastore/v3io.py index f34fefe132c..ed4e3ace6e2 100644 --- a/mlrun/datastore/v3io.py +++ b/mlrun/datastore/v3io.py @@ -15,12 +15,11 @@ import mmap import os import time -from copy import deepcopy from datetime import datetime import fsspec -import requests -import v3io.dataplane +import v3io +from v3io.dataplane.response import HttpResponseError import mlrun from mlrun.datastore.helpers import ONE_GB, ONE_MB @@ -30,11 +29,6 @@ DataStore, FileStats, basic_auth_header, - get_range, - http_get, - http_head, - http_put, - http_upload, ) V3IO_LOCAL_ROOT = "v3io" @@ -47,17 +41,18 @@ def __init__(self, parent, schema, name, endpoint="", secrets: dict = None): self.headers = None self.secure = self.kind == "v3ios" + + token = self._get_secret_or_env("V3IO_ACCESS_KEY") + username = self._get_secret_or_env("V3IO_USERNAME") + password = self._get_secret_or_env("V3IO_PASSWORD") if self.endpoint.startswith("https://"): self.endpoint = self.endpoint[len("https://") :] self.secure = True elif self.endpoint.startswith("http://"): self.endpoint = self.endpoint[len("http://") :] self.secure = False - - token = self._get_secret_or_env("V3IO_ACCESS_KEY") - username = self._get_secret_or_env("V3IO_USERNAME") - password = self._get_secret_or_env("V3IO_PASSWORD") - + self.client = v3io.dataplane.Client(access_key=token, endpoint=self.url) + self.object = self.client.object self.auth = None self.token = token if token: @@ -65,6 +60,16 @@ def __init__(self, parent, schema, name, endpoint="", secrets: dict = None): elif username and password: self.headers = basic_auth_header(username, password) + @staticmethod + def _do_object_request(function: callable, *args, **kwargs): + try: + return function(*args, **kwargs) + except HttpResponseError as http_response_error: + raise mlrun.errors.err_for_status_code( + status_code=http_response_error.status_code, + message=mlrun.errors.err_to_str(http_response_error), + ) + @staticmethod def uri_to_ipython(endpoint, subpath): return V3IO_LOCAL_ROOT + subpath @@ -91,13 +96,19 @@ def get_storage_options(self): def _upload(self, key: str, src_path: str, max_chunk_size: int = ONE_GB): """helper function for upload method, allows for controlling max_chunk_size in testing""" + container, path = split_path(self._join(key)) file_size = os.path.getsize(src_path) # in bytes if file_size <= ONE_MB: - http_upload(self.url + self._join(key), src_path, self.headers, None) + with open(src_path, "rb") as source_file: + data = source_file.read() + self._do_object_request( + self.object.put, + container=container, + path=path, + body=data, + append=False, + ) return - append_header = deepcopy(self.headers) - append_header["Range"] = "-1" - # chunk must be a multiple of the ALLOCATIONGRANULARITY # https://docs.python.org/3/library/mmap.html if residue := max_chunk_size % mmap.ALLOCATIONGRANULARITY: @@ -114,11 +125,13 @@ def _upload(self, key: str, src_path: str, max_chunk_size: int = ONE_GB): access=mmap.ACCESS_READ, offset=file_offset, ) as mmap_obj: - http_put( - self.url + self._join(key), - mmap_obj, - append_header if file_offset else self.headers, - None, + append = file_offset != 0 + 
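# --- Editor's note (illustrative): the chunking logic in _upload() above has
# to respect mmap's mapping granularity -- the `offset` handed to mmap.mmap()
# must be a multiple of mmap.ALLOCATIONGRANULARITY, so the chunk size is
# rounded up with the same rule shown here as a standalone helper:
import mmap


def align_chunk_size(requested: int) -> int:
    """Round `requested` up to the next multiple of the mmap granularity."""
    if residue := requested % mmap.ALLOCATIONGRANULARITY:
        requested += mmap.ALLOCATIONGRANULARITY - residue
    return requested


assert align_chunk_size(100 * 1024) % mmap.ALLOCATIONGRANULARITY == 0
# ---------------------------------------------------------------------------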
self._do_object_request( + self.object.put, + container=container, + path=path, + body=mmap_obj, + append=append, ) file_offset += chunk_size @@ -126,43 +139,55 @@ def upload(self, key, src_path): return self._upload(key, src_path) def get(self, key, size=None, offset=0): - headers = self.headers - if size or offset: - headers = deepcopy(headers) - headers["Range"] = get_range(size, offset) - return http_get(self.url + self._join(key), headers) + container, path = split_path(self._join(key)) + return self._do_object_request( + function=self.object.get, + container=container, + path=path, + offset=offset, + num_bytes=size, + ).body - def _put(self, key, data, max_chunk_size: int = ONE_GB): + def _put(self, key, data, append=False, max_chunk_size: int = ONE_GB): """helper function for put method, allows for controlling max_chunk_size in testing""" + container, path = split_path(self._join(key)) buffer_size = len(data) # in bytes if buffer_size <= ONE_MB: - http_put(self.url + self._join(key), data, self.headers, None) + self._do_object_request( + self.object.put, + container=container, + path=path, + body=data, + append=append, + ) return - append_header = deepcopy(self.headers) - append_header["Range"] = "-1" buffer_offset = 0 try: data = memoryview(data) except TypeError: pass - with requests.Session() as requests_session: - while buffer_offset < buffer_size: - chunk_size = min(buffer_size - buffer_offset, max_chunk_size) - http_put( - self.url + self._join(key), - data[buffer_offset : buffer_offset + chunk_size], - append_header if buffer_offset else self.headers, - None, - requests_session, - ) - buffer_offset += chunk_size + while buffer_offset < buffer_size: + chunk_size = min(buffer_size - buffer_offset, max_chunk_size) + append = True if buffer_offset or append else False + self._do_object_request( + self.object.put, + container=container, + path=path, + body=data[buffer_offset : buffer_offset + chunk_size], + append=append, + ) + buffer_offset += chunk_size def put(self, key, data, append=False): - return self._put(key, data) + return self._put(key, data, append) def stat(self, key): - head = http_head(self.url + self._join(key), self.headers) + container, path = split_path(self._join(key)) + response = self._do_object_request( + function=self.object.head, container=container, path=path + ) + head = dict(response.headers) size = int(head.get("Content-Length", "0")) datestr = head.get("Last-Modified", "0") modified = time.mktime( @@ -171,7 +196,6 @@ def stat(self, key): return FileStats(size, modified) def listdir(self, key): - v3io_client = v3io.dataplane.Client(endpoint=self.url, access_key=self.token) container, subpath = split_path(self._join(key)) if not subpath.endswith("/"): subpath += "/" @@ -180,7 +204,7 @@ def listdir(self, key): subpath_length = len(subpath) - 1 try: - response = v3io_client.container.list( + response = self.client.container.list( container=container, path=subpath, get_all_attributes=False, diff --git a/requirements.txt b/requirements.txt index 2eb5b610e09..adb19a6e16c 100644 --- a/requirements.txt +++ b/requirements.txt @@ -21,7 +21,7 @@ pyyaml~=5.1 requests~=2.31 # >=0.8.6 from kfp 1.6.0 (and still up until 1.8.10) tabulate~=0.8.6 -v3io~=0.5.21 +v3io~=0.6.2 # pydantic 1.10.8 fixes a bug with literal and typing-extension 4.6.0 # https://docs.pydantic.dev/latest/changelog/#v1108-2023-05-23 pydantic~=1.10, >=1.10.8 @@ -32,7 +32,7 @@ dependency-injector~=4.41 # should be identical to gcs and s3fs. 
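# --- Editor's note (illustrative, not part of the patch): the bumps above use
# pip's compatible-release operator. `v3io~=0.6.2` accepts any 0.6.x release
# at or above 0.6.2 but rejects 0.7.0, while `==` pins one exact version:
#   somepkg~=1.4.2   # equivalent to >=1.4.2,<1.5.0
#   somepkg==1.4.2   # exactly 1.4.2
# ---------------------------------------------------------------------------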
fsspec==2023.9.2 v3iofs~=0.1.17 -storey~=1.6.18 +storey~=1.6.19 inflection~=0.5.0 python-dotenv~=0.17.0 # older version of setuptools contains vulnerabilities, see `GHSA-r9hx-vwmv-q579`, so we limit to 65.5 and above diff --git a/tests/common_fixtures.py b/tests/common_fixtures.py index 1f6d9cc5828..20b5f181f22 100644 --- a/tests/common_fixtures.py +++ b/tests/common_fixtures.py @@ -28,6 +28,8 @@ import pytest import requests import v3io.dataplane +import v3io.dataplane.object +import v3io.dataplane.response from aioresponses import aioresponses as aioresponses_ import mlrun.common.schemas @@ -139,6 +141,17 @@ def chdir_to_test_location(request): @pytest.fixture def patch_file_forbidden(monkeypatch): + class MockV3ioObject: + def get(self, *args, **kwargs): + raise v3io.dataplane.response.HttpResponseError( + "error", HTTPStatus.FORBIDDEN.value + ) + + def head(self, *args, **kwargs): + raise v3io.dataplane.response.HttpResponseError( + "error", HTTPStatus.FORBIDDEN.value + ) + class MockV3ioClient: def __init__(self, *args, **kwargs): self.container = self @@ -146,6 +159,10 @@ def __init__(self, *args, **kwargs): def list(self, *args, **kwargs): raise RuntimeError("Permission denied") + @property + def object(self): + return MockV3ioObject() + mock_get = mock_failed_get_func(HTTPStatus.FORBIDDEN.value) monkeypatch.setattr(requests, "get", mock_get) @@ -155,6 +172,17 @@ def list(self, *args, **kwargs): @pytest.fixture def patch_file_not_found(monkeypatch): + class MockV3ioObject: + def get(self, *args, **kwargs): + raise v3io.dataplane.response.HttpResponseError( + "error", HTTPStatus.NOT_FOUND.value + ) + + def head(self, *args, **kwargs): + raise v3io.dataplane.response.HttpResponseError( + "error", HTTPStatus.NOT_FOUND.value + ) + class MockV3ioClient: def __init__(self, *args, **kwargs): self.container = self @@ -162,6 +190,10 @@ def __init__(self, *args, **kwargs): def list(self, *args, **kwargs): raise FileNotFoundError + @property + def object(self): + return MockV3ioObject() + mock_get = mock_failed_get_func(HTTPStatus.NOT_FOUND.value) monkeypatch.setattr(requests, "get", mock_get) diff --git a/tests/integration/test_dbfs_store/test_dbfs_store.py b/tests/integration/test_dbfs_store/test_dbfs_store.py index 8338ed08d63..7c3fb2b092a 100644 --- a/tests/integration/test_dbfs_store/test_dbfs_store.py +++ b/tests/integration/test_dbfs_store/test_dbfs_store.py @@ -213,15 +213,11 @@ def test_as_df( assert source.equals(response) @pytest.mark.parametrize( - "file_extension, local_file_path, reader", + "file_extension, local_file_path, reader, reader_args", [ - ( - "parquet", - parquet_path, - dd.read_parquet, - ), - ("csv", csv_path, dd.read_csv), - ("json", json_path, dd.read_json), + ("parquet", parquet_path, dd.read_parquet, {}), + ("csv", csv_path, dd.read_csv, {}), + ("json", json_path, dd.read_json, {"orient": "values"}), ], ) def test_as_df_dd( @@ -230,12 +226,13 @@ def test_as_df_dd( file_extension: str, local_file_path: str, reader: callable, + reader_args: dict, ): if use_datastore_profile: pytest.skip( "dask dataframe is not supported by datastore profile." 
) # TODO add support - source = reader(local_file_path) + source = reader(local_file_path, **reader_args) upload_file_path = ( f"{self.dbfs_store_path}/file_{uuid.uuid4()}.{file_extension}" ) @@ -246,7 +243,7 @@ def test_as_df_dd( ) upload_data_item = mlrun.run.get_dataitem(dataitem_url) upload_data_item.upload(local_file_path) - response = upload_data_item.as_df(df_module=dd) + response = upload_data_item.as_df(df_module=dd, **reader_args) assert dd.assert_eq(source, response) def _setup_df_dir( diff --git a/tests/system/datastore/assets/additional_data.csv b/tests/system/datastore/assets/additional_data.csv new file mode 100644 index 00000000000..c3122f3e77d --- /dev/null +++ b/tests/system/datastore/assets/additional_data.csv @@ -0,0 +1,4 @@ +id,name,number,float_number,date_of_birth +4,Alice,40,4.5,1990-05-15 +5,Daniel,50,5.5,1985-10-22 +6,Charlie,60,6.5,1992-12-05 diff --git a/tests/system/datastore/assets/additional_data.parquet b/tests/system/datastore/assets/additional_data.parquet new file mode 100644 index 0000000000000000000000000000000000000000..14c3b5129cb5ad30148fd3b63656fccba66820a4 GIT binary patch literal 4336 zcmcgw&2Ji45MLaOiRDz0l|@jY%0daUD#wL2xFJd+tIf2mK>W(C;1E1gP~H zQcy16T!5#~vvB1m75&2#roEvg<+~mVE@%q95byze9LX`5R2*+#)dvI|Td$ba#M>6IaJ zuPGWT!Tzl$@SP|6@0IAwE8%GuGeJ7&UPrnah=t>x{lb)7bdNMa|N77d>7JH?(gFJW zr3<=!X4jC^_5|gxe9=D_f%IuVbG*;VXJbTqkfIhn-qoGOrFAN>VE+=Wtkm?TXdOF~dhLn+bT#_rRY+>u+ngA4l1hw|dO}g(^ge9< z1uj4T`EccLsHYD%e`qc*&q~bJV0hcs4(gV=6Z`tTi*vfBh?aWXtQ|{Q+iHxC>vvD| zvun{;D%7F%W!FKE#O6@=oOfdy1qs2hXEha3~Pp>mqF`b z+}1m>>R+vf?OF=>LgCPpw|U#6|IdAlzV#ateMdzbeOEOUcxx4oGMdTD@FpIQBv#v%q)*{$DL11=u9abGnY+JC5 zk$JNP(=9SpaT-5=3JcrcF>$UA-8#M0pt&o@PMHns*c!AjT%>Dia#OJpbdAd?J{?h^Qu*Yi1qy9zDw5z7sj00OLdv(G=Y8KrxiLP-v#LoggeYFpR|$ z5Rjnr4L~wX%YILVAzF~S0$ncM=~(1jF`bm%Zc*-vH_iYoCdeI&-;~&7C!Y@K*17@{ z^(;;&6mJUd=(f|*r_`?SsJAqzXrRQI`be{#cx-l_qTUUI)OK4Ht!A5)oUTc2u|2?w zYYoJ9yV=KGJ++l@9)}ccoHu}&$Dcwg?;cwCz&Hy;u<7J zV4wb&H}tN+l@b*u-;?%AmSi4i545zT4sva=oIK@EnswgHH6%mPd97dPjbuYAAE8ZG zDW7%rGpr_W6&tcy02$jKu6uHZ-{yA@I(#PgT;WQ6^m9;&E2ytBsg6>PcPoq?Td0g< zcB|#Mj<%9n>PQ)kC$xm}*=dED5tqvX_U!Y@Rxv4ah>6vdafPlbm(#s-Y>zFJDf z)JmH8<&Bgk8l@8@L-dwVDK@}Itojh}?sYVwoIH`3VoNe);;&RHa{G_!TFr%B9)Nw; z0{%w0SWPS!G|W#hbY0?#HOa_YfMXkQv;;d|zY3gIEBTtTr}sxX$t*B=TR#J@-c@$1 zF`N6iu5ek{Uu=vy+KY7%_e3n4LvmJb?%0tXax)7{Trzabe{|Mysgh1k@+&d9XF@rD zDLilnno!~Aaf3OJ_414}g*uGoPi$_XE-ZOZpU0o`Jh^0C;?7e08Lab*a}tcR9&lJg zUsap?-x@ zswe1B+m&2QoL_6ir^4x2mvin%u>torCc-_;pA;p@%q#P2Z+M@#aynq`$(ntKBo6+n zGqMBKuKm7R&JJWFwMG1xf-al+mc%5;{W^G5=pJMe1Kxx>o~%hY7n#v}68g;WgZ-2H z-FzmO!qx?Sk!AOW=YahT``rWVcbN`6SQQ_;r2j#G1l1w*S6mjFq%%S@#07ccV;83! z$-5>oLal1(1BJsNyAQB6DK?nl_r0sLgrbaalvMQSa@A$R{ z&$h;Rc;W<k$0B3L#)S{IJUrakw5Tyvz}i3a*T;3TF6B+JGl zLsBqoC9z`*HXgS>^6B98Q!pam6W9+X$PXEW{^9%+D!A4X+t0eHbz5$=Zj+DX+VCT| XW`7<-PvFm&$sg*U6^c59|8@KaB&<1i literal 0 HcmV?d00001 diff --git a/tests/system/datastore/assets/test.txt b/tests/system/datastore/assets/test.txt new file mode 100644 index 00000000000..bec7a6c2cb9 --- /dev/null +++ b/tests/system/datastore/assets/test.txt @@ -0,0 +1,2 @@ +This is just a test file, meant to test the upload functionality. +Nothing really interesting here. 
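# --- Editor's sketch (not part of the patch): a minimal usage example of the
# data-item API that the new test assets above exercise. The path is a
# placeholder and a configured v3io backend (V3IO_API / V3IO_ACCESS_KEY in the
# environment) is assumed:
import mlrun

item = mlrun.get_dataitem("v3io:///bigdata/v3io_tests/example.txt")
item.put("hello v3io")       # small writes go through a single object PUT
print(item.get().decode())   # read the object back via the v3io SDK client
print(item.stat().size)      # HEAD-based metadata (size, modified time)
item.delete()
# ---------------------------------------------------------------------------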
diff --git a/tests/system/datastore/assets/testdata_short.json b/tests/system/datastore/assets/testdata_short.json new file mode 100644 index 00000000000..9bcef908486 --- /dev/null +++ b/tests/system/datastore/assets/testdata_short.json @@ -0,0 +1,3 @@ +{"id":1,"name":"John","number":10,"float_number":1.5,"date_of_birth":"1990-01-01T00:00:00.000"} +{"id":2,"name":"Jane","number":20,"float_number":2.5,"date_of_birth":"1995-05-10T00:00:00.000"} +{"id":3,"name":"Bob","number":30,"float_number":3.5,"date_of_birth":"1985-12-15T00:00:00.000"} diff --git a/tests/system/datastore/assets/testdata_short.parquet b/tests/system/datastore/assets/testdata_short.parquet index d119dc1922eda913f76ded9c780f848fb26dfb11..2b80b187784f32ca8922a6cdf9a8256f5636e380 100644 GIT binary patch literal 4317 zcmcgw&2Ji45MOLUi0epEl%-Ii%0das!FFK{ZirGP3l8h0HV!1_W0T0*FJSH6g}`D= zA|KmRPf<@*)n2Oh+P@*E9{LwVJyktaJyt#B&_ie5vY1^Ul2s*7_U?N#^JaeYd-G;x zKVZvA+D|9w^&L7uQ&%Y}?DtWZe0LTodn}ILG(}&d!>eF)1%3n3aM00&gT9eXh`KpL z3d*JH=q}(}SiDZf|L{d=eu0+BMnnEvx0-@Kw&~LtYIUYQZ-^eN;r|1O=9#`N+ zQLtX887GquYkrUw+AWj3MN>t{*|y}(Ws|z;N3-xED8a-9_mw|NFOT5*P0>&Z;O~5) zAARw^7USP8M%~+IqH^)Og#2EEI5>HBW zo#6OoApXk*V0qfl9Pe}TS#?+rQ`7~Y|IOVC7jIJjdp6SLd(-JSnsCsby}644r==iq zrSobjN-vH0UvB7a(K>J^_1YKz`BMB(ES+g@b7IU%Dm6~(2}NeY*+`@u!G@ZeN&U$ijAOebcSO^TRK!_3y~SB zCBnrM!Td!`HNjjno49xXQGV@K>~sQ*8%0c0aK8)1NajJInbu~Cu;9Wl7EeGxf=*Wf z$tW%RJr#y%LFx%~zI3N!k?+KGQg*9FxhLK@1F)DNcPxHeVv}8bI;2^()j7Y3>At}0sXE-5)(le%Jeh!xivirsE| zfV+;>Zcl5QC{3o;6YES2s=}dvLdmQr*Hf`mD!j~=%X`(^c7G55difI^Cv2W&*%JKX z_?%@^Y_^HACYx&#SvKp^4^Vi2tlt@I*=u+bFGSC??0|)R@J1)OeOTfeB>P~W{ggNK zzQ9#dHKuqhZC5PG+|~B9tfUSLU9p-z;t$(R-Ym2vL(zF{(BzGDORDapO<$>=^mcNr zCU2BmvRMKdI~c8xiHaX+kx9C^6-ZWXQx{rBdd09yPUw2fI83`@9AGIk;F& ztd=y)PcU>{;>r!l$XkG86L53{JKmrUoYrf_hO(^>9GzsAn4+zpfmiPmdznKD=&AfV~5<#!jhB>9rJh2Iw{q%=}CSirtnOt7SDwT&Oj4t z{5)c4T;ON&Uub_&U%f>7&WHQk5Ulp^`Zkbg1oG zAtBDMHR4m@bgauc_oLi``0+gmaM@{a8YuIexfvc)wrF6*Abm zpf9rQ_UIh2pJBh-!+w|R!Gl%tu|@hH^hZ!Vfc}chQk!%}Xok2TPke0Qlp}dJBt~e| z4SlF^7-VY?TQg2r?m5hE!SwzXahfCh$rOYzFhJHm=X}PgX^_>~9C*jKJ$SY=#={dQ zpoDRThvbK~TP^4^z%U$p@`+&Cq_ZyVlTF*+{kY~xy%P=ed%#IdOGuWDM~0+e*h*r@ zmTWv;f8^7_=|^Bhz9+CBOpqTk2>qk^XHzxnepFrEnT@e?T&|VL3j7+1}dDU0%gpd=gSdx zc^Lndy4jMQ+PG6MeEiR+`G2rT@Wnuf9I~>33LHi`W19 z)i)zg9tWr&@=&G2dneRM$NoEelBctp?C7;ty`~r)r|G5ZrH_BP!arL<&!)u9Q8LT3 zu$g4IKv!+*mdq|gmt0!F&STgW*wC83#Oy@CK8g%$hGCZh9gDb&kK20b!p=A6BWpDB zp+GnizW+WOoPYOjWeO|3RgbCuMbLEB}toy`RD zLie`^(_~9`WauIpgxWpRe6qHBqJ#>bQsYr46!mOFZE5bdfx{9&sh#)NJ@Q#e%O$1z1ry0ne1p;%d`dN;obSi73ZBabDEQ%&;iFZNY z!@L2q?Q|1lHq}^4(51A8^|Y;uKyfP>Q>1iK+9)N}+yU4`3|UMH#lwALCqoQHEDIH} z)YNjf(B7UV9(U_tvpy({=t?#f@qqGIEz?wTrTU1CF5f&`>~c>Qi$_(^P4(GjN8L8- z+P2vr`gl~a+Gb_rt?Yq#6-?Eb7aQ>T1n*bG+9ut@r#*S zE(`ZdO^mx@m%GX~15x)QI}E0#IXa*cU*qQ9mTPgzpD&{pkIYksX!nSzXUi8U~z-oQ7ZDwWz$ zL-AtP>8UZnt%X^&S9W_swaQ>?YJf3^TI|YVVNj6@Nm~1MItlxMvY}ErnpGaCaq~zq zuxIkeyLS`)okL<^9gp-!yebtQ$zrNUWgOn>6*CDTZwuJPIed9}fGx}mLayTLHuiry zf!jTNxkDj=qXrJ?Op_AnWTEqb9!U`w>UTMHR5eqEEENP9iMvM8((0eVnO;_Pje;;k@tSzy!f^?dgM4@K6vpD8jX8QXb4qLSzwzHPBDE zuM$c`zzsj$5pw-H!>zXE&X+yixuUk)SLheU>hKF;)%~gn--jQ>qkqiy8HU+|f0zFU DKQgO> diff --git a/tests/system/datastore/test_dbfs.py b/tests/system/datastore/test_dbfs.py index 3a3f5fb1b37..9e7f98be9e1 100644 --- a/tests/system/datastore/test_dbfs.py +++ b/tests/system/datastore/test_dbfs.py @@ -153,5 +153,8 @@ def test_ingest_with_dbfs( result.reset_index(inplace=True, drop=False) assert_frame_equal( - expected.sort_index(axis=1), result.sort_index(axis=1), check_like=True + expected.sort_index(axis=1), + result.sort_index(axis=1), + check_like=True, 
+ check_dtype=False, ) diff --git a/tests/system/datastore/test_v3io.py b/tests/system/datastore/test_v3io.py index 7d62dadc1ef..f1e107e1322 100644 --- a/tests/system/datastore/test_v3io.py +++ b/tests/system/datastore/test_v3io.py @@ -15,7 +15,11 @@ import os import random import subprocess +import tempfile +import uuid +from urllib.parse import urlparse +import pandas as pd import pytest import mlrun.datastore @@ -25,10 +29,63 @@ @TestMLRunSystem.skip_test_if_env_not_configured @pytest.mark.enterprise class TestV3ioDataStore(TestMLRunSystem): + @classmethod + def setup_class(cls): + super().setup_class() + cls.test_file_path = str(cls.get_assets_path() / "test.txt") + test_parquet_path = str(cls.get_assets_path() / "testdata_short.parquet") + test_csv_path = str(cls.get_assets_path() / "testdata_short.csv") + test_json_path = str(cls.get_assets_path() / "testdata_short.json") + cls.df_paths = { + "parquet": test_parquet_path, + "csv": test_csv_path, + "json": test_json_path, + } + test_additional_parquet_path = str( + cls.get_assets_path() / "additional_data.parquet" + ) + test_additional_csv_path = str(cls.get_assets_path() / "additional_data.csv") + cls.additional_df_paths = { + "parquet": test_additional_parquet_path, + "csv": test_additional_csv_path, + } + with open(cls.test_file_path) as f: + cls.test_string = f.read() + cls.test_dir_path = "/bigdata/v3io_tests" + cls.v3io_test_dir_url = "v3io://" + cls.test_dir_path + + @classmethod + def teardown_class(cls): + dir_data_item = mlrun.get_dataitem(cls.v3io_test_dir_url) + try: + dir_data_item.delete(recursive=True) + except Exception: + pass + super().teardown_class() + + def setup_method(self, method): + self.object_dir_url = f"{self.v3io_test_dir_url}/directory-{uuid.uuid4()}" + super().setup_method(method) + @staticmethod def _skip_set_environment(): return True + def _get_data_item(self, secrets={}, file_extension="txt"): + object_url = f"{self.object_dir_url}/file_{uuid.uuid4()}.{file_extension}" + return mlrun.run.get_dataitem(object_url, secrets=secrets), object_url + + def _setup_df_dir(self, first_file_path, second_file_path, file_extension): + dataitem_url = f"{self.object_dir_url}/df_{uuid.uuid4()}.{file_extension}" + + uploaded_data_item = mlrun.run.get_dataitem(dataitem_url) + uploaded_data_item.upload(first_file_path) + + dataitem_url = f"{self.object_dir_url}/df_{uuid.uuid4()}.{file_extension}" + + uploaded_data_item = mlrun.run.get_dataitem(dataitem_url) + uploaded_data_item.upload(second_file_path) + def test_v3io_large_object_upload(self, tmp_path): tempfile_1_path = os.path.join(tmp_path, "tempfile_1") tempfile_2_path = os.path.join(tmp_path, "tempfile_2") @@ -42,10 +99,7 @@ def test_v3io_large_object_upload(self, tmp_path): offset = r.randint(0, file_size - 1) f.seek(offset) f.write(bytearray([i])) - object_path = "/bigdata/test_v3io_large_object_upload" - v3io_object_url = "v3io://" + object_path - - data_item = mlrun.datastore.store_manager.object(v3io_object_url) + data_item, object_url = self._get_data_item() try: self._logger.debug( @@ -65,7 +119,7 @@ def test_v3io_large_object_upload(self, tmp_path): # Do the test again, this time exercising the v3io datastore _upload() loop self._logger.debug("Exercising the v3io _upload() loop") os.remove(tempfile_2_path) - + object_path = urlparse(object_url).path data_item.store._upload( object_path, tempfile_1_path, max_chunk_size=100 * 1024 ) @@ -86,36 +140,141 @@ def test_v3io_large_object_upload(self, tmp_path): def test_v3io_large_object_put(self): file_size = 20 * 
1024 * 1024 # 20MB generated_buffer = bytearray(os.urandom(file_size)) - object_path = "/bigdata/test_v3io_large_object_put" - v3io_object_url = "v3io://" + object_path - data_item = mlrun.datastore.store_manager.object(v3io_object_url) - try: - # Exercise the DataItem put flow - data_item.put(generated_buffer) - returned_buffer = data_item.get() - assert returned_buffer == generated_buffer + data_item, object_url = self._get_data_item() + object_path = urlparse(object_url).path - data_item.store._put( - object_path, generated_buffer, max_chunk_size=100 * 1024 - ) - returned_buffer = data_item.get() - assert returned_buffer == generated_buffer + data_item.put(generated_buffer) + returned_buffer = data_item.get() + assert returned_buffer == generated_buffer - finally: - data_item.delete() + data_item.store._put(object_path, generated_buffer, max_chunk_size=100 * 1024) + returned_buffer = data_item.get() + assert returned_buffer == generated_buffer + + def test_put_get_and_download(self): + data_item, _ = self._get_data_item() + data_item.put(self.test_string) + response = data_item.get() + assert response.decode() == self.test_string + response = data_item.get(offset=20) + assert response.decode() == self.test_string[20:] + response = data_item.get(size=20) + assert response.decode() == self.test_string[:20] + response = data_item.get(offset=20, size=0) + assert response.decode() == self.test_string[20:] + response = data_item.get(offset=20, size=10) + assert response.decode() == self.test_string[20:30] + + with tempfile.NamedTemporaryFile(mode="w+", delete=True) as temp_file: + data_item.download(temp_file.name) + content = temp_file.read() + assert content == self.test_string + + # append=True test: + data_item.put(self.test_string, append=True) + response = data_item.get() + assert response.decode() == self.test_string + self.test_string + + def test_stat(self): + data_item, _ = self._get_data_item() + data_item.put(self.test_string) + stat = data_item.stat() + assert stat.size == len(self.test_string) def test_list_dir(self): - dir_base_path = "/bigdata/test_base_dir/" - v3io_dir_url = "v3io://" + dir_base_path - dir_base_item = mlrun.datastore.store_manager.object(v3io_dir_url) - file_item = mlrun.datastore.store_manager.object(v3io_dir_url + "test_file") + dir_base_item = mlrun.datastore.store_manager.object(self.object_dir_url) + file_item = mlrun.datastore.store_manager.object( + self.object_dir_url + "/test_file.txt" + ) file_item_deep = mlrun.datastore.store_manager.object( - v3io_dir_url + "test_dir/test_file" + self.object_dir_url + "/test_dir/test_file.txt" ) try: file_item.put("test") file_item_deep.put("test") actual_dir_content = dir_base_item.listdir() - assert actual_dir_content == ["test_dir/", "test_file"] + assert actual_dir_content == ["test_dir/", "test_file.txt"] finally: dir_base_item.delete() + + def test_upload(self): + data_item, _ = self._get_data_item() + data_item.upload(self.test_file_path) + response = data_item.get() + assert response.decode() == self.test_string + + def test_rm(self): + data_item, _ = self._get_data_item() + data_item.upload(self.test_file_path) + data_item.stat() + data_item.delete() + with pytest.raises( + mlrun.errors.MLRunNotFoundError, match="Request failed with status 404" + ): + data_item.stat() + + @pytest.mark.parametrize( + "file_extension,kwargs, reader", + [ + ( + "parquet", + {}, + pd.read_parquet, + ), + ("csv", {}, pd.read_csv), + ("json", {"orient": "records", "lines": True}, pd.read_json), + ], + ) + def test_as_df( + 
self, + file_extension: str, + kwargs: dict, + reader: callable, + ): + local_file_path = self.df_paths[file_extension] + source = reader(local_file_path, **kwargs) + source["date_of_birth"] = pd.to_datetime(source["date_of_birth"]) + dataitem, _ = self._get_data_item(file_extension=file_extension) + dataitem.upload(local_file_path) + response = dataitem.as_df(time_column="date_of_birth", **kwargs) + pd.testing.assert_frame_equal(source, response) + + @pytest.mark.parametrize( + "file_extension, reader", + [ + ( + "parquet", + pd.read_parquet, + ), + ("csv", pd.read_csv), + ], + ) + def test_check_read_df_dir( + self, + file_extension: str, + reader: callable, + ): + first_file_path = self.df_paths[file_extension] + second_file_path = self.additional_df_paths[file_extension] + self._setup_df_dir( + first_file_path=first_file_path, + second_file_path=second_file_path, + file_extension=file_extension, + ) + + dir_data_item = mlrun.run.get_dataitem(self.object_dir_url) + response_df = ( + dir_data_item.as_df(format=file_extension, time_column="date_of_birth") + .sort_values("id") + .reset_index(drop=True) + ) + df = reader(first_file_path) + df["date_of_birth"] = pd.to_datetime(df["date_of_birth"]) + additional_df = reader(second_file_path) + additional_df["date_of_birth"] = pd.to_datetime(additional_df["date_of_birth"]) + appended_df = ( + pd.concat([df, additional_df], axis=0) + .sort_values("id") + .reset_index(drop=True) + ) + pd.testing.assert_frame_equal(response_df, appended_df) diff --git a/tests/test_requirements.py b/tests/test_requirements.py index abca09d4512..62261335823 100644 --- a/tests/test_requirements.py +++ b/tests/test_requirements.py @@ -111,7 +111,7 @@ def test_requirement_specifiers_convention(): ignored_invalid_map = { # See comment near requirement for why we're limiting to patch changes only for all of these "aiobotocore": {">=2.5.0,<2.8"}, - "storey": {"~=1.6.18"}, + "storey": {"~=1.6.19"}, "nuclio-sdk": {">=0.5"}, "bokeh": {"~=2.4, >=2.4.2"}, # protobuf is limited just for docs From 603f3363c055eec52183adc778bed8bf6c619d7a Mon Sep 17 00:00:00 2001 From: Alon Maor <48641682+alonmr@users.noreply.github.com> Date: Mon, 11 Mar 2024 11:26:37 +0200 Subject: [PATCH 072/119] [System test] Fix remote runner not saving project [1.6.x] (#5269) --- automation/patch_igz/patch_remote.py | 6 ++- server/api/crud/workflows.py | 4 +- tests/api/crud/test_workflows.py | 58 +++++++++++++++++++++++---- tests/system/env-template.yml | 2 +- tests/system/projects/test_project.py | 2 +- 5 files changed, 57 insertions(+), 15 deletions(-) diff --git a/automation/patch_igz/patch_remote.py b/automation/patch_igz/patch_remote.py index ecd98425d18..80e021ae8b5 100755 --- a/automation/patch_igz/patch_remote.py +++ b/automation/patch_igz/patch_remote.py @@ -17,6 +17,7 @@ import io import json import logging +import os import shlex import subprocess import typing @@ -195,7 +196,7 @@ def _replace_deploy_policy(self): "deployment", "mlrun-api-chief", "-p", - f"'{self._deploy_patch}'", + f"{self._deploy_patch}", ] ) @@ -209,7 +210,7 @@ def _replace_deploy_policy(self): "deployment", "mlrun-api-worker", "-p", - f"'{self._deploy_patch}'", + f"{self._deploy_patch}", ] ) @@ -441,6 +442,7 @@ def _get_image_tag(tag) -> str: @staticmethod def _execute_local_proc_interactive(cmd, env=None): + env = os.environ | (env or {}) proc = subprocess.Popen( cmd, stdout=subprocess.PIPE, stderr=subprocess.STDOUT, text=True, env=env ) diff --git a/server/api/crud/workflows.py b/server/api/crud/workflows.py index 
8e6794f789c..e1d1ca0e7a4 100644 --- a/server/api/crud/workflows.py +++ b/server/api/crud/workflows.py @@ -372,16 +372,14 @@ def _validate_source( [2] = Bool if the source is a path. """ + save = bool(load_only or not source) source = source or project.spec.source - save = True if not source: raise mlrun.errors.MLRunInvalidArgumentError( "Project source is required. Either specify the source in the project or provide it in the request." ) if not load_only: - save = False - # Path like source is not supported for load_only since it uses the mlrun default image if source.startswith("/"): return source, save, True diff --git a/tests/api/crud/test_workflows.py b/tests/api/crud/test_workflows.py index d34bff6a02a..e4904507eb7 100644 --- a/tests/api/crud/test_workflows.py +++ b/tests/api/crud/test_workflows.py @@ -72,14 +72,6 @@ def test_run_workflow_with_local_source( spec=mlrun.common.schemas.WorkflowSpec( name=run_name, engine="remote", - code=None, - path=None, - args=None, - handler=None, - ttl=None, - args_schema=None, - schedule=None, - run_local=None, image="mlrun/mlrun", ), source=source, @@ -106,3 +98,53 @@ def test_run_workflow_with_local_source( assert "url" not in run.spec.parameters assert run.spec.handler == "mlrun.projects.load_and_run" + + @pytest.mark.parametrize( + "source, load_only, expected_save", + [ + ("./project-name", False, False), + ("", False, True), + ("s3://project-name", True, True), + ("", True, True), + ], + ) + def test_run_workflow_save_project( + self, + db: sqlalchemy.orm.Session, + k8s_secrets_mock, + source: str, + load_only: bool, + expected_save: bool, + ): + project = mlrun.common.schemas.Project( + metadata=mlrun.common.schemas.ProjectMetadata(name="project-name"), + spec=mlrun.common.schemas.ProjectSpec(source="s3://some-source"), + ) + server.api.crud.Projects().create_project(db, project) + + run_name = "run-name" + runner = server.api.crud.WorkflowRunners().create_runner( + run_name=run_name, + project=project.metadata.name, + db_session=db, + auth_info=mlrun.common.schemas.AuthInfo(), + image="mlrun/mlrun", + ) + + run = server.api.crud.WorkflowRunners().run( + runner=runner, + project=project, + workflow_request=mlrun.common.schemas.WorkflowRequest( + spec=mlrun.common.schemas.WorkflowSpec( + name=run_name, + engine="remote", + image="mlrun/mlrun", + ), + source=source, + artifact_path="/home/mlrun/artifacts", + ), + auth_info=mlrun.common.schemas.AuthInfo(), + load_only=load_only, + ) + + assert run.spec.parameters["save"] == expected_save diff --git a/tests/system/env-template.yml b/tests/system/env-template.yml index d530ea9677b..e3ec224a790 100644 --- a/tests/system/env-template.yml +++ b/tests/system/env-template.yml @@ -18,7 +18,7 @@ # The mlrun-api URL. e.g. https://mlrun-api.default-tenant.app.hedingber-28-1.iguazio-cd2.com MLRUN_DBPATH: -# The webapi https_direct url - e.g. https://default-tenant.app.hedingber-28-1.iguazio-cd2.com:8444 +# The webapi https_direct url - e.g. https://webapi.default-tenant.app.hedingber-28-1.iguazio-cd2.com V3IO_API: # Iguazio API URL - e.g. 
https://dashboard.default-tenant.app.hedingber-28-1.iguazio-cd2.com diff --git a/tests/system/projects/test_project.py b/tests/system/projects/test_project.py index 8f344b81b93..bbb24cc16f7 100644 --- a/tests/system/projects/test_project.py +++ b/tests/system/projects/test_project.py @@ -626,7 +626,7 @@ def test_remote_from_archive(self): project.export(archive_path) project.spec.source = archive_path project.save() - self._logger.debug("saved project", project=project.to_yaml()) + self._logger.debug("Saved project", project=project.to_yaml()) run = project.run( "main", watch=True, From 0360dc1edec309cd38adef45742f652a5b03bd21 Mon Sep 17 00:00:00 2001 From: Alon Maor <48641682+alonmr@users.noreply.github.com> Date: Mon, 11 Mar 2024 11:26:52 +0200 Subject: [PATCH 073/119] [Function] Reduce `clone_target_dir` deprecation warning noise [1.6.x] (#5270) --- mlrun/runtimes/base.py | 24 ++++++++++++++---------- 1 file changed, 14 insertions(+), 10 deletions(-) diff --git a/mlrun/runtimes/base.py b/mlrun/runtimes/base.py index fe1ac650626..96f547b92c1 100644 --- a/mlrun/runtimes/base.py +++ b/mlrun/runtimes/base.py @@ -137,20 +137,24 @@ def build(self, build): @property def clone_target_dir(self): - warnings.warn( - "The clone_target_dir attribute is deprecated in 1.6.2 and will be removed in 1.8.0. " - "Use spec.build.source_code_target_dir instead.", - FutureWarning, - ) + # TODO: remove this property in 1.9.0 + if self.build.source_code_target_dir: + warnings.warn( + "The clone_target_dir attribute is deprecated in 1.6.2 and will be removed in 1.9.0. " + "Use spec.build.source_code_target_dir instead.", + FutureWarning, + ) return self.build.source_code_target_dir @clone_target_dir.setter def clone_target_dir(self, clone_target_dir): - warnings.warn( - "The clone_target_dir attribute is deprecated in 1.6.2 and will be removed in 1.8.0. " - "Use spec.build.source_code_target_dir instead.", - FutureWarning, - ) + # TODO: remove this property in 1.9.0 + if clone_target_dir: + warnings.warn( + "The clone_target_dir attribute is deprecated in 1.6.2 and will be removed in 1.9.0. 
" + "Use spec.build.source_code_target_dir instead.", + FutureWarning, + ) self.build.source_code_target_dir = clone_target_dir def enrich_function_preemption_spec(self): From 0a757b21e17be98278c010581c2b45107fcd4268 Mon Sep 17 00:00:00 2001 From: Alon Maor <48641682+alonmr@users.noreply.github.com> Date: Mon, 11 Mar 2024 15:04:11 +0200 Subject: [PATCH 074/119] [Build] Fix `clone_target_dir` backwards compatibility [1.6.x] (#5277) --- server/api/api/endpoints/functions.py | 2 +- tests/api/api/test_functions.py | 33 ++++++++++ tests/api/utils/test_builder.py | 92 +++++++++++++-------------- 3 files changed, 80 insertions(+), 47 deletions(-) diff --git a/server/api/api/endpoints/functions.py b/server/api/api/endpoints/functions.py index a9f80043004..4a7f4e31b0b 100644 --- a/server/api/api/endpoints/functions.py +++ b/server/api/api/endpoints/functions.py @@ -316,7 +316,7 @@ async def build_function( # clone_target_dir is deprecated but needs to remain for backward compatibility func_dict = fn.to_dict() func_dict["spec"]["clone_target_dir"] = get_in( - data, "data.spec.build.source_code_target_dir" + func_dict, "spec.build.source_code_target_dir" ) return { diff --git a/tests/api/api/test_functions.py b/tests/api/api/test_functions.py index ff509b5d9c1..06d27a0a51b 100644 --- a/tests/api/api/test_functions.py +++ b/tests/api/api/test_functions.py @@ -693,6 +693,39 @@ def test_build_no_access_key( assert response.json()["detail"]["reason"] == expected_reason +def test_build_clone_target_dir_backwards_compatability( + monkeypatch, + db: sqlalchemy.orm.Session, + client: fastapi.testclient.TestClient, + k8s_secrets_mock, +): + tests.api.api.utils.create_project(client, PROJECT) + clone_target_dir = "/some/path" + function_dict = { + "kind": "job", + "metadata": { + "name": "function-name", + "project": "project-name", + "tag": "latest", + }, + "spec": { + "clone_target_dir": clone_target_dir, + }, + } + + monkeypatch.setattr( + server.api.utils.builder, + "build_image", + lambda *args, **kwargs: "success", + ) + + response = client.post( + "build/function", + json={"function": function_dict}, + ) + assert response.json()["data"]["spec"]["clone_target_dir"] == clone_target_dir + + def test_start_function_succeeded( db: sqlalchemy.orm.Session, client: fastapi.testclient.TestClient, monkeypatch ): diff --git a/tests/api/utils/test_builder.py b/tests/api/utils/test_builder.py index aeaf34e5f24..b7b96a6bc6c 100644 --- a/tests/api/utils/test_builder.py +++ b/tests/api/utils/test_builder.py @@ -1129,52 +1129,6 @@ def test_mlrun_base_image_no_requirements(): assert with_mlrun is False -def _get_target_image_from_create_pod_mock(): - return _create_pod_mock_pod_spec().containers[0].args[5] - - -def _create_pod_mock_pod_spec(): - return ( - server.api.utils.singletons.k8s.get_k8s_helper() - .create_pod.call_args[0][0] - .pod.spec - ) - - -def _patch_k8s_helper(monkeypatch): - get_k8s_helper_mock = unittest.mock.Mock() - get_k8s_helper_mock.create_pod = unittest.mock.Mock( - side_effect=lambda pod: (pod, "some-namespace") - ) - get_k8s_helper_mock.get_project_secret_name = unittest.mock.Mock( - side_effect=lambda name: "name" - ) - get_k8s_helper_mock.get_project_secret_keys = unittest.mock.Mock( - side_effect=lambda project, filter_internal: ["KEY"] - ) - get_k8s_helper_mock.get_project_secret_data = unittest.mock.Mock( - side_effect=lambda project, keys: {"KEY": "val"} - ) - monkeypatch.setattr( - server.api.utils.singletons.k8s, - "get_k8s_helper", - lambda *args, **kwargs: get_k8s_helper_mock, - ) - - 
-def _mock_default_service_account(monkeypatch, service_account): - resolve_project_default_service_account_mock = unittest.mock.MagicMock() - resolve_project_default_service_account_mock.return_value = ( - [], - service_account, - ) - monkeypatch.setattr( - server.api.api.utils, - "resolve_project_default_service_account", - resolve_project_default_service_account_mock, - ) - - @pytest.mark.parametrize( "builder_env,source,commands,extra_args,expected_in_stage", [ @@ -1615,3 +1569,49 @@ def test_resolve_function_image_secret( resolved_image_target, secret_name ) ) + + +def _get_target_image_from_create_pod_mock(): + return _create_pod_mock_pod_spec().containers[0].args[5] + + +def _create_pod_mock_pod_spec(): + return ( + server.api.utils.singletons.k8s.get_k8s_helper() + .create_pod.call_args[0][0] + .pod.spec + ) + + +def _patch_k8s_helper(monkeypatch): + get_k8s_helper_mock = unittest.mock.Mock() + get_k8s_helper_mock.create_pod = unittest.mock.Mock( + side_effect=lambda pod: (pod, "some-namespace") + ) + get_k8s_helper_mock.get_project_secret_name = unittest.mock.Mock( + side_effect=lambda name: "name" + ) + get_k8s_helper_mock.get_project_secret_keys = unittest.mock.Mock( + side_effect=lambda project, filter_internal: ["KEY"] + ) + get_k8s_helper_mock.get_project_secret_data = unittest.mock.Mock( + side_effect=lambda project, keys: {"KEY": "val"} + ) + monkeypatch.setattr( + server.api.utils.singletons.k8s, + "get_k8s_helper", + lambda *args, **kwargs: get_k8s_helper_mock, + ) + + +def _mock_default_service_account(monkeypatch, service_account): + resolve_project_default_service_account_mock = unittest.mock.MagicMock() + resolve_project_default_service_account_mock.return_value = ( + [], + service_account, + ) + monkeypatch.setattr( + server.api.api.utils, + "resolve_project_default_service_account", + resolve_project_default_service_account_mock, + ) From 2f7cf62583c4ef1f62a48781dbacbb13ec2cc373 Mon Sep 17 00:00:00 2001 From: Liran BG Date: Mon, 11 Mar 2024 16:40:17 +0200 Subject: [PATCH 075/119] [SQLAlchemy] Recycle & ping connection prior using them [1.6.x] (#5278) --- mlrun/common/db/sql_session.py | 3 +++ mlrun/config.py | 6 +++++- 2 files changed, 8 insertions(+), 1 deletion(-) diff --git a/mlrun/common/db/sql_session.py b/mlrun/common/db/sql_session.py index e30b6dd09ac..163aeaad2d3 100644 --- a/mlrun/common/db/sql_session.py +++ b/mlrun/common/db/sql_session.py @@ -63,9 +63,12 @@ def _init_engine(dsn=None): max_overflow = config.httpdb.db.connections_pool_max_overflow if max_overflow is None: max_overflow = config.httpdb.max_workers + kwargs = { "pool_size": pool_size, "max_overflow": max_overflow, + "pool_pre_ping": config.httpdb.db.connections_pool_pre_ping, + "pool_recycle": config.httpdb.db.connections_pool_recycle, } engine = create_engine(dsn, **kwargs) _engines[dsn] = engine diff --git a/mlrun/config.py b/mlrun/config.py index 7d30cfa260a..3bae32aa81a 100644 --- a/mlrun/config.py +++ b/mlrun/config.py @@ -312,7 +312,11 @@ # default is 16MB, max 1G, for more info https://dev.mysql.com/doc/refman/8.0/en/packet-too-large.html "max_allowed_packet": 64000000, # 64MB }, - # None will set this to be equal to the httpdb.max_workers + # tests connections for liveness upon each checkout + "connections_pool_pre_ping": True, + # this setting causes the pool to recycle connections after the given number of seconds has passed + "connections_pool_recycle": 60 * 60, + # None defaults to httpdb.max_workers "connections_pool_size": None, "connections_pool_max_overflow": None, # below is 
a db-specific configuration From 79a661a632cfdc3d89c11e59dd13b9f52d0d3142 Mon Sep 17 00:00:00 2001 From: Liran BG Date: Wed, 13 Mar 2024 14:52:57 +0200 Subject: [PATCH 076/119] [Project] Fix concurrent project deletion [1.6.x] (#5282) --- server/api/api/endpoints/projects.py | 3 ++- server/api/api/endpoints/projects_v2.py | 3 ++- server/api/api/utils.py | 3 ++- 3 files changed, 6 insertions(+), 3 deletions(-) diff --git a/server/api/api/endpoints/projects.py b/server/api/api/endpoints/projects.py index 0b16bcacc82..56ee4b5b752 100644 --- a/server/api/api/endpoints/projects.py +++ b/server/api/api/endpoints/projects.py @@ -224,7 +224,8 @@ async def delete_project( db_session, auth_info, ) - background_tasks.add_task(task) + if task: + background_tasks.add_task(task) return fastapi.Response(status_code=http.HTTPStatus.ACCEPTED.value) is_running_in_background = await run_in_threadpool( diff --git a/server/api/api/endpoints/projects_v2.py b/server/api/api/endpoints/projects_v2.py index 9488f05cb28..ad119992733 100644 --- a/server/api/api/endpoints/projects_v2.py +++ b/server/api/api/endpoints/projects_v2.py @@ -110,7 +110,8 @@ async def delete_project( db_session, auth_info, ) - background_tasks.add_task(task) + if task: + background_tasks.add_task(task) response.status_code = http.HTTPStatus.ACCEPTED.value return server.api.utils.background_tasks.InternalBackgroundTasksHandler().get_background_task( diff --git a/server/api/api/utils.py b/server/api/api/utils.py index 44f0f42e810..dfde191532a 100644 --- a/server/api/api/utils.py +++ b/server/api/api/utils.py @@ -1116,10 +1116,11 @@ def get_or_create_project_deletion_background_task( background_task_kind = background_task_kind_format.format(project_name) try: - return server.api.utils.background_tasks.InternalBackgroundTasksHandler().get_active_background_task_by_kind( + task = server.api.utils.background_tasks.InternalBackgroundTasksHandler().get_active_background_task_by_kind( background_task_kind, raise_on_not_found=True, ) + return None, task.metadata.name except mlrun.errors.MLRunNotFoundError: logger.debug( "Existing background task not found, creating new one", From e8f6cd0e7189afdfbaf0fd562eaea9d33bfb3ff2 Mon Sep 17 00:00:00 2001 From: Alon Maor <48641682+alonmr@users.noreply.github.com> Date: Sun, 17 Mar 2024 09:07:53 +0200 Subject: [PATCH 077/119] [Projects] Allow deleting archived projects without leader [1.6.x] (#5290) --- server/api/api/endpoints/projects.py | 63 ++++++++++++++++++++----- server/api/api/endpoints/projects_v2.py | 38 ++++++++++----- server/api/api/utils.py | 62 +++++++++++++++++------- server/api/utils/clients/iguazio.py | 25 +++++++--- server/api/utils/projects/follower.py | 3 ++ server/api/utils/projects/leader.py | 1 + server/api/utils/projects/member.py | 1 + tests/api/api/test_projects.py | 53 ++++++++++++++++----- 8 files changed, 190 insertions(+), 56 deletions(-) diff --git a/server/api/api/endpoints/projects.py b/server/api/api/endpoints/projects.py index 56ee4b5b752..5c9ccfb2212 100644 --- a/server/api/api/endpoints/projects.py +++ b/server/api/api/endpoints/projects.py @@ -185,6 +185,15 @@ async def delete_project( server.api.api.deps.get_db_session ), ): + # check if project exists + try: + project = await run_in_threadpool( + get_project_member().get_project, db_session, name, auth_info.session + ) + except mlrun.errors.MLRunNotFoundError: + logger.info("Project not found, nothing to delete", project=name) + return fastapi.Response(status_code=http.HTTPStatus.NO_CONTENT.value) + # delete project can be 
responsible for deleting schedules. Schedules are running only on chief, # that is why we re-route requests to chief if ( @@ -219,7 +228,7 @@ async def delete_project( # wait for this background task to complete before marking the task as done. task, _ = await run_in_threadpool( server.api.api.utils.get_or_create_project_deletion_background_task, - name, + project, deletion_strategy, db_session, auth_info, @@ -228,16 +237,46 @@ async def delete_project( background_tasks.add_task(task) return fastapi.Response(status_code=http.HTTPStatus.ACCEPTED.value) - is_running_in_background = await run_in_threadpool( - get_project_member().delete_project, - db_session, - name, - deletion_strategy, - auth_info.projects_role, - auth_info, - wait_for_completion=wait_for_completion, - ) - if is_running_in_background: + is_running_in_background = False + force_delete = False + try: + is_running_in_background = await run_in_threadpool( + get_project_member().delete_project, + db_session, + name, + deletion_strategy, + auth_info.projects_role, + auth_info, + wait_for_completion=wait_for_completion, + ) + except mlrun.errors.MLRunNotFoundError as exc: + if server.api.utils.helpers.is_request_from_leader(auth_info.projects_role): + raise exc + + if project.status.state != mlrun.common.schemas.ProjectState.archived: + raise mlrun.errors.MLRunPreconditionFailedError( + f"Failed to delete project {name}. Project not found in leader, but it is not in archived state." + ) + + logger.warning( + "Project not found in leader, ensuring project deleted in mlrun", + project_name=name, + err=mlrun.errors.err_to_str(exc), + ) + force_delete = True + + if force_delete: + # In this case the wrapper delete project request is the one deleting the project because it + # doesn't exist in the leader. + await run_in_threadpool( + server.api.crud.Projects().delete_project, + db_session, + name, + deletion_strategy, + auth_info, + ) + + elif is_running_in_background: return fastapi.Response(status_code=http.HTTPStatus.ACCEPTED.value) else: @@ -248,6 +287,8 @@ async def delete_project( ) await get_project_member().post_delete_project(name) + if force_delete: + return fastapi.Response(status_code=http.HTTPStatus.ACCEPTED.value) return fastapi.Response(status_code=http.HTTPStatus.NO_CONTENT.value) diff --git a/server/api/api/endpoints/projects_v2.py b/server/api/api/endpoints/projects_v2.py index ad119992733..4b6815533ac 100644 --- a/server/api/api/endpoints/projects_v2.py +++ b/server/api/api/endpoints/projects_v2.py @@ -58,22 +58,13 @@ async def delete_project( ): # check if project exists try: - await run_in_threadpool( + project = await run_in_threadpool( get_project_member().get_project, db_session, name, auth_info.session ) except mlrun.errors.MLRunNotFoundError: logger.info("Project not found, nothing to delete", project=name) return fastapi.Response(status_code=http.HTTPStatus.NO_CONTENT.value) - # usually the CRUD for delete project will check permissions, however, since we are running the crud in a background - # task, we need to check permissions here. skip permission check if the request is from the leader. - if not server.api.utils.helpers.is_request_from_leader(auth_info.projects_role): - await server.api.utils.auth.verifier.AuthVerifier().query_project_permissions( - name, - mlrun.common.schemas.AuthorizationAction.delete, - auth_info, - ) - # delete project can be responsible for deleting schedules. 
Schedules are running only on chief, # that is why we re-route requests to chief if ( @@ -90,6 +81,31 @@ async def delete_project( name=name, request=request, api_version="v2" ) + # usually the CRUD for delete project will check permissions, however, since we are running the crud in a background + # task, we need to check permissions here. skip permission check if the request is from the leader. + if not server.api.utils.helpers.is_request_from_leader(auth_info.projects_role): + skip_permission_check = False + if project.status.state == mlrun.common.schemas.ProjectState.archived: + try: + await run_in_threadpool( + get_project_member().get_project, + db_session, + name, + auth_info.session, + from_leader=True, + ) + except mlrun.errors.MLRunNotFoundError: + skip_permission_check = True + + if not skip_permission_check: + await ( + server.api.utils.auth.verifier.AuthVerifier().query_project_permissions( + name, + mlrun.common.schemas.AuthorizationAction.delete, + auth_info, + ) + ) + # we need to implement the verify_project_is_empty, since we don't want # to spawn a background task for this, only to return a response if deletion_strategy.strategy_to_check(): @@ -105,7 +121,7 @@ async def delete_project( task, task_name = await run_in_threadpool( server.api.api.utils.get_or_create_project_deletion_background_task, - name, + project, deletion_strategy, db_session, auth_info, diff --git a/server/api/api/utils.py b/server/api/api/utils.py index dfde191532a..18e00c1ae5b 100644 --- a/server/api/api/utils.py +++ b/server/api/api/utils.py @@ -1068,8 +1068,8 @@ def artifact_project_and_resource_name_extractor(artifact): def get_or_create_project_deletion_background_task( - project_name: str, deletion_strategy: str, db_session, auth_info -) -> typing.Tuple[typing.Callable, str]: + project: mlrun.common.schemas.Project, deletion_strategy: str, db_session, auth_info +) -> typing.Tuple[typing.Optional[typing.Callable], str]: """ This method is responsible for creating a background task for deleting a project. The project deletion flow is as follows: @@ -1114,7 +1114,7 @@ def get_or_create_project_deletion_background_task( # therefore doesn't wait for the project deletion to complete. 
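(An aside on the hunk above: the return type widens to Tuple[Optional[Callable], str] because, when a deletion task for the project is already in flight, the function now short-circuits and returns (None, existing_task_name) instead of a new task. That is also why the v1 and v2 endpoints earlier in this patch guard the scheduling with `if task:` before calling background_tasks.add_task(task).)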
wait_for_project_deletion = True - background_task_kind = background_task_kind_format.format(project_name) + background_task_kind = background_task_kind_format.format(project.metadata.name) try: task = server.api.utils.background_tasks.InternalBackgroundTasksHandler().get_active_background_task_by_kind( background_task_kind, @@ -1134,7 +1134,7 @@ def get_or_create_project_deletion_background_task( _delete_project, background_task_name, db_session=db_session, - project_name=project_name, + project=project, deletion_strategy=deletion_strategy, auth_info=auth_info, wait_for_project_deletion=wait_for_project_deletion, @@ -1144,24 +1144,54 @@ def get_or_create_project_deletion_background_task( async def _delete_project( db_session: sqlalchemy.orm.Session, - project_name: str, + project: mlrun.common.schemas.Project, deletion_strategy: mlrun.common.schemas.DeletionStrategy, auth_info: mlrun.common.schemas.AuthInfo, wait_for_project_deletion: bool, background_task_name: str, ): - await run_in_threadpool( - get_project_member().delete_project, - db_session, - project_name, - deletion_strategy, - auth_info.projects_role, - auth_info, - wait_for_completion=True, - background_task_name=background_task_name, - ) + force_delete = False + project_name = project.metadata.name + try: + await run_in_threadpool( + get_project_member().delete_project, + db_session, + project_name, + deletion_strategy, + auth_info.projects_role, + auth_info, + wait_for_completion=True, + background_task_name=background_task_name, + ) + except mlrun.errors.MLRunNotFoundError as exc: + if server.api.utils.helpers.is_request_from_leader(auth_info.projects_role): + raise exc + + if project.status.state != mlrun.common.schemas.ProjectState.archived: + raise mlrun.errors.MLRunPreconditionFailedError( + f"Failed to delete project {project_name}. " + "Project not found in leader, but it is not in archived state." + ) + + logger.warning( + "Project not found in leader, ensuring project is deleted in mlrun", + project_name=project_name, + exc=err_to_str(exc), + ) + force_delete = True + + if force_delete: + # In this case the wrapper delete project job is the one deleting the project because it + # doesn't exist in the leader. 
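(That direct deletion is the call just below. Taken together, the fallback introduced across these hunks behaves roughly like the following decision table, with requests that arrive from the leader itself simply re-raising the not-found error:

    found in leader?   project state   outcome
    yes                any             normal leader-driven deletion
    no                 archived        force-delete via mlrun's own Projects CRUD
    no                 not archived    MLRunPreconditionFailedError
)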
+ await run_in_threadpool( + server.api.crud.Projects().delete_project, + db_session, + project_name, + deletion_strategy, + auth_info, + ) - if wait_for_project_deletion: + elif wait_for_project_deletion: await run_in_threadpool( verify_project_is_deleted, project_name, diff --git a/server/api/utils/clients/iguazio.py b/server/api/utils/clients/iguazio.py index 9e62332a5a5..3129181c1f6 100644 --- a/server/api/utils/clients/iguazio.py +++ b/server/api/utils/clients/iguazio.py @@ -576,13 +576,24 @@ def _get_project_from_iguazio_without_parsing( params = {"include": "owner"} if enrich_owner_access_key: params["enrich_owner_access_key"] = "true" - return self._send_request_to_api( - "GET", - f"projects/__name__/{name}", - "Failed getting project from Iguazio", - session, - params=params, - ) + try: + return self._send_request_to_api( + "GET", + f"projects/__name__/{name}", + "Failed getting project from Iguazio", + session, + params=params, + ) + except requests.HTTPError as exc: + if exc.response.status_code != http.HTTPStatus.NOT_FOUND.value: + raise + self._logger.debug( + "Project not found in Iguazio", + name=name, + ) + raise mlrun.errors.MLRunNotFoundError( + "Project not found in Iguazio" + ) from exc def _get_project_from_iguazio( self, session: str, name: str, include_owner_session: bool = False diff --git a/server/api/utils/projects/follower.py b/server/api/utils/projects/follower.py index 93c25569fa0..5dfec7c83de 100644 --- a/server/api/utils/projects/follower.py +++ b/server/api/utils/projects/follower.py @@ -212,7 +212,10 @@ def get_project( db_session: sqlalchemy.orm.Session, name: str, leader_session: typing.Optional[str] = None, + from_leader: bool = False, ) -> mlrun.common.schemas.Project: + if from_leader: + return self._leader_client.get_project(leader_session, name) return server.api.crud.Projects().get_project(db_session, name) def get_project_owner( diff --git a/server/api/utils/projects/leader.py b/server/api/utils/projects/leader.py index 148393a1071..48cad385394 100644 --- a/server/api/utils/projects/leader.py +++ b/server/api/utils/projects/leader.py @@ -125,6 +125,7 @@ def get_project( db_session: sqlalchemy.orm.Session, name: str, leader_session: typing.Optional[str] = None, + from_leader: bool = False, ) -> mlrun.common.schemas.Project: return self._leader_follower.get_project(db_session, name) diff --git a/server/api/utils/projects/member.py b/server/api/utils/projects/member.py index 05e48647361..adb8b543083 100644 --- a/server/api/utils/projects/member.py +++ b/server/api/utils/projects/member.py @@ -103,6 +103,7 @@ def get_project( db_session: sqlalchemy.orm.Session, name: str, leader_session: typing.Optional[str] = None, + from_leader: bool = False, ) -> mlrun.common.schemas.Project: pass diff --git a/tests/api/api/test_projects.py b/tests/api/api/test_projects.py index cb1530f8951..9b24e7c1341 100644 --- a/tests/api/api/test_projects.py +++ b/tests/api/api/test_projects.py @@ -968,27 +968,47 @@ def test_delete_project_not_found_in_leader( mock_project_follower_iguazio_client, delete_api_version: str, ) -> None: - project = mlrun.common.schemas.Project( - metadata=mlrun.common.schemas.ProjectMetadata(name="project-name"), + archived_project = mlrun.common.schemas.Project( + metadata=mlrun.common.schemas.ProjectMetadata(name="archived-project"), spec=mlrun.common.schemas.ProjectSpec(), + status=mlrun.common.schemas.ProjectStatus( + state=mlrun.common.schemas.ProjectState.archived + ), ) - response = unversioned_client.post("v1/projects", json=project.dict()) 
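(The reworked test continues below with a second, non-archived project, so both fallback branches are exercised: deleting the archived project must succeed even though the leader no longer knows it, while deleting the online project must fail with a precondition error.)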
+ online_project = mlrun.common.schemas.Project( + metadata=mlrun.common.schemas.ProjectMetadata(name="online-project"), + spec=mlrun.common.schemas.ProjectSpec(), + ) + + response = unversioned_client.post("v1/projects", json=archived_project.dict()) assert response.status_code == HTTPStatus.CREATED.value - _assert_project_response(project, response) + _assert_project_response(archived_project, response) + + response = unversioned_client.post("v1/projects", json=online_project.dict()) + assert response.status_code == HTTPStatus.CREATED.value + _assert_project_response(online_project, response) with unittest.mock.patch.object( mock_project_follower_iguazio_client, "delete_project", - side_effect=mlrun.errors.MLRunNotFoundError("Project not found in Iguazio"), + side_effect=mlrun.errors.MLRunNotFoundError("Project not found"), ): response = unversioned_client.delete( - f"{delete_api_version}/projects/{project.metadata.name}", + f"{delete_api_version}/projects/{archived_project.metadata.name}", ) - if delete_api_version == "v1": - assert response.status_code == HTTPStatus.NOT_FOUND.value - assert "Project not found in Iguazio" in response.json()["detail"] - else: + assert response.status_code == HTTPStatus.ACCEPTED.value + + response = unversioned_client.get( + f"v1/projects/{archived_project.metadata.name}", + ) + assert response.status_code == HTTPStatus.NOT_FOUND.value + + response = unversioned_client.delete( + f"{delete_api_version}/projects/{online_project.metadata.name}", + ) + if response.status_code == HTTPStatus.ACCEPTED.value: + assert delete_api_version == "v2" background_task = mlrun.common.schemas.BackgroundTask(**response.json()) background_task = server.api.utils.background_tasks.InternalBackgroundTasksHandler().get_background_task( background_task.metadata.name @@ -997,7 +1017,18 @@ def test_delete_project_not_found_in_leader( background_task.status.state == mlrun.common.schemas.BackgroundTaskState.failed ) - assert "Project not found in Iguazio" in background_task.status.error + assert ( + "Failed to delete project online-project. Project not found in leader, but it is not in archived state." + in background_task.status.error + ) + + else: + assert response.status_code == HTTPStatus.PRECONDITION_FAILED.value + + response = unversioned_client.get( + f"v1/projects/{online_project.metadata.name}", + ) + assert response.status_code == HTTPStatus.OK.value # Test should not run more than a few seconds because we test that if the background task fails, From b7ac5efebaf89896c12292061b3ab7b688a69ae4 Mon Sep 17 00:00:00 2001 From: Liran BG Date: Mon, 18 Mar 2024 10:10:13 +0200 Subject: [PATCH 078/119] [Kaniko] Bump kaniko for open source [1.6.x] (#5298) --- mlrun/config.py | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/mlrun/config.py b/mlrun/config.py index 3bae32aa81a..a9c99ceec29 100644 --- a/mlrun/config.py +++ b/mlrun/config.py @@ -444,7 +444,7 @@ # pip install , e.g. mlrun==0.5.4, mlrun~=0.5, # git+https://github.com/mlrun/mlrun@development. 
by default uses the version "mlrun_version_specifier": "", - "kaniko_image": "gcr.io/kaniko-project/executor:v1.8.0", # kaniko builder image + "kaniko_image": "gcr.io/kaniko-project/executor:v1.21.1", # kaniko builder image "kaniko_init_container_image": "alpine:3.18", # image for kaniko init container when docker registry is ECR "kaniko_aws_cli_image": "amazon/aws-cli:2.7.10", From 7e4d7e2fdfdbdd872e2db15f000123c4b7b164b8 Mon Sep 17 00:00:00 2001 From: daniels290813 <78727943+daniels290813@users.noreply.github.com> Date: Tue, 19 Mar 2024 11:37:11 +0200 Subject: [PATCH 079/119] [CI] Release bundle tutorials and welcome [1.6.x] (#5301) --- .github/workflows/release.yaml | 5 ++++- 1 file changed, 4 insertions(+), 1 deletion(-) diff --git a/.github/workflows/release.yaml b/.github/workflows/release.yaml index e44f659634b..a7e460721b7 100644 --- a/.github/workflows/release.yaml +++ b/.github/workflows/release.yaml @@ -264,7 +264,10 @@ jobs: - uses: actions/checkout@v3 - name: Create tutorials tar run: | - tar -cvf mlrun-tutorials.tar docs/tutorials + wget -c https://github.com/v3io/tutorials/blob/mlrun-release-stable/welcome.ipynb -P docs + wget -c https://github.com/v3io/tutorials/blob/mlrun-release-stable/README.md -P docs + tar -cvf mlrun-tutorials.tar docs/tutorials docs/README.md docs/welcome.ipynb + rm -rf docs/welcome.ipynb docs/README.md - name: Add tutorials tar to release uses: ncipollo/release-action@v1 with: From df53325df8b7c535404975bfafc0d89c3192e92f Mon Sep 17 00:00:00 2001 From: Assaf Ben-Amitai Date: Wed, 20 Mar 2024 10:29:32 +0000 Subject: [PATCH 080/119] [Tests] skipping test_v3io_large_object_upload as it hangs when running against the CI system [1.6.x] (#5308) --- tests/system/datastore/test_v3io.py | 3 +++ 1 file changed, 3 insertions(+) diff --git a/tests/system/datastore/test_v3io.py b/tests/system/datastore/test_v3io.py index f1e107e1322..bb655315e53 100644 --- a/tests/system/datastore/test_v3io.py +++ b/tests/system/datastore/test_v3io.py @@ -86,6 +86,9 @@ def _setup_df_dir(self, first_file_path, second_file_path, file_extension): uploaded_data_item = mlrun.run.get_dataitem(dataitem_url) uploaded_data_item.upload(second_file_path) + @pytest.mark.skip( + reason="Skipping this test as it hangs when running against the CI system. 
ML-5598" + ) def test_v3io_large_object_upload(self, tmp_path): tempfile_1_path = os.path.join(tmp_path, "tempfile_1") tempfile_2_path = os.path.join(tmp_path, "tempfile_2") From 089ffb63a0dfa7b71c117f55ea4c7c15767c7c2b Mon Sep 17 00:00:00 2001 From: Liran BG Date: Thu, 21 Mar 2024 13:37:58 +0200 Subject: [PATCH 081/119] [Dependancies] Remedy vulnerabilities (#5312) --- server/log-collector/go.mod | 12 ++++++------ server/log-collector/go.sum | 20 ++++++++++---------- 2 files changed, 16 insertions(+), 16 deletions(-) diff --git a/server/log-collector/go.mod b/server/log-collector/go.mod index 651778d1136..6bf0293470d 100644 --- a/server/log-collector/go.mod +++ b/server/log-collector/go.mod @@ -12,7 +12,7 @@ require ( github.com/stretchr/testify v1.9.0 golang.org/x/sync v0.6.0 google.golang.org/grpc v1.62.0 - google.golang.org/protobuf v1.32.0 + google.golang.org/protobuf v1.33.0 k8s.io/api v0.29.2 k8s.io/apimachinery v0.29.2 k8s.io/client-go v0.29.2 @@ -41,11 +41,11 @@ require ( github.com/pkg/errors v0.9.1 // indirect github.com/pmezard/go-difflib v1.0.0 // indirect github.com/spf13/pflag v1.0.5 // indirect - golang.org/x/net v0.21.0 // indirect - golang.org/x/oauth2 v0.17.0 // indirect - golang.org/x/sys v0.17.0 // indirect - golang.org/x/term v0.17.0 // indirect - golang.org/x/text v0.14.0 // indirectm + golang.org/x/net v0.22.0 // indirect + golang.org/x/oauth2 v0.18.0 // indirect + golang.org/x/sys v0.18.0 // indirect + golang.org/x/term v0.18.0 // indirect + golang.org/x/text v0.14.0 // indirect; indirectm golang.org/x/time v0.5.0 // indirect google.golang.org/appengine v1.6.8 // indirect google.golang.org/genproto/googleapis/rpc v0.0.0-20240123012728-ef4313101c80 // indirect diff --git a/server/log-collector/go.sum b/server/log-collector/go.sum index 752a21eee0b..a2948ca6b94 100644 --- a/server/log-collector/go.sum +++ b/server/log-collector/go.sum @@ -145,11 +145,11 @@ golang.org/x/net v0.0.0-20200226121028-0de0cce0169b/go.mod h1:z5CRVTTTmAJ677TzLL golang.org/x/net v0.0.0-20201021035429-f5854403a974/go.mod h1:sp8m0HH+o8qH0wwXwYZr8TS3Oi6o0r6Gce1SSxlDquU= golang.org/x/net v0.0.0-20210226172049-e18ecbb05110/go.mod h1:m0MpNAwzfU5UDzcl9v0D8zg8gWTRqZa9RBIspLL5mdg= golang.org/x/net v0.0.0-20220722155237-a158d28d115b/go.mod h1:XRhObCWvk6IyKnWLug+ECip1KBveYUHfp+8e9klMJ9c= -golang.org/x/net v0.21.0 h1:AQyQV4dYCvJ7vGmJyKki9+PBdyvhkSd8EIx/qb0AYv4= -golang.org/x/net v0.21.0/go.mod h1:bIjVDfnllIU7BJ2DNgfnXvpSvtn8VRwhlsaeUTyUS44= +golang.org/x/net v0.22.0 h1:9sGLhx7iRIHEiX0oAJ3MRZMUCElJgy7Br1nO+AMN3Tc= +golang.org/x/net v0.22.0/go.mod h1:JKghWKKOSdJwpW2GEx0Ja7fmaKnMsbu+MWVZTokSYmg= golang.org/x/oauth2 v0.0.0-20180821212333-d2e6202438be/go.mod h1:N/0e6XlmueqKjAGxoOufVs8QHGRruUQn6yWY3a++T0U= -golang.org/x/oauth2 v0.17.0 h1:6m3ZPmLEFdVxKKWnKq4VqZ60gutO35zm+zrAHVmHyDQ= -golang.org/x/oauth2 v0.17.0/go.mod h1:OzPDGQiuQMguemayvdylqddI7qcD9lnSDb+1FiwQ5HA= +golang.org/x/oauth2 v0.18.0 h1:09qnuIAgzdx1XplqJvW6CQqMCtGZykZWcXzPMPUusvI= +golang.org/x/oauth2 v0.18.0/go.mod h1:Wf7knwG0MPoWIMMBgFlEaSUDaKskp0dCfrlJRJXbBi8= golang.org/x/sync v0.0.0-20180314180146-1d60e4601c6f/go.mod h1:RxMgew5VJxzue5/jJTE5uejpjVlOe/izrB70Jof72aM= golang.org/x/sync v0.0.0-20181108010431-42b317875d0f/go.mod h1:RxMgew5VJxzue5/jJTE5uejpjVlOe/izrB70Jof72aM= golang.org/x/sync v0.0.0-20190423024810-112230192c58/go.mod h1:RxMgew5VJxzue5/jJTE5uejpjVlOe/izrB70Jof72aM= @@ -170,12 +170,12 @@ golang.org/x/sys v0.0.0-20211025201205-69cdffdb9359/go.mod h1:oPkhp1MJrh7nUepCBc golang.org/x/sys v0.0.0-20220520151302-bc2c85ada10a/go.mod 
h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg= golang.org/x/sys v0.0.0-20220715151400-c0bba94af5f8/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg= golang.org/x/sys v0.0.0-20220722155257-8c9f86f7a55f/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg= -golang.org/x/sys v0.17.0 h1:25cE3gD+tdBA7lp7QfhuV+rJiE9YXTcS3VG1SqssI/Y= -golang.org/x/sys v0.17.0/go.mod h1:/VUhepiaJMQUp4+oa/7Zr1D23ma6VTLIYjOOTFZPUcA= +golang.org/x/sys v0.18.0 h1:DBdB3niSjOA/O0blCZBqDefyWNYveAYMNF1Wum0DYQ4= +golang.org/x/sys v0.18.0/go.mod h1:/VUhepiaJMQUp4+oa/7Zr1D23ma6VTLIYjOOTFZPUcA= golang.org/x/term v0.0.0-20201126162022-7de9c90e9dd1/go.mod h1:bj7SfCRtBDWHUb9snDiAeCFNEtKQo2Wmx5Cou7ajbmo= golang.org/x/term v0.0.0-20210927222741-03fcf44c2211/go.mod h1:jbD1KX2456YbFQfuXm/mYQcufACuNUgVhRMnK/tPxf8= -golang.org/x/term v0.17.0 h1:mkTF7LCd6WGJNL3K1Ad7kwxNfYAW6a8a8QqtMblp/4U= -golang.org/x/term v0.17.0/go.mod h1:lLRBjIVuehSbZlaOtGMbcMncT+aqLLLmKrsjNrUguwk= +golang.org/x/term v0.18.0 h1:FcHjZXDMxI8mM3nwhX9HlKop4C0YQvCVCdwYl2wOtE8= +golang.org/x/term v0.18.0/go.mod h1:ILwASektA3OnRv7amZ1xhE/KTR+u50pbXfZ03+6Nx58= golang.org/x/text v0.3.0/go.mod h1:NqM8EUOU14njkJ3fqMW+pc6Ldnwhi/IjpwHt7yyuwOQ= golang.org/x/text v0.3.3/go.mod h1:5Zoc/QRtKVWzQhOtBMvqHzDpF6irO9z98xDceosuGiQ= golang.org/x/text v0.3.7/go.mod h1:u+2+/6zg+i71rQMx5EYifcz6MCKuco9NR6JIITiCfzQ= @@ -218,8 +218,8 @@ google.golang.org/grpc v1.62.0 h1:HQKZ/fa1bXkX1oFOvSjmZEUL8wLSaZTjCcLAlmZRtdk= google.golang.org/grpc v1.62.0/go.mod h1:IWTG0VlJLCh1SkC58F7np9ka9mx/WNkjl4PGJaiq+QE= google.golang.org/protobuf v1.26.0-rc.1/go.mod h1:jlhhOSvTdKEhbULTjvd4ARK9grFBp09yW+WbY/TyQbw= google.golang.org/protobuf v1.26.0/go.mod h1:9q0QmTI4eRPtz6boOQmLYwt+qCgq0jsYwAQnmE0givc= -google.golang.org/protobuf v1.32.0 h1:pPC6BG5ex8PDFnkbrGU3EixyhKcQ2aDuBS36lqK/C7I= -google.golang.org/protobuf v1.32.0/go.mod h1:c6P6GXX6sHbq/GpV6MGZEdwhWPcYBgnhAHhKbcUYpos= +google.golang.org/protobuf v1.33.0 h1:uNO2rsAINq/JlFpSdYEKIZ0uKD/R9cpdv0T+yoGwGmI= +google.golang.org/protobuf v1.33.0/go.mod h1:c6P6GXX6sHbq/GpV6MGZEdwhWPcYBgnhAHhKbcUYpos= gopkg.in/check.v1 v0.0.0-20161208181325-20d25e280405/go.mod h1:Co6ibVJAznAaIkqp8huTwlJQCZ016jof/cbN4VW5Yz0= gopkg.in/check.v1 v1.0.0-20180628173108-788fd7840127/go.mod h1:Co6ibVJAznAaIkqp8huTwlJQCZ016jof/cbN4VW5Yz0= gopkg.in/check.v1 v1.0.0-20201130134442-10cb98267c6c h1:Hei/4ADfdWqJk1ZMxUNpqntNwaWcugrBjAiHlqqRiVk= From e5107cf83f581b89ed448dac929388262da88c04 Mon Sep 17 00:00:00 2001 From: Liran BG Date: Thu, 21 Mar 2024 14:43:21 +0200 Subject: [PATCH 082/119] [DataStore] Condition http verify on mlrun config (#5311) --- mlrun/datastore/base.py | 38 ++++++++++++++++++++++---------------- 1 file changed, 22 insertions(+), 16 deletions(-) diff --git a/mlrun/datastore/base.py b/mlrun/datastore/base.py index b7ebb7ac34f..832426d86d6 100644 --- a/mlrun/datastore/base.py +++ b/mlrun/datastore/base.py @@ -27,6 +27,7 @@ import urllib3 from deprecated import deprecated +import mlrun.config import mlrun.errors from mlrun.errors import err_to_str from mlrun.utils import StorePrefix, is_ipython, logger @@ -34,10 +35,6 @@ from .store_resources import is_store_uri, parse_store_uri from .utils import filter_df_start_end_time, select_columns_from_df -verify_ssl = False -if not verify_ssl: - urllib3.disable_warnings(urllib3.exceptions.InsecureRequestWarning) - class FileStats: def __init__(self, size, modified, content_type=None): @@ -643,17 +640,6 @@ def basic_auth_header(user, password): return {"Authorization": authstr} -def http_get(url, headers=None, auth=None): - try: - 
response = requests.get(url, headers=headers, auth=auth, verify=verify_ssl) - except OSError as exc: - raise OSError(f"error: cannot connect to {url}: {err_to_str(exc)}") - - mlrun.errors.raise_for_status(response) - - return response.content - - class HttpStore(DataStore): def __init__(self, parent, schema, name, endpoint="", secrets: dict = None): super().__init__(parent, name, schema, endpoint, secrets) @@ -681,7 +667,7 @@ def put(self, key, data, append=False): raise ValueError("unimplemented") def get(self, key, size=None, offset=0): - data = http_get(self.url + self._join(key), self._headers, self.auth) + data = self._http_get(self.url + self._join(key), self._headers, self.auth) if offset: data = data[offset:] if size: @@ -701,6 +687,26 @@ def _validate_https_token(self): f"schema as it is not secure and is not recommended." ) + def _http_get( + self, + url, + headers=None, + auth=None, + ): + # import here to prevent import cycle + from mlrun.config import config as mlconf + + verify_ssl = mlconf.httpdb.http.verify + try: + if not verify_ssl: + urllib3.disable_warnings(urllib3.exceptions.InsecureRequestWarning) + response = requests.get(url, headers=headers, auth=auth, verify=verify_ssl) + except OSError as exc: + raise OSError(f"error: cannot connect to {url}: {err_to_str(exc)}") + + mlrun.errors.raise_for_status(response) + return response.content + # This wrapper class is designed to extract the 'ds' schema and profile name from URL-formatted paths. # Within fsspec, the AbstractFileSystem::_strip_protocol() internal method is used to handle complete URL paths. From 467e492c609b314126e880f26a0c116253d4a48c Mon Sep 17 00:00:00 2001 From: Liran BG Date: Mon, 25 Mar 2024 21:03:34 +0200 Subject: [PATCH 083/119] [API] Enhance log collector [1.6.x] (#5321) --- server/api/db/base.py | 1 + server/api/db/sqldb/db.py | 4 + server/api/main.py | 29 ++++- server/api/utils/clients/log_collector.py | 35 +++++- server/log-collector/cmd/logcollector/main.go | 4 +- server/log-collector/go.mod | 2 + server/log-collector/go.sum | 4 + server/log-collector/pkg/common/consts.go | 9 +- .../logcollector/logcollector_test.go | 98 +++++++++++++--- .../pkg/services/logcollector/server.go | 108 +++++++++++++++++- .../logcollector/test/logcollector_test.go | 3 +- .../pkg/services/logcollector/test/nop/nop.go | 15 +++ .../log-collector/proto/log_collector.proto | 9 ++ tests/api/test_collect_runs_logs.py | 36 ++++-- tests/api/utils/clients/test_log_collector.py | 55 ++++++--- 15 files changed, 360 insertions(+), 52 deletions(-) diff --git a/server/api/db/base.py b/server/api/db/base.py index af1d98dd805..2cae70f11dc 100644 --- a/server/api/db/base.py +++ b/server/api/db/base.py @@ -71,6 +71,7 @@ def list_distinct_runs_uids( only_uids: bool = False, last_update_time_from: datetime.datetime = None, states: List[str] = None, + specific_uids: List[str] = None, ): pass diff --git a/server/api/db/sqldb/db.py b/server/api/db/sqldb/db.py index 2b2e1c0b369..0817dbc5370 100644 --- a/server/api/db/sqldb/db.py +++ b/server/api/db/sqldb/db.py @@ -247,6 +247,7 @@ def list_distinct_runs_uids( only_uids=True, last_update_time_from: datetime = None, states: typing.List[str] = None, + specific_uids: List[str] = None, ) -> typing.Union[typing.List[str], RunList]: """ List all runs uids in the DB @@ -277,6 +278,9 @@ def list_distinct_runs_uids( if requested_logs_modes is not None: query = query.filter(Run.requested_logs.in_(requested_logs_modes)) + if specific_uids: + query = query.filter(Run.uid.in_(specific_uids)) + if not 
only_uids: # group_by allows us to have a row per uid with the whole record rather than just the uid (as distinct does) # note we cannot promise that the same row will be returned each time per uid as the order is not guaranteed diff --git a/server/api/main.py b/server/api/main.py index 95b15c6e826..797173c1deb 100644 --- a/server/api/main.py +++ b/server/api/main.py @@ -511,12 +511,36 @@ async def _start_periodic_stop_logs(): async def _verify_log_collection_stopped_on_startup(): """ - Pulls runs from DB that are in terminal state and have logs requested, and call stop logs for them. + First, list runs that are currently being collected in the log collector. + Second, query the DB for those runs that are also in terminal state and have logs requested. + Lastly, call stop logs for the runs that met all of the above conditions. This is done so that the log collector won't keep trying to collect logs for runs that are already in terminal state. """ + logger.debug("Listing runs currently being log collected") + log_collector_client = server.api.utils.clients.log_collector.LogCollectorClient() + run_uids_in_progress = [] + failed_listing = False + try: + runs_in_progress_response_stream = log_collector_client.list_runs_in_progress() + # collate the run uids from the response stream to a list + async for run_uids in runs_in_progress_response_stream: + run_uids_in_progress.extend(run_uids) + except Exception as exc: + failed_listing = True + logger.warning( + "Failed listing runs currently being log collected", + exc=err_to_str(exc), + traceback=traceback.format_exc(), + ) + + if len(run_uids_in_progress) == 0 and not failed_listing: + logger.debug("No runs currently being log collected") + return + logger.debug( - "Getting all runs which have reached terminal state and already have logs requested", + "Getting current log collected runs which have reached terminal state and already have logs requested", + run_uids_in_progress_count=len(run_uids_in_progress), ) db_session = await fastapi.concurrency.run_in_threadpool(create_session) try: @@ -531,6 +555,7 @@ async def _verify_log_collection_stopped_on_startup(): # usually it happens when run pods get preempted mlrun.runtimes.constants.RunStates.unknown, ], + specific_uids=run_uids_in_progress, ) if len(runs) > 0: diff --git a/server/api/utils/clients/log_collector.py b/server/api/utils/clients/log_collector.py index 4ee268fa96d..e92ebada5e7 100644 --- a/server/api/utils/clients/log_collector.py +++ b/server/api/utils/clients/log_collector.py @@ -313,7 +313,40 @@ async def delete_logs( if verbose: logger.warning(msg, error=response.errorMessage) - def _retryable_error(self, error_message, retryable_error_patterns) -> bool: + async def list_runs_in_progress( + self, + project: str = None, + verbose: bool = True, + raise_on_error: bool = True, + ) -> typing.AsyncIterable[str]: + """ + List runs in progress from the log collector service + :param project: A project name to filter the runs by. 
If not provided, all runs in progress will be listed + :param verbose: Whether to log errors + :param raise_on_error: Whether to raise an exception on error + :return: A list of run uids + """ + request = self._log_collector_pb2.ListRunsRequest( + project=project, + ) + + response_stream = self._call_stream("ListRunsInProgress", request) + try: + async for chunk in response_stream: + yield chunk.runUIDs + except Exception as exc: + msg = "Failed to list runs in progress" + if raise_on_error: + raise LogCollectorErrorCode.map_error_code_to_mlrun_error( + LogCollectorErrorCode.ErrCodeInternal.value, + mlrun.errors.err_to_str(exc), + msg, + ) + if verbose: + logger.warning(msg, error=mlrun.errors.err_to_str(exc)) + + @staticmethod + def _retryable_error(error_message, retryable_error_patterns) -> bool: """ Check if the error is retryable :param error_message: The error message diff --git a/server/log-collector/cmd/logcollector/main.go b/server/log-collector/cmd/logcollector/main.go index 7826f6b6245..2fd5384893a 100644 --- a/server/log-collector/cmd/logcollector/main.go +++ b/server/log-collector/cmd/logcollector/main.go @@ -45,6 +45,7 @@ func StartServer() error { getLogsBufferSizeBytes := flag.Int("get-logs-buffer-buffer-size-bytes", common.GetEnvOrDefaultInt("MLRUN_LOG_COLLECTOR__GET_LOGS_BUFFER_SIZE_BYTES", common.DefaultGetLogsBufferSize), "Size of buffers in the buffer pool for getting logs, in bytes (default: 3.75MB)") logTimeUpdateBytesInterval := flag.Int("log-time-update-bytes-interval", common.GetEnvOrDefaultInt("MLRUN_LOG_COLLECTOR__LOG_TIME_UPDATE_BYTES_INTERVAL", common.LogTimeUpdateBytesInterval), "Amount of logs to read between updates of the last log time in the 'in memory' state, in bytes (default: 4KB)") clusterizationRole := flag.String("clusterization-role", common.GetEnvOrDefaultString("MLRUN_HTTPDB__CLUSTERIZATION__ROLE", "chief"), "The role of the log collector in the cluster (chief, worker)") + listRunsChunkSize := flag.Int("list-runs-chunk-size", common.GetEnvOrDefaultInt("MLRUN_LOG_COLLECTOR__LIST_RUNS_CHUNK_SIZE", common.DefaultListRunsChunkSize), "The chunk size for listing runs in progress") // if namespace is not passed, it will be taken from env namespace := flag.String("namespace", "", "The namespace to collect logs from") @@ -80,7 +81,8 @@ func StartServer() error { *logCollectionBufferSizeBytes, *getLogsBufferSizeBytes, *logTimeUpdateBytesInterval, - *advancedLogLevel) + *advancedLogLevel, + *listRunsChunkSize) if err != nil { return errors.Wrap(err, "Failed to create log collector server") } diff --git a/server/log-collector/go.mod b/server/log-collector/go.mod index 6bf0293470d..dd5762b8605 100644 --- a/server/log-collector/go.mod +++ b/server/log-collector/go.mod @@ -8,6 +8,7 @@ require ( github.com/nuclio/errors v0.0.4 github.com/nuclio/logger v0.0.1 github.com/nuclio/loggerus v0.0.6 + github.com/samber/lo v1.39.0 github.com/sirupsen/logrus v1.9.3 github.com/stretchr/testify v1.9.0 golang.org/x/sync v0.6.0 @@ -41,6 +42,7 @@ require ( github.com/pkg/errors v0.9.1 // indirect github.com/pmezard/go-difflib v1.0.0 // indirect github.com/spf13/pflag v1.0.5 // indirect + golang.org/x/exp v0.0.0-20220303212507-bbda1eaf7a17 // indirect golang.org/x/net v0.22.0 // indirect golang.org/x/oauth2 v0.18.0 // indirect golang.org/x/sys v0.18.0 // indirect diff --git a/server/log-collector/go.sum b/server/log-collector/go.sum index a2948ca6b94..9729fd468df 100644 --- a/server/log-collector/go.sum +++ b/server/log-collector/go.sum @@ -102,6 +102,8 @@ 
github.com/pmezard/go-difflib v1.0.0/go.mod h1:iKH77koFhYxTK1pcRnkKkqfTogsbg7gZN github.com/prometheus/client_model v0.0.0-20190812154241-14fe0d1b01d4/go.mod h1:xMI15A0UPsDsEKsMN9yxemIoYk6Tm2C1GtYGdfGttqA= github.com/rogpeppe/go-internal v1.11.0 h1:cWPaGQEPrBb5/AsnsZesgZZ9yb1OQ+GOISoDNXVBh4M= github.com/rogpeppe/go-internal v1.11.0/go.mod h1:ddIwULY96R17DhadqLgMfk9H9tvdUzkipdSkR5nkCZA= +github.com/samber/lo v1.39.0 h1:4gTz1wUhNYLhFSKl6O+8peW0v2F4BCY034GRpU9WnuA= +github.com/samber/lo v1.39.0/go.mod h1:+m/ZKRl6ClXCE2Lgf3MsQlWfh4bn1bz6CXEOxnEXnEA= github.com/sirupsen/logrus v1.4.2/go.mod h1:tLMulIdttU9McNUspp0xgXVQah82FyeX6MwdIuYE2rE= github.com/sirupsen/logrus v1.8.0/go.mod h1:4GuYW9TZmE769R5STWrRakJc4UqQ3+QQ95fyz7ENv1A= github.com/sirupsen/logrus v1.9.3 h1:dueUQJ1C2q9oE3F7wvmSGAaVtTmUizReu6fjN8uqzbQ= @@ -128,6 +130,8 @@ golang.org/x/crypto v0.0.0-20191011191535-87dc89f01550/go.mod h1:yigFU9vqHzYiE8U golang.org/x/crypto v0.0.0-20200622213623-75b288015ac9/go.mod h1:LzIPMQfyMNhhGPhUkYOs5KpL4U8rLKemX1yGLhDgUto= golang.org/x/crypto v0.0.0-20210921155107-089bfa567519/go.mod h1:GvvjBRRGRdwPK5ydBHafDWAxML/pGHZbMvKqRZ5+Abc= golang.org/x/exp v0.0.0-20190121172915-509febef88a4/go.mod h1:CJ0aWSM057203Lf6IL+f9T1iT9GByDxfZKAQTCR3kQA= +golang.org/x/exp v0.0.0-20220303212507-bbda1eaf7a17 h1:3MTrJm4PyNL9NBqvYDSj3DHl46qQakyfqfWo4jgfaEM= +golang.org/x/exp v0.0.0-20220303212507-bbda1eaf7a17/go.mod h1:lgLbSvA5ygNOMpwM/9anMpWVlVJ7Z+cHWq/eFuinpGE= golang.org/x/lint v0.0.0-20181026193005-c67002cb31c3/go.mod h1:UVdnD1Gm6xHRNCYTkRU2/jEulfH38KcIWyp/GAMgvoE= golang.org/x/lint v0.0.0-20190227174305-5b3e6a55c961/go.mod h1:wehouNa3lNwaWXcvxsM5YxQ5yQlVC4a0KAMCusXpPoU= golang.org/x/lint v0.0.0-20190313153728-d0100b6bd8b3/go.mod h1:6SW0HCj/g11FgYtHlgUYUwCkIfeOF89ocIRzGO/8vkc= diff --git a/server/log-collector/pkg/common/consts.go b/server/log-collector/pkg/common/consts.go index 987044e8b4a..a28cd173227 100644 --- a/server/log-collector/pkg/common/consts.go +++ b/server/log-collector/pkg/common/consts.go @@ -23,9 +23,6 @@ const ( ErrCodeBadRequest ) -const DefaultErrorStackDepth = 3 - -// Buffer sizes const ( // DefaultLogCollectionBufferSize is the default buffer size for collecting logs from pods DefaultLogCollectionBufferSize int = 10 * 1024 * 1024 // 10MB @@ -37,6 +34,12 @@ const ( // LogTimeUpdateBytesInterval is the bytes amount to read between updates of the // last log time in the in memory state LogTimeUpdateBytesInterval int = 4 * 1024 // 4KB + + // DefaultListRunsChunkSize is the default chunk size for listing runs + DefaultListRunsChunkSize int = 10 + + // DefaultErrorStackDepth is the default stack depth for errors + DefaultErrorStackDepth = 3 ) // Custom errors diff --git a/server/log-collector/pkg/services/logcollector/logcollector_test.go b/server/log-collector/pkg/services/logcollector/logcollector_test.go index de1415d1cf8..29b1db28faf 100644 --- a/server/log-collector/pkg/services/logcollector/logcollector_test.go +++ b/server/log-collector/pkg/services/logcollector/logcollector_test.go @@ -68,6 +68,7 @@ func (suite *LogCollectorTestSuite) SetupSuite() { bufferSizeBytes := 100 clusterizationRole := "chief" advancedLogLevel := 0 + listRunsChunkSize := 10 // create base dir suite.baseDir = path.Join(os.TempDir(), "/log_collector_test") @@ -88,7 +89,8 @@ func (suite *LogCollectorTestSuite) SetupSuite() { bufferSizeBytes, bufferSizeBytes, common.LogTimeUpdateBytesInterval, - advancedLogLevel) + advancedLogLevel, + listRunsChunkSize) suite.Require().NoError(err, "Failed to create log collector server") 
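	// listRunsChunkSize caps how many run UIDs are packed into each streamed
	// ListRunsResponse message (10 here matches common.DefaultListRunsChunkSize)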
suite.logger.InfoWith("Setup complete") @@ -628,24 +630,7 @@ func (suite *LogCollectorTestSuite) TestStopLog() { projectNum := 2 var projectToRuns = map[string][]string{} - for i := 0; i < projectNum; i++ { - projectName := fmt.Sprintf("project-%d", i) - - // add log item to the server's states, so no error will be returned - for j := 0; j < logItemsNum; j++ { - runUID := uuid.New().String() - projectToRuns[projectName] = append(projectToRuns[projectName], runUID) - selector := fmt.Sprintf("run=%s", runUID) - - // Add state to the log collector's state manifest - err = suite.logCollectorServer.stateManifest.AddLogItem(suite.ctx, runUID, selector, projectName) - suite.Require().NoError(err, "Failed to add log item to the state manifest") - - // Add state to the log collector's current state - err = suite.logCollectorServer.currentState.AddLogItem(suite.ctx, runUID, selector, projectName) - suite.Require().NoError(err, "Failed to add log item to the current state") - } - } + suite.createLogItems(projectNum, logItemsNum, projectToRuns) // write state err = suite.logCollectorServer.stateManifest.WriteState(suite.logCollectorServer.stateManifest.GetState()) @@ -802,6 +787,81 @@ func (suite *LogCollectorTestSuite) TestGetLogFilePath() { suite.Require().Equal(runFilePath, logFilePath, "Expected log file path to be the same as the run file path") } +func (suite *LogCollectorTestSuite) TestListRunsInProgress() { + listRunsInProgress := func(request *log_collector.ListRunsRequest) []string { + nopStream := &nop.ListRunsResponseStreamNop{} + err := suite.logCollectorServer.ListRunsInProgress(request, nopStream) + suite.Require().NoError(err, "Failed to list runs in progress") + return nopStream.RunUIDs + + } + + verifyRuns := func(expectedRunUIDs []string, responseRunUIDs []string) { + suite.Require().Equal(len(expectedRunUIDs), len(responseRunUIDs)) + for _, runUID := range responseRunUIDs { + suite.Require().Contains(expectedRunUIDs, runUID, "Expected runUID to be in the expected list") + } + } + + // list runs without any runs in progress + runsInProgress := listRunsInProgress(&log_collector.ListRunsRequest{}) + suite.Require().Empty(runsInProgress, "Expected no runs in progress") + + // create log items in progress + projectNum := 5 + logItemsNum := 5 + var projectToRuns = map[string][]string{} + suite.createLogItems(projectNum, logItemsNum, projectToRuns) + defer func() { + // remove projects from state manifest and current state when test is done to avoid conflicts with other tests + for project := range projectToRuns { + err := suite.logCollectorServer.stateManifest.RemoveProject(project) + suite.Assert().NoError(err, "Failed to remove project from state manifest") + + err = suite.logCollectorServer.currentState.RemoveProject(project) + suite.Assert().NoError(err, "Failed to remove project from current state") + } + }() + + var expectedRunUIDs []string + for _, runs := range projectToRuns { + expectedRunUIDs = append(expectedRunUIDs, runs...) 
+ } + + // list runs in progress for all projects + runsInProgress = listRunsInProgress(&log_collector.ListRunsRequest{}) + verifyRuns(expectedRunUIDs, runsInProgress) + + // list runs in progress for a specific project + projectName := "project-1" + expectedRunUIDs = projectToRuns[projectName] + runsInProgress = listRunsInProgress(&log_collector.ListRunsRequest{Project: projectName}) + verifyRuns(expectedRunUIDs, runsInProgress) +} + +// createLogItems creates `logItemsNum` log items for `projectNum` projects, and adds them to the server's states +func (suite *LogCollectorTestSuite) createLogItems(projectNum int, logItemsNum int, projectToRuns map[string][]string) { + var err error + for i := 0; i < projectNum; i++ { + projectName := fmt.Sprintf("project-%d", i) + + // add log item to the server's states, so no error will be returned + for j := 0; j < logItemsNum; j++ { + runUID := uuid.New().String() + projectToRuns[projectName] = append(projectToRuns[projectName], runUID) + selector := fmt.Sprintf("run=%s", runUID) + + // Add state to the log collector's state manifest + err = suite.logCollectorServer.stateManifest.AddLogItem(suite.ctx, runUID, selector, projectName) + suite.Require().NoError(err, "Failed to add log item to the state manifest") + + // Add state to the log collector's current state + err = suite.logCollectorServer.currentState.AddLogItem(suite.ctx, runUID, selector, projectName) + suite.Require().NoError(err, "Failed to add log item to the current state") + } + } +} + func TestLogCollectorTestSuite(t *testing.T) { suite.Run(t, new(LogCollectorTestSuite)) } diff --git a/server/log-collector/pkg/services/logcollector/server.go b/server/log-collector/pkg/services/logcollector/server.go index 376012901e7..50065b4cf47 100644 --- a/server/log-collector/pkg/services/logcollector/server.go +++ b/server/log-collector/pkg/services/logcollector/server.go @@ -37,6 +37,7 @@ import ( "github.com/nuclio/errors" "github.com/nuclio/logger" + "github.com/samber/lo" "golang.org/x/sync/errgroup" "k8s.io/api/core/v1" apierrors "k8s.io/apimachinery/pkg/api/errors" @@ -71,6 +72,8 @@ type Server struct { // interval durations readLogWaitTime time.Duration monitoringInterval time.Duration + + listRunsChunkSize int } // NewLogCollectorServer creates a new log collector server @@ -87,7 +90,8 @@ func NewLogCollectorServer(logger logger.Logger, logCollectionBufferSizeBytes, getLogsBufferSizeBytes, logTimeUpdateBytesInterval, - advancedLogLevel int) (*Server, error) { + advancedLogLevel, + listRunsChunkSize int) (*Server, error) { abstractServer, err := framework.NewAbstractMlrunGRPCServer(logger, nil) if err != nil { return nil, errors.Wrap(err, "Failed to create abstract server") @@ -145,6 +149,10 @@ func NewLogCollectorServer(logger logger.Logger, logCollectionBufferPool := bufferpool.NewSizedBytePool(logCollectionBufferPoolSize, logCollectionBufferSizeBytes) getLogsBufferPool := bufferpool.NewSizedBytePool(getLogsBufferPoolSize, getLogsBufferSizeBytes) + if listRunsChunkSize <= 0 { + listRunsChunkSize = common.DefaultListRunsChunkSize + } + return &Server{ AbstractMlrunGRPCServer: abstractServer, namespace: namespace, @@ -163,6 +171,7 @@ func NewLogCollectorServer(logger logger.Logger, startLogsFindingPodsInterval: 3 * time.Second, startLogsFindingPodsTimeout: 15 * time.Second, advancedLogLevel: advancedLogLevel, + listRunsChunkSize: listRunsChunkSize, }, nil } @@ -634,6 +643,82 @@ func (s *Server) DeleteLogs(ctx context.Context, request *protologcollector.Stop return s.successfulBaseResponse(), 
		nil
 }
 
+// ListRunsInProgress streams the runs that are currently being log collected
+func (s *Server) ListRunsInProgress(request *protologcollector.ListRunsRequest, responseStream protologcollector.LogCollector_ListRunsInProgressServer) error {
+	ctx := responseStream.Context()
+
+	s.Logger.DebugWithCtx(ctx,
+		"Received list runs in progress request",
+		"project", request.Project)
+
+	// get all runs in progress from the state manifest
+	logItemsInProgress, err := s.stateManifest.GetItemsInProgress()
+	if err != nil {
+		message := "Failed to list runs in progress from state manifest"
+		s.Logger.ErrorWithCtx(ctx, message)
+		return errors.Wrap(err, message)
+	}
+
+	runsInProgress, err := s.getRunUIDsInProgress(ctx, logItemsInProgress, request.Project)
+	if err != nil {
+		message := "Failed to list runs in progress"
+		s.Logger.ErrorWithCtx(ctx, message)
+		return errors.Wrap(err, message)
+	}
+
+	// get all runs in progress from the current state, and merge them with the runs from the state manifest.
+	// the two can only diverge in rare cases, e.g. after the server restarted
+	logItemsInProgressCurrentState, err := s.currentState.GetItemsInProgress()
+	if err != nil {
+		message := "Failed to get items in progress from current state"
+		s.Logger.ErrorWithCtx(ctx, message)
+		return errors.Wrap(err, message)
+	}
+
+	runsInProgressCurrentState, err := s.getRunUIDsInProgress(ctx, logItemsInProgressCurrentState, request.Project)
+	if err != nil {
+		message := "Failed to list runs in progress from current state"
+		s.Logger.ErrorWithCtx(ctx, message)
+		return errors.Wrap(err, message)
+	}
+
+	// merge the two lists, deduplicating run UIDs
+	for _, runUID := range runsInProgressCurrentState {
+		if !lo.Contains[string](runsInProgress, runUID) {
+			runsInProgress = append(runsInProgress, runUID)
+		}
+	}
+
+	// send an empty response if no runs are in progress
+	if len(runsInProgress) == 0 {
+		s.Logger.DebugWithCtx(ctx, "No runs in progress to list")
+		if err := responseStream.Send(&protologcollector.ListRunsResponse{
+			RunUIDs: []string{},
+		}); err != nil {
+			return errors.Wrapf(err, "Failed to send empty response to stream")
+		}
+		return nil
+	}
+
+	// send the runs in progress to the stream in chunks of `listRunsChunkSize`, to stay within the gRPC message size limit
+	for i := 0; i < len(runsInProgress); i += s.listRunsChunkSize {
+		endIndex := i + s.listRunsChunkSize
+		if endIndex > len(runsInProgress) {
+			endIndex = len(runsInProgress)
+		}
+
+		if err := responseStream.Send(&protologcollector.ListRunsResponse{
+			RunUIDs: runsInProgress[i:endIndex],
+		}); err != nil {
+			return errors.Wrapf(err, "Failed to send runs in progress to stream")
+		}
+	}
+
+	return nil
+}
+
 // startLogStreaming streams logs from a pod and writes them into a file
 func (s *Server) startLogStreaming(ctx context.Context,
 	runUID,
@@ -1212,3 +1297,24 @@ func (s *Server) deleteProjectLogs(project string) error {
 	}
 	return nil
 }
+
+func (s *Server) getRunUIDsInProgress(ctx context.Context, inProgressMap *sync.Map, project string) ([]string, error) {
+	var runUIDs []string
+
+	inProgressMap.Range(func(projectKey, runUIDsToLogItemsValue interface{}) bool {
+		// if a project was provided, only return runUIDs for that project
+		if project != "" && project != projectKey {
+			return true
+		}
+
+		runUIDsToLogItems := runUIDsToLogItemsValue.(*sync.Map)
+		runUIDsToLogItems.Range(func(key, value interface{}) bool {
+			runUID := key.(string)
+			runUIDs = append(runUIDs, runUID)
+			return true
+		})
+		return true
+	})
+
+	return runUIDs, nil
+}
diff --git a/server/log-collector/pkg/services/logcollector/test/logcollector_test.go b/server/log-collector/pkg/services/logcollector/test/logcollector_test.go
index 73fb25ab57f..ea2fb4b29ff 100644
--- a/server/log-collector/pkg/services/logcollector/test/logcollector_test.go
+++ b/server/log-collector/pkg/services/logcollector/test/logcollector_test.go
@@ -98,7 +98,8 @@ func (suite *LogCollectorTestSuite) SetupSuite() {
 		suite.bufferSizeBytes, /* logCollectionBufferSizeBytes */
 		suite.bufferSizeBytes, /* getLogsBufferSizeBytes */
 		common.LogTimeUpdateBytesInterval,
-		0) /* advancedLogLevel */
+		0, /* advancedLogLevel */
+		10) /* listRunsChunkSize */
 	suite.Require().NoError(err, "Failed to create log collector server")
 
 	// start log collector server in a goroutine, so it won't block the test
diff --git a/server/log-collector/pkg/services/logcollector/test/nop/nop.go b/server/log-collector/pkg/services/logcollector/test/nop/nop.go
index 2816f61109e..f1ef8143339 100644
--- a/server/log-collector/pkg/services/logcollector/test/nop/nop.go
+++ b/server/log-collector/pkg/services/logcollector/test/nop/nop.go
@@ -36,3 +36,18 @@ func (m *GetLogsResponseStreamNop) Send(response *log_collector.GetLogsResponse)
 func (m *GetLogsResponseStreamNop) Context() context.Context {
 	return context.Background()
 }
+
+// ListRunsResponseStreamNop is a nop implementation of the protologcollector.LogCollector_ListRunsInProgressServer interface
+type ListRunsResponseStreamNop struct {
+	grpc.ServerStream
+	RunUIDs []string
+}
+
+func (m *ListRunsResponseStreamNop) Send(response *log_collector.ListRunsResponse) error {
+	m.RunUIDs = append(m.RunUIDs, response.RunUIDs...)
+	return nil
+}
+
+func (m *ListRunsResponseStreamNop) Context() context.Context {
+	return context.Background()
+}
diff --git a/server/log-collector/proto/log_collector.proto b/server/log-collector/proto/log_collector.proto
index 341bb423070..35726b49bfe 100644
--- a/server/log-collector/proto/log_collector.proto
+++ b/server/log-collector/proto/log_collector.proto
@@ -24,6 +24,7 @@ service LogCollector {
   rpc GetLogSize(GetLogSizeRequest) returns (GetLogSizeResponse) {}
   rpc StopLogs(StopLogsRequest) returns (BaseResponse) {}
   rpc DeleteLogs(StopLogsRequest) returns (BaseResponse) {}
+  rpc ListRunsInProgress(ListRunsRequest) returns (stream ListRunsResponse) {}
 }
 
 message BaseResponse {
@@ -71,6 +72,14 @@ message StopLogsRequest {
   repeated string runUIDs = 2;
 }
 
+message ListRunsRequest {
+  string project = 1;
+}
+
+message ListRunsResponse {
+  repeated string runUIDs = 1;
+}
+
 // StringArray is a wrapper around a repeated string field, used in map values.
message StringArray { repeated string values = 1; diff --git a/tests/api/test_collect_runs_logs.py b/tests/api/test_collect_runs_logs.py index d548e4b0638..8df59af501a 100644 --- a/tests/api/test_collect_runs_logs.py +++ b/tests/api/test_collect_runs_logs.py @@ -25,7 +25,10 @@ import server.api.main import server.api.utils.clients.log_collector import server.api.utils.singletons.db -from tests.api.utils.clients.test_log_collector import BaseLogCollectorResponse +from tests.api.utils.clients.test_log_collector import ( + BaseLogCollectorResponse, + ListRunsResponse, +) class TestCollectRunSLogs: @@ -482,6 +485,9 @@ async def test_verify_stop_logs_on_startup( run_uids = [run_uid for run_uid, _ in run_uids_to_state] + # the first run is not currently being log collected + run_uids_log_collected = run_uids[1:] + # update requested logs field to True server.api.utils.singletons.db.get_db().update_runs_requested_logs( db, run_uids, True @@ -492,24 +498,29 @@ async def test_verify_stop_logs_on_startup( requested_logs_modes=[True], only_uids=False, ) - assert len(runs) == 5 + assert len(runs) == len(run_uids) log_collector._call = unittest.mock.AsyncMock(return_value=None) + log_collector._call_stream = unittest.mock.MagicMock( + return_value=ListRunsResponse(run_uids=run_uids_log_collected) + ) await server.api.main._verify_log_collection_stopped_on_startup() + assert log_collector._call_stream.call_count == 1 + assert log_collector._call_stream.call_args[0][0] == "ListRunsInProgress" assert log_collector._call.call_count == 1 assert log_collector._call.call_args[0][0] == "StopLogs" stop_log_request = log_collector._call.call_args[0][1] assert stop_log_request.project == project_name # one of the runs is in running state - run_uids = run_uids[: len(run_uids) - 1] - assert len(stop_log_request.runUIDs) == len(run_uids) + expected_run_uids = run_uids_log_collected[:-1] + assert len(stop_log_request.runUIDs) == len(expected_run_uids) assert ( deepdiff.DeepDiff( list(stop_log_request.runUIDs), - run_uids, + expected_run_uids, ignore_order=True, ) == {} @@ -517,7 +528,7 @@ async def test_verify_stop_logs_on_startup( # update requested logs field to False for one run server.api.utils.singletons.db.get_db().update_runs_requested_logs( - db, [run_uids[0]], False + db, [run_uids[1]], False ) runs = server.api.utils.singletons.db.get_db().list_distinct_runs_uids( @@ -527,17 +538,26 @@ async def test_verify_stop_logs_on_startup( ) assert len(runs) == 4 + # mock it again so the stream will run again + log_collector._call_stream = unittest.mock.MagicMock( + return_value=ListRunsResponse(run_uids=run_uids_log_collected) + ) + await server.api.main._verify_log_collection_stopped_on_startup() assert log_collector._call.call_count == 2 assert log_collector._call.call_args[0][0] == "StopLogs" stop_log_request = log_collector._call.call_args[0][1] assert stop_log_request.project == project_name - assert len(stop_log_request.runUIDs) == 3 + assert len(stop_log_request.runUIDs) == 2 + + # the first run is not currently being log collected, second run has requested logs set to False + # and the last run is in running state + expected_run_uids = run_uids_log_collected[1:-1] assert ( deepdiff.DeepDiff( list(stop_log_request.runUIDs), - run_uids[1:], + expected_run_uids, ignore_order=True, ) == {} diff --git a/tests/api/utils/clients/test_log_collector.py b/tests/api/utils/clients/test_log_collector.py index 2499fadb284..04279f69a77 100644 --- a/tests/api/utils/clients/test_log_collector.py +++ 
b/tests/api/utils/clients/test_log_collector.py @@ -16,9 +16,7 @@ import unittest.mock import deepdiff -import fastapi.testclient import pytest -import sqlalchemy.orm.session import mlrun import mlrun.common.schemas @@ -66,6 +64,23 @@ def __init__(self, success, error, log_size=None): self.logSize = log_size +class ListRunsResponse: + def __init__(self, run_uids=None, total_calls=1): + self.runUIDs = run_uids or [] + self.total_calls = total_calls + self.current_calls = 0 + + # the following methods are required for the async iterator protocol + def __aiter__(self): + return self + + async def __anext__(self): + if self.current_calls < self.total_calls: + self.current_calls += 1 + return self + raise StopAsyncIteration + + mlrun.mlconf.log_collector.address = "http://localhost:8080" mlrun.mlconf.log_collector.mode = mlrun.common.schemas.LogsCollectorMode.sidecar @@ -74,8 +89,6 @@ class TestLogCollector: @pytest.mark.asyncio async def test_start_log( self, - db: sqlalchemy.orm.session.Session, - client: fastapi.testclient.TestClient, monkeypatch, ): run_uid = "123" @@ -108,9 +121,7 @@ async def test_start_log( assert success is False and error == "Failed to start logs" @pytest.mark.asyncio - async def test_get_logs( - self, db: sqlalchemy.orm.session.Session, client: fastapi.testclient.TestClient - ): + async def test_get_logs(self): run_uid = "123" project_name = "some-project" log_collector = server.api.utils.clients.log_collector.LogCollectorClient() @@ -151,9 +162,7 @@ async def test_get_logs( assert log == b"" @pytest.mark.asyncio - async def test_get_log_with_retryable_error( - self, db: sqlalchemy.orm.session.Session, client: fastapi.testclient.TestClient - ): + async def test_get_log_with_retryable_error(self): run_uid = "123" project_name = "some-project" log_collector = server.api.utils.clients.log_collector.LogCollectorClient() @@ -186,9 +195,7 @@ async def test_get_log_with_retryable_error( assert log == b"" # should not get here @pytest.mark.asyncio - async def test_stop_logs( - self, db: sqlalchemy.orm.session.Session, client: fastapi.testclient.TestClient - ): + async def test_stop_logs(self): run_uids = ["123"] project_name = "some-project" log_collector = server.api.utils.clients.log_collector.LogCollectorClient() @@ -213,9 +220,7 @@ async def test_stop_logs( await log_collector.stop_logs(run_uids=run_uids, project=project_name) @pytest.mark.asyncio - async def test_delete_logs( - self, db: sqlalchemy.orm.session.Session, client: fastapi.testclient.TestClient - ): + async def test_delete_logs(self): run_uids = None project_name = "some-project" log_collector = server.api.utils.clients.log_collector.LogCollectorClient() @@ -247,6 +252,24 @@ async def test_delete_logs( assert stop_log_request.project == project_name assert stop_log_request.runUIDs == run_uids + @pytest.mark.asyncio + async def test_list_runs_in_progress(self): + project_name = "some-project" + log_collector = server.api.utils.clients.log_collector.LogCollectorClient() + + async def _verify_runs(run_uids_stream): + async for run_uid_list in run_uids_stream: + for run_uid in run_uid_list: + assert run_uid in run_uids + + # mock a short response for ListRunsInProgress + run_uids = [f"{str(i)}" for i in range(10)] + log_collector._call_stream = unittest.mock.MagicMock( + return_value=ListRunsResponse(run_uids=run_uids) + ) + run_uids_stream = log_collector.list_runs_in_progress(project=project_name) + await _verify_runs(run_uids_stream) + @pytest.mark.parametrize( "error_code,expected_mlrun_error", [ From 
ca18139fb6790417d45b1d9f3a71567a9055c25f Mon Sep 17 00:00:00 2001 From: Alon Maor <48641682+alonmr@users.noreply.github.com> Date: Thu, 28 Mar 2024 14:20:33 +0200 Subject: [PATCH 084/119] [ModelObj] Filter warnings when using to_dict [1.6.x] (#5339) --- mlrun/model.py | 1 + mlrun/utils/helpers.py | 12 ++++++++++++ 2 files changed, 13 insertions(+) diff --git a/mlrun/model.py b/mlrun/model.py index d2d616c9805..fd3b04511b8 100644 --- a/mlrun/model.py +++ b/mlrun/model.py @@ -62,6 +62,7 @@ def _verify_dict(param, name, new_type=None): return new_type.from_dict(param) return param + @mlrun.utils.filter_warnings("ignore", FutureWarning) def to_dict(self, fields=None, exclude=None): """convert the object to a python dictionary diff --git a/mlrun/utils/helpers.py b/mlrun/utils/helpers.py index b5389ddbebe..48af29b417d 100644 --- a/mlrun/utils/helpers.py +++ b/mlrun/utils/helpers.py @@ -1475,6 +1475,18 @@ def as_number(field_name, field_value): def filter_warnings(action, category): + """ + Decorator to filter warnings + + Example:: + @filter_warnings("ignore", FutureWarning) + def my_function(): + pass + + :param action: one of "error", "ignore", "always", "default", "module", or "once" + :param category: a class that the warning must be a subclass of + """ + def decorator(function): def wrapper(*args, **kwargs): # context manager that copies and, upon exit, restores the warnings filter and the showwarning() function. From ef37aba31a7046a9a2a08ed2dac4860861d63604 Mon Sep 17 00:00:00 2001 From: Alon Maor <48641682+alonmr@users.noreply.github.com> Date: Thu, 28 Mar 2024 15:02:59 +0200 Subject: [PATCH 085/119] [Builder] Normalize source dir to mount path [1.6.x] (#5340) --- server/api/utils/builder.py | 1 + tests/api/utils/test_builder.py | 27 +++++++++++++++++++++------ 2 files changed, 22 insertions(+), 6 deletions(-) diff --git a/server/api/utils/builder.py b/server/api/utils/builder.py index 426f5004454..50be16a2136 100644 --- a/server/api/utils/builder.py +++ b/server/api/utils/builder.py @@ -458,6 +458,7 @@ def build_image( source = parsed_url.path to_mount = True source_dir_to_mount, source_to_copy = path.split(source) + source_dir_to_mount = path.normpath(source_dir_to_mount) # source is a path without a scheme, we allow to copy absolute paths assuming they are valid paths # in the image, however, it is recommended to use `workdir` instead in such cases diff --git a/tests/api/utils/test_builder.py b/tests/api/utils/test_builder.py index b7b96a6bc6c..6dc86ed3758 100644 --- a/tests/api/utils/test_builder.py +++ b/tests/api/utils/test_builder.py @@ -792,19 +792,26 @@ def test_builder_workdir(monkeypatch, clone_target_dir, expected_source_dir): @pytest.mark.parametrize( - "source,expectation", + "source, expectation, expected_v3io_remote", [ - ("v3io://path/some-source.tar.gz", does_not_raise()), - ("/path/some-source.tar.gz", does_not_raise()), - ("/path/some-source.zip", does_not_raise()), + ("v3io:///path/some-source.tar.gz", does_not_raise(), "/path"), + ("v3io:///path//./some-source.tar.gz", does_not_raise(), "/path"), + ( + "v3io:///path/to//blank/.././some-source.tar.gz", + does_not_raise(), + "/path/to", + ), + ("/path/some-source.tar.gz", does_not_raise(), None), + ("/path/some-source.zip", does_not_raise(), None), ( "./relative/some-source", pytest.raises(mlrun.errors.MLRunInvalidArgumentError), + None, ), - ("./", pytest.raises(mlrun.errors.MLRunInvalidArgumentError)), + ("./", pytest.raises(mlrun.errors.MLRunInvalidArgumentError), None), ], ) -def test_builder_source(monkeypatch, 
source, expectation): +def test_builder_source(monkeypatch, source, expectation, expected_v3io_remote): _patch_k8s_helper(monkeypatch) with unittest.mock.patch( "server.api.utils.builder.make_kaniko_pod", new=unittest.mock.MagicMock() @@ -857,6 +864,14 @@ def test_builder_source(monkeypatch, source, expectation): dockerfile_lines[expected_line_index].strip() ) + # assert v3io remote is normalized + if expected_v3io_remote: + k8s_helper_mock = server.api.utils.singletons.k8s.get_k8s_helper() + mount_v3io_args = k8s_helper_mock.create_pod.call_args[0][ + 0 + ].mount_v3io.call_args + assert mount_v3io_args[-1]["remote"] == expected_v3io_remote + @pytest.mark.parametrize( "requirements, commands, with_mlrun, mlrun_version_specifier, client_version, expected_commands, " From ca07519c50ca6724750559f84d9d9005e01c3f83 Mon Sep 17 00:00:00 2001 From: davesh0812 <85231462+davesh0812@users.noreply.github.com> Date: Sun, 31 Mar 2024 14:32:57 +0300 Subject: [PATCH 086/119] [Model Monitoring] Test and Fix the saving of the features and the prediction columns to the stream and apps parquet [1.6.X] (#5350) --- mlrun/artifacts/model.py | 50 +-- mlrun/common/schemas/__init__.py | 1 + .../schemas/model_monitoring/__init__.py | 1 + .../schemas/model_monitoring/constants.py | 26 +- mlrun/data_types/data_types.py | 4 + mlrun/model_monitoring/controller.py | 7 - mlrun/model_monitoring/stream_processing.py | 86 ++--- .../crud/model_monitoring/model_endpoints.py | 62 ++-- .../assets/custom_evidently_app.py | 37 +-- .../system/model_monitoring/assets/models.py | 56 ++++ tests/system/model_monitoring/test_app.py | 300 ++++++++++++++++-- .../model_monitoring/test_model_monitoring.py | 14 +- 12 files changed, 496 insertions(+), 148 deletions(-) create mode 100644 tests/system/model_monitoring/assets/models.py diff --git a/mlrun/artifacts/model.py b/mlrun/artifacts/model.py index 1b14fc3d3e3..a2ce53dcd9c 100644 --- a/mlrun/artifacts/model.py +++ b/mlrun/artifacts/model.py @@ -13,8 +13,9 @@ # limitations under the License. 
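 # Note: the numeric-feature filtering introduced later in this patch rests on
 # pandas dtype checks. A minimal standalone sketch of the same idea (the
 # dataframe here is illustrative only, not part of MLRun):
 #
 #     import pandas as pd
 #
 #     df = pd.DataFrame({"age": [1, 2], "name": ["a", "b"]})
 #     numeric = [c for c in df.columns if pd.api.types.is_numeric_dtype(df[c])]
 #     assert numeric == ["age"]  # non-numeric columns are dropped from feature stats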
 import tempfile
 from os import path
-from typing import List
+from typing import Any
 
+import pandas as pd
 import yaml
 from deprecated import deprecated
 
@@ -68,8 +69,8 @@ def __init__(
         model_file=None,
         metrics=None,
         paraemeters=None,
-        inputs: List[Feature] = None,
-        outputs: List[Feature] = None,
+        inputs: list[Feature] = None,
+        outputs: list[Feature] = None,
         framework=None,
         algorithm=None,
         feature_vector=None,
@@ -91,8 +92,8 @@ def __init__(
         self.model_file = model_file
         self.metrics = metrics or {}
         self.parameters = paraemeters or {}
-        self.inputs: List[Feature] = inputs or []
-        self.outputs: List[Feature] = outputs or []
+        self.inputs: list[Feature] = inputs or []
+        self.outputs: list[Feature] = outputs or []
         self.framework = framework
         self.algorithm = algorithm
         self.feature_vector = feature_vector
@@ -101,21 +102,21 @@ def __init__(
         self.model_target_file = model_target_file
 
     @property
-    def inputs(self) -> List[Feature]:
+    def inputs(self) -> list[Feature]:
         """input feature list"""
         return self._inputs
 
     @inputs.setter
-    def inputs(self, inputs: List[Feature]):
+    def inputs(self, inputs: list[Feature]):
         self._inputs = ObjectList.from_list(Feature, inputs)
 
     @property
-    def outputs(self) -> List[Feature]:
+    def outputs(self) -> list[Feature]:
         """output feature list"""
         return self._outputs
 
     @outputs.setter
-    def outputs(self, outputs: List[Feature]):
+    def outputs(self, outputs: list[Feature]):
         self._outputs = ObjectList.from_list(Feature, outputs)
 
@@ -175,22 +176,22 @@ def spec(self, spec):
         self._spec = self._verify_dict(spec, "spec", ModelArtifactSpec)
 
     @property
-    def inputs(self) -> List[Feature]:
+    def inputs(self) -> list[Feature]:
         """input feature list"""
         return self.spec.inputs
 
     @inputs.setter
-    def inputs(self, inputs: List[Feature]):
+    def inputs(self, inputs: list[Feature]):
         """input feature list"""
         self.spec.inputs = inputs
 
     @property
-    def outputs(self) -> List[Feature]:
+    def outputs(self) -> list[Feature]:
         """output feature list"""
         return self.spec.outputs
 
     @outputs.setter
-    def outputs(self, outputs: List[Feature]):
+    def outputs(self, outputs: list[Feature]):
         """output feature list"""
         self.spec.outputs = outputs
 
@@ -260,6 +261,7 @@ def infer_from_df(self, df, label_columns=None, with_stats=True, num_bins=None):
         """
         subset = df
         inferer = get_infer_interface(subset)
+        numeric_columns = self._extract_numeric_features(df)
         if label_columns:
             if not isinstance(label_columns, list):
                 label_columns = [label_columns]
@@ -273,9 +275,13 @@ def infer_from_df(self, df, label_columns=None, with_stats=True, num_bins=None):
             )
         if with_stats:
             self.spec.feature_stats = inferer.get_stats(
-                df, options=InferOptions.Histogram, num_bins=num_bins
+                df[numeric_columns], options=InferOptions.Histogram, num_bins=num_bins
             )
 
+    @staticmethod
+    def _extract_numeric_features(df: pd.DataFrame) -> list[Any]:
+        return [col for col in df.columns if pd.api.types.is_numeric_dtype(df[col])]
+
     @property
     def is_dir(self):
         return True
@@ -445,8 +451,8 @@ def __init__(
         self.model_file = model_file
         self.parameters = parameters or {}
         self.metrics = metrics or {}
-        self.inputs: List[Feature] = inputs or []
-        self.outputs: List[Feature] = outputs or []
+        self.inputs: list[Feature] = inputs or []
+        self.outputs: list[Feature] = outputs or []
         self.extra_data = extra_data or {}
         self.framework = framework
         self.algorithm = algorithm
@@ -456,21 +462,21 @@ def __init__(
         self.model_target_file = model_target_file
 
     @property
-    def inputs(self) -> List[Feature]:
+    def inputs(self) -> list[Feature]:
         """input feature list"""
         return
self._inputs @inputs.setter - def inputs(self, inputs: List[Feature]): + def inputs(self, inputs: list[Feature]): self._inputs = ObjectList.from_list(Feature, inputs) @property - def outputs(self) -> List[Feature]: + def outputs(self) -> list[Feature]: """output feature list""" return self._outputs @outputs.setter - def outputs(self, outputs: List[Feature]): + def outputs(self, outputs: list[Feature]): self._outputs = ObjectList.from_list(Feature, outputs) def infer_from_df(self, df, label_columns=None, with_stats=True, num_bins=None): @@ -642,8 +648,8 @@ def update_model( parameters: dict = None, metrics: dict = None, extra_data: dict = None, - inputs: List[Feature] = None, - outputs: List[Feature] = None, + inputs: list[Feature] = None, + outputs: list[Feature] = None, feature_vector: str = None, feature_weights: list = None, key_prefix: str = "", diff --git a/mlrun/common/schemas/__init__.py b/mlrun/common/schemas/__init__.py index c8575a3fe65..66394448995 100644 --- a/mlrun/common/schemas/__init__.py +++ b/mlrun/common/schemas/__init__.py @@ -114,6 +114,7 @@ EventFieldType, EventKeyMetrics, Features, + FeatureSetFeatures, FeatureValues, GrafanaColumn, GrafanaDataPoint, diff --git a/mlrun/common/schemas/model_monitoring/__init__.py b/mlrun/common/schemas/model_monitoring/__init__.py index 07d61891bc7..abf61fb9075 100644 --- a/mlrun/common/schemas/model_monitoring/__init__.py +++ b/mlrun/common/schemas/model_monitoring/__init__.py @@ -22,6 +22,7 @@ EventFieldType, EventKeyMetrics, EventLiveStats, + FeatureSetFeatures, FileTargetKind, FunctionURI, ModelEndpointTarget, diff --git a/mlrun/common/schemas/model_monitoring/constants.py b/mlrun/common/schemas/model_monitoring/constants.py index 3566e106147..17d83adbe93 100644 --- a/mlrun/common/schemas/model_monitoring/constants.py +++ b/mlrun/common/schemas/model_monitoring/constants.py @@ -77,6 +77,26 @@ class EventFieldType: SAMPLE_PARQUET_PATH = "sample_parquet_path" +class MonitoringStrEnum(StrEnum): + @classmethod + def list(cls): + return list(map(lambda c: c.value, cls)) + + +class FeatureSetFeatures(MonitoringStrEnum): + LATENCY = EventFieldType.LATENCY + ERROR_COUNT = EventFieldType.ERROR_COUNT + METRICS = EventFieldType.METRICS + + @classmethod + def time_stamp(cls): + return EventFieldType.TIMESTAMP + + @classmethod + def entity(cls): + return EventFieldType.ENDPOINT_ID + + class ApplicationEvent: APPLICATION_NAME = "application_name" CURRENT_STATS = "current_stats" @@ -89,7 +109,7 @@ class ApplicationEvent: OUTPUT_STREAM_URI = "output_stream_uri" -class WriterEvent(StrEnum): +class WriterEvent(MonitoringStrEnum): APPLICATION_NAME = "application_name" ENDPOINT_ID = "endpoint_id" START_INFER_TIME = "start_infer_time" @@ -101,10 +121,6 @@ class WriterEvent(StrEnum): RESULT_EXTRA_DATA = "result_extra_data" CURRENT_STATS = "current_stats" - @classmethod - def list(cls): - return list(map(lambda c: c.value, cls)) - class EventLiveStats: LATENCY_AVG_5M = "latency_avg_5m" diff --git a/mlrun/data_types/data_types.py b/mlrun/data_types/data_types.py index 5ff3f747d90..79a41b043f8 100644 --- a/mlrun/data_types/data_types.py +++ b/mlrun/data_types/data_types.py @@ -41,6 +41,7 @@ class ValueType(str, Enum): BYTES = "bytes" STRING = "str" DATETIME = "datetime" + LIST = "List" BYTES_LIST = "List[bytes]" STRING_LIST = "List[string]" INT32_LIST = "List[int32]" @@ -48,6 +49,7 @@ class ValueType(str, Enum): DOUBLE_LIST = "List[float]" FLOAT_LIST = "List[float32]" BOOL_LIST = "List[bool]" + Tuple = "Tuple" def pd_schema_to_value_type(value): @@ -102,6 
+104,8 @@ def python_type_to_value_type(value_type): "datetime64[ns]": ValueType.INT64, "datetime64[ns, tz]": ValueType.INT64, "category": ValueType.STRING, + "list": ValueType.LIST, + "tuple": ValueType.Tuple, } if type_name in type_map: diff --git a/mlrun/model_monitoring/controller.py b/mlrun/model_monitoring/controller.py index 93be50ac035..26f5bcefd70 100644 --- a/mlrun/model_monitoring/controller.py +++ b/mlrun/model_monitoring/controller.py @@ -426,13 +426,6 @@ def model_endpoint_process( m_fs = fstore.get_feature_set( endpoint[mm_constants.EventFieldType.FEATURE_SET_URI] ) - labels = endpoint[mm_constants.EventFieldType.LABEL_NAMES] - if labels: - if isinstance(labels, str): - labels = json.loads(labels) - for label in labels: - if label not in list(m_fs.spec.features.keys()): - m_fs.add_feature(fstore.Feature(name=label, value_type="float")) for application in applications_names: batch_window = batch_window_generator.get_batch_window( diff --git a/mlrun/model_monitoring/stream_processing.py b/mlrun/model_monitoring/stream_processing.py index 46b48d0ed2f..090276bf745 100644 --- a/mlrun/model_monitoring/stream_processing.py +++ b/mlrun/model_monitoring/stream_processing.py @@ -24,6 +24,7 @@ import mlrun.common.model_monitoring.helpers import mlrun.config import mlrun.datastore.targets +import mlrun.feature_store as fstore import mlrun.feature_store.steps import mlrun.model_monitoring.prometheus import mlrun.serving.states @@ -49,7 +50,7 @@ def __init__( parquet_batching_timeout_secs: int, parquet_target: str, sample_window: int = 10, - aggregate_windows: typing.Optional[typing.List[str]] = None, + aggregate_windows: typing.Optional[list[str]] = None, aggregate_period: str = "30s", model_monitoring_access_key: str = None, ): @@ -587,6 +588,8 @@ def do(self, event): for key in [ EventFieldType.FEATURES, EventFieldType.NAMED_FEATURES, + EventFieldType.PREDICTION, + EventFieldType.NAMED_PREDICTIONS, ]: event.pop(key, None) @@ -629,14 +632,14 @@ def __init__( self.project: str = project # First and last requests timestamps (value) of each endpoint (key) - self.first_request: typing.Dict[str, str] = dict() - self.last_request: typing.Dict[str, str] = dict() + self.first_request: dict[str, str] = dict() + self.last_request: dict[str, str] = dict() # Number of errors (value) per endpoint (key) - self.error_count: typing.Dict[str, int] = collections.defaultdict(int) + self.error_count: dict[str, int] = collections.defaultdict(int) # Set of endpoints in the current events - self.endpoints: typing.Set[str] = set() + self.endpoints: set[str] = set() def do(self, full_event): event = full_event.body @@ -745,18 +748,12 @@ def do(self, full_event): # in list of events. This list will be used as the body for the storey event. 
events = [] for i, (feature, prediction) in enumerate(zip(features, predictions)): - # Validate that inputs are based on numeric values - if not self.is_valid( - endpoint_id, - self.is_list_of_numerics, - feature, - ["request", "inputs", f"[{i}]"], - ): - return None - if not isinstance(prediction, list): prediction = [prediction] + if not isinstance(feature, list): + feature = [feature] + events.append( { EventFieldType.FUNCTION_URI: function_uri, @@ -803,18 +800,6 @@ def _validate_last_request_timestamp(self, endpoint_id: str, timestamp: str): f"{self.last_request[endpoint_id]} - write to TSDB will be rejected" ) - @staticmethod - def is_list_of_numerics( - field: typing.List[typing.Union[int, float, dict, list]], - dict_path: typing.List[str], - ): - if all(isinstance(x, int) or isinstance(x, float) for x in field): - return True - logger.error( - f"List does not consist of only numeric values: {field} [Event -> {','.join(dict_path)}]" - ) - return False - def resume_state(self, endpoint_id): # Make sure process is resumable, if process fails for any reason, be able to pick things up close to where we # left them @@ -849,7 +834,7 @@ def is_valid( endpoint_id: str, validation_function, field: typing.Any, - dict_path: typing.List[str], + dict_path: list[str], ): if validation_function(field, dict_path): return True @@ -857,7 +842,7 @@ def is_valid( return False -def is_not_none(field: typing.Any, dict_path: typing.List[str]): +def is_not_none(field: typing.Any, dict_path: list[str]): if field is not None: return True logger.error( @@ -946,9 +931,11 @@ def _infer_label_columns_from_data(self, event): return self.label_columns[endpoint_id] return None - def do(self, event: typing.Dict): + def do(self, event: dict): endpoint_id = event[EventFieldType.ENDPOINT_ID] + feature_values = event[EventFieldType.FEATURES] + label_values = event[EventFieldType.PREDICTION] # Get feature names and label columns if endpoint_id not in self.feature_names: endpoint_record = get_endpoint_record( @@ -984,6 +971,12 @@ def do(self, event: typing.Dict): }, ) + update_monitoring_feature_set( + endpoint_record=endpoint_record, + feature_names=feature_names, + feature_values=feature_values, + ) + # Similar process with label columns if not label_columns and self._infer_columns_from_data: label_columns = self._infer_label_columns_from_data(event) @@ -1002,6 +995,11 @@ def do(self, event: typing.Dict): endpoint_id=endpoint_id, attributes={EventFieldType.LABEL_NAMES: json.dumps(label_columns)}, ) + update_monitoring_feature_set( + endpoint_record=endpoint_record, + feature_names=label_columns, + feature_values=label_values, + ) self.label_columns[endpoint_id] = label_columns self.feature_names[endpoint_id] = feature_names @@ -1019,7 +1017,6 @@ def do(self, event: typing.Dict): # Add feature_name:value pairs along with a mapping dictionary of all of these pairs feature_names = self.feature_names[endpoint_id] - feature_values = event[EventFieldType.FEATURES] self._map_dictionary_values( event=event, named_iters=feature_names, @@ -1029,7 +1026,6 @@ def do(self, event: typing.Dict): # Add label_name:value pairs along with a mapping dictionary of all of these pairs label_names = self.label_columns[endpoint_id] - label_values = event[EventFieldType.PREDICTION] self._map_dictionary_values( event=event, named_iters=label_names, @@ -1045,9 +1041,9 @@ def do(self, event: typing.Dict): @staticmethod def _map_dictionary_values( - event: typing.Dict, - named_iters: typing.List, - values_iters: typing.List, + event: dict, + 
named_iters: list, + values_iters: list, mapping_dictionary: str, ): """Adding name-value pairs to event dictionary based on two provided lists of names and values. These pairs @@ -1082,7 +1078,7 @@ def __init__(self, project: str, model_endpoint_store_target: str, **kwargs): self.project = project self.model_endpoint_store_target = model_endpoint_store_target - def do(self, event: typing.Dict): + def do(self, event: dict): update_endpoint_record( project=self.project, endpoint_id=event.pop(EventFieldType.ENDPOINT_ID), @@ -1117,7 +1113,7 @@ def __init__( self.table = table self.keys = set() - def do(self, event: typing.Dict): + def do(self, event: dict): key_set = set(event.keys()) if not key_set.issubset(self.keys): self.keys.update(key_set) @@ -1241,3 +1237,21 @@ def get_endpoint_record(project: str, endpoint_id: str): project=project, ) return model_endpoint_store.get_model_endpoint(endpoint_id=endpoint_id) + + +def update_monitoring_feature_set( + endpoint_record: dict[str, typing.Any], + feature_names: list[str], + feature_values: list[typing.Any], +): + monitoring_feature_set = fstore.get_feature_set( + endpoint_record[ + mlrun.common.schemas.model_monitoring.EventFieldType.FEATURE_SET_URI + ] + ) + for name, val in zip(feature_names, feature_values): + monitoring_feature_set.add_feature( + fstore.Feature(name=name, value_type=type(val)) + ) + + monitoring_feature_set.save() diff --git a/server/api/crud/model_monitoring/model_endpoints.py b/server/api/crud/model_monitoring/model_endpoints.py index fcd05a7493a..ca77c01b5cd 100644 --- a/server/api/crud/model_monitoring/model_endpoints.py +++ b/server/api/crud/model_monitoring/model_endpoints.py @@ -102,17 +102,19 @@ def create_model_endpoint( if not model_endpoint.spec.algorithm and model_obj.spec.algorithm: model_endpoint.spec.algorithm = model_obj.spec.algorithm + features = cls._get_features( + model=model_obj, + run_db=run_db, + project=model_endpoint.metadata.project, + ) + model_endpoint.spec.feature_names = [feature.name for feature in features] # Create monitoring feature set if monitoring found in model endpoint object if ( model_endpoint.spec.monitoring_mode == mlrun.common.schemas.model_monitoring.ModelMonitoringMode.enabled.value ): monitoring_feature_set = cls.create_monitoring_feature_set( - features=cls._get_features( - model=model_obj, - run_db=run_db, - project=model_endpoint.metadata.project, - ), + features=features, model_endpoint=model_endpoint, db_session=db_session, ) @@ -126,25 +128,19 @@ def create_model_endpoint( # sure to keep a clean version of the names if model_endpoint.status.feature_stats: logger.info("Feature stats found, cleaning feature names") - if model_endpoint.spec.feature_names: - # Validate that the length of feature_stats is equal to the length of feature_names and label_names - cls._validate_length_features_and_labels(model_endpoint=model_endpoint) - # Clean feature names in both feature_stats and feature_names - ( - model_endpoint.status.feature_stats, - model_endpoint.spec.feature_names, - ) = cls._adjust_feature_names_and_stats(model_endpoint=model_endpoint) + model_endpoint.status.feature_stats = cls._adjust_stats( + model_endpoint=model_endpoint + ) logger.info( - "Done preparing feature names and stats", + "Done preparing stats", feature_names=model_endpoint.spec.feature_names, ) # If none of the above was supplied, feature names will be assigned on first contact with the model monitoring # system logger.info("Creating model endpoint", endpoint_id=model_endpoint.metadata.uid) - # Write 
the new model endpoint model_endpoint_store = get_model_endpoint_store( project=model_endpoint.metadata.project, @@ -250,6 +246,9 @@ def create_monitoring_feature_set( :return: Feature set object for the monitoring of the current model endpoint. """ + # append general features + for feature in mlrun.common.schemas.model_monitoring.FeatureSetFeatures.list(): + features.append(mlrun.feature_store.Feature(name=feature)) # Define a new feature set ( _, @@ -264,8 +263,10 @@ def create_monitoring_feature_set( feature_set = mlrun.feature_store.FeatureSet( f"monitoring-{serving_function_name}-{model_name}", - entities=[mlrun.common.schemas.model_monitoring.EventFieldType.ENDPOINT_ID], - timestamp_key=mlrun.common.schemas.model_monitoring.EventFieldType.TIMESTAMP, + entities=[ + mlrun.common.schemas.model_monitoring.FeatureSetFeatures.entity() + ], + timestamp_key=mlrun.common.schemas.model_monitoring.FeatureSetFeatures.time_stamp(), description=f"Monitoring feature set for endpoint: {model_endpoint.spec.model}", ) # Set the run db instance with the current db session @@ -338,7 +339,7 @@ def get_model_endpoint( auth_info: mlrun.common.schemas.AuthInfo, project: str, endpoint_id: str, - metrics: typing.List[str] = None, + metrics: list[str] = None, start: str = "now-1h", end: str = "now", feature_analysis: bool = False, @@ -409,12 +410,12 @@ def list_model_endpoints( project: str, model: str = None, function: str = None, - labels: typing.List[str] = None, - metrics: typing.List[str] = None, + labels: list[str] = None, + metrics: list[str] = None, start: str = "now-1h", end: str = "now", top_level: bool = False, - uids: typing.List[str] = None, + uids: list[str] = None, ) -> mlrun.common.schemas.ModelEndpointList: """ Returns a list of `ModelEndpoint` objects, wrapped in `ModelEndpointList` object. Each `ModelEndpoint` @@ -588,20 +589,16 @@ def _validate_length_features_and_labels( ) @staticmethod - def _adjust_feature_names_and_stats( + def _adjust_stats( model_endpoint, - ) -> typing.Tuple[typing.Dict, typing.List]: + ) -> mlrun.common.model_monitoring.helpers.FeatureStats: """ - Create a clean matching version of feature names for both `feature_stats` and `feature_names`. Please note that - label names exist only in `feature_stats` and `label_names`. + Create a clean version of feature names for `feature_stats`. :param model_endpoint: An object representing the model endpoint. 
- :return: A tuple of: - [0] = Dictionary of feature stats with cleaned names - [1] = List of cleaned feature names + :return: A Dictionary of feature stats with cleaned names """ clean_feature_stats = {} - clean_feature_names = [] for feature, stats in model_endpoint.status.feature_stats.items(): clean_name = mlrun.feature_store.api.norm_column_name(feature) clean_feature_stats[clean_name] = stats @@ -611,14 +608,13 @@ def _adjust_feature_names_and_stats( and clean_name in model_endpoint.spec.label_names ): continue - clean_feature_names.append(clean_name) - return clean_feature_stats, clean_feature_names + return clean_feature_stats @staticmethod def _add_real_time_metrics( model_endpoint_store: mlrun.model_monitoring.ModelEndpointStore, model_endpoint_object: mlrun.common.schemas.ModelEndpoint, - metrics: typing.List[str] = None, + metrics: list[str] = None, start: str = "now-1h", end: str = "now", ) -> mlrun.common.schemas.ModelEndpoint: @@ -661,7 +657,7 @@ def _add_real_time_metrics( @staticmethod def _convert_into_model_endpoint_object( - endpoint: typing.Dict[str, typing.Any], feature_analysis: bool = False + endpoint: dict[str, typing.Any], feature_analysis: bool = False ) -> mlrun.common.schemas.ModelEndpoint: """ Create a `ModelEndpoint` object according to a provided model endpoint dictionary. diff --git a/tests/system/model_monitoring/assets/custom_evidently_app.py b/tests/system/model_monitoring/assets/custom_evidently_app.py index 8d7a81c7b0b..a8f76056e99 100644 --- a/tests/system/model_monitoring/assets/custom_evidently_app.py +++ b/tests/system/model_monitoring/assets/custom_evidently_app.py @@ -49,6 +49,7 @@ PlotType, ReportFilter, ) + from evidently.ui.type_aliases import STR_UUID from evidently.ui.workspace import Workspace _PROJECT_NAME = "Iris Monitoring" @@ -125,37 +126,37 @@ def _create_evidently_project( project.save() return project - def create_demo_project(workspace_path: str) -> tuple[Workspace, Project]: - workspace = Workspace.create(workspace_path) - project = _create_evidently_project(workspace) - return workspace, project - class CustomEvidentlyMonitoringApp(EvidentlyModelMonitoringApplicationBase): name = "evidently-app-test" - def _lazy_init(self, *args, **kwargs) -> None: - super()._lazy_init(*args, **kwargs) + def __init__( + self, + evidently_workspace_path: str, + evidently_project_id: "STR_UUID", + with_training_set: bool, + ) -> None: + super().__init__(evidently_workspace_path, evidently_project_id) self._init_evidently_project() - self._init_iris_data() + self._init_iris_data(with_training_set) - def _init_iris_data(self) -> None: + def _init_iris_data(self, with_training_set: bool) -> None: iris = load_iris() - self.columns = [ - "sepal_length_cm", - "sepal_width_cm", - "petal_length_cm", - "petal_width_cm", - ] + if with_training_set: + self.columns = [ + "sepal_length_cm", + "sepal_width_cm", + "petal_length_cm", + "petal_width_cm", + ] + else: + self.columns = [f"f{i}" for i in range(4)] self.train_set = pd.DataFrame(iris.data, columns=self.columns) def _init_evidently_project(self) -> None: if self.evidently_project is None: if isinstance(self.evidently_project_id, str): self.evidently_project_id = UUID(self.evidently_project_id) - self.context.logger.info( - "Creating evidently project", id=self.evidently_project_id - ) self.evidently_project = _create_evidently_project( self.evidently_workspace, self.evidently_project_id ) diff --git a/tests/system/model_monitoring/assets/models.py b/tests/system/model_monitoring/assets/models.py new file 
mode 100644
index 00000000000..969412d2507
--- /dev/null
+++ b/tests/system/model_monitoring/assets/models.py
@@ -0,0 +1,56 @@
+# Copyright 2024 Iguazio
+#
+# Licensed under the Apache License, Version 2.0 (the "License");
+# you may not use this file except in compliance with the License.
+# You may obtain a copy of the License at
+#
+#   http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS,
+# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+# See the License for the specific language governing permissions and
+# limitations under the License.
+import mlrun.serving
+
+
+class OneToOne(mlrun.serving.V2ModelServer):
+    """
+    In this class, the predict method returns one result for each input
+    """
+
+    def load(self):
+        pass
+
+    def predict(self, body: dict) -> list:
+        inputs = body.get("inputs")
+        if isinstance(inputs[0], list) and len(inputs) == 600:  # single image
+            outputs = 3
+        elif isinstance(inputs[0], list) and len(inputs) == 2 and len(inputs[0]) == 600:
+            outputs = [2, 2]
+        elif isinstance(inputs[0], list) or (
+            isinstance(inputs[0], str) and isinstance(inputs, list)
+        ):
+            outputs = [inp[0] for inp in inputs]
+        else:
+            outputs = inputs[0]
+        return outputs
+
+
+class OneToMany(mlrun.serving.V2ModelServer):
+    """
+    In this class, the predict method returns five output values for each input
+    """
+
+    def load(self):
+        pass
+
+    def predict(self, body: dict) -> list:
+        inputs = body.get("inputs")
+        if isinstance(inputs[0], list) or (
+            isinstance(inputs[0], str) and isinstance(inputs, list)
+        ):
+            outputs = [[inp[0], inp[0], 3.0, "a", 5] for inp in inputs]
+        else:
+            outputs = [inputs[0], inputs[0], 3.0, "a", 5]
+        return outputs
diff --git a/tests/system/model_monitoring/test_app.py b/tests/system/model_monitoring/test_app.py
index eb70eb2bb75..3a12fea2e9c 100644
--- a/tests/system/model_monitoring/test_app.py
+++ b/tests/system/model_monitoring/test_app.py
@@ -12,6 +12,8 @@
 # See the License for the specific language governing permissions and
 # limitations under the License.
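 # Note: the OneToOne/OneToMany servers above branch on the shape of
 # body["inputs"]. Roughly, a request/response pair looks like this
 # (values are illustrative only):
 #
 #     request = {"inputs": [[5.1, 3.5, 1.4, 0.2], [6.2, 2.9, 4.3, 1.3]]}
 #     one_to_one_outputs = [5.1, 6.2]  # one prediction per input row
 #     one_to_many_outputs = [[5.1, 5.1, 3.0, "a", 5],
 #                            [6.2, 6.2, 3.0, "a", 5]]  # five outputs per row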
+import concurrent +import concurrent.futures import json import pickle import time @@ -30,8 +32,11 @@ from sklearn.svm import SVC import mlrun +import mlrun.common.schemas.model_monitoring.constants as mm_constants import mlrun.feature_store +import mlrun.feature_store as fstore import mlrun.model_monitoring.api +from mlrun.datastore.targets import ParquetTarget from mlrun.model_monitoring import TrackingPolicy from mlrun.model_monitoring.application import ModelMonitoringApplicationBase from mlrun.model_monitoring.evidently_application import SUPPORTED_EVIDENTLY_VERSION @@ -49,7 +54,7 @@ @dataclass class _AppData: - class_: typing.Type[ModelMonitoringApplicationBase] + class_: type[ModelMonitoringApplicationBase] rel_path: str requirements: list[str] = field(default_factory=list) kwargs: dict[str, typing.Any] = field(default_factory=dict) @@ -118,7 +123,33 @@ def _test_tsdb_record(cls, ep_id: str) -> None: ), "The TSDB saved metrics are different than expected" @classmethod - def _test_v3io_records(cls, ep_id: str) -> None: + def _test_apps_parquet( + cls, ep_id: str, inputs: set[str], outputs: set[str] + ) -> None: + parquet_apps_directory = ( + mlrun.model_monitoring.helpers.get_monitoring_parquet_path( + mlrun.get_or_create_project(cls.project_name), + kind=mm_constants.FileTargetKind.APPS_PARQUET, + ) + ) + df = ParquetTarget( + path=f"{parquet_apps_directory}/key={ep_id}", + ).as_df() + + is_inputs_saved = inputs.issubset(df.columns) + assert is_inputs_saved, "Dataframe does not contain the input columns" + is_output_saved = outputs.issubset(df.columns) + assert is_output_saved, "Dataframe does not contain the output columns" + is_metadata_saved = set(mm_constants.FeatureSetFeatures.list()).issubset( + df.columns + ) + assert is_metadata_saved, "Dataframe does not contain the metadata columns" + + @classmethod + def _test_v3io_records( + cls, ep_id: str, inputs: set[str], outputs: set[str] + ) -> None: + cls._test_apps_parquet(ep_id, inputs, outputs) cls._test_kv_record(ep_id) cls._test_tsdb_record(ep_id) @@ -164,11 +195,11 @@ def custom_setup_class(cls) -> None: kwargs={ "evidently_workspace_path": cls.evidently_workspace_path, "evidently_project_id": cls.evidently_project_id, + "with_training_set": True, }, metrics={"data_drift_test"}, ), ] - cls.infer_path = f"v2/models/{cls.model_name}/infer" cls.infer_input = cls._generate_infer_input() cls.next_window_input = cls._generate_infer_input(num_events=1) @@ -193,27 +224,41 @@ def _set_and_deploy_monitoring_apps(self) -> None: ) executor.submit(fn.deploy) - def _log_model(self) -> None: + def _log_model(self, with_training_set: bool) -> tuple[set[str], set[str]]: + train_set = None dataset = load_iris() - train_set = pd.DataFrame( - dataset.data, - columns=dataset.feature_names, - ) + if with_training_set: + train_set = pd.DataFrame( + dataset.data, + columns=dataset.feature_names, + ) + inputs = { + mlrun.feature_store.api.norm_column_name(feature) + for feature in dataset.feature_names + } + else: + inputs = {f"f{i}" for i in range(len(dataset.feature_names))} + self.project.log_model( - self.model_name, + f"{self.model_name}_{with_training_set}", model_dir=str((Path(__file__).parent / "assets").absolute()), model_file="model.pkl", training_set=train_set, ) + outputs = {"p0"} + + return inputs, outputs @classmethod - def _deploy_model_serving(cls) -> mlrun.runtimes.serving.ServingRuntime: + def _deploy_model_serving( + cls, with_training_set: bool + ) -> mlrun.runtimes.serving.ServingRuntime: serving_fn = mlrun.import_function( 
"hub://v2_model_server", project=cls.project_name, new_name="model-serving" ) serving_fn.add_model( - cls.model_name, - model_path=f"store://models/{cls.project_name}/{cls.model_name}:latest", + f"{cls.model_name}_{with_training_set}", + model_path=f"store://models/{cls.project_name}/{cls.model_name}_{with_training_set}:latest", ) serving_fn.set_tracking(tracking_policy=TrackingPolicy()) if cls.image is not None: @@ -229,7 +274,7 @@ def _deploy_model_serving(cls) -> mlrun.runtimes.serving.ServingRuntime: return typing.cast(mlrun.runtimes.serving.ServingRuntime, serving_fn) @classmethod - def _get_model_enpoint_id(cls) -> str: + def _get_model_endpoint_id(cls) -> str: endpoints = mlrun.get_run_db().list_model_endpoints(project=cls.project_name) assert endpoints and len(endpoints) == 1 return endpoints[0].metadata.uid @@ -240,36 +285,50 @@ def _generate_infer_input(cls, num_events: typing.Optional[int] = None) -> str: num_events = cls.max_events return json.dumps({"inputs": [[0] * cls.num_features] * num_events}) - def test_app_flow(self) -> None: + @pytest.mark.parametrize("with_training_set", [True, False]) + def test_app_flow(self, with_training_set) -> None: self.project = typing.cast(mlrun.projects.MlrunProject, self.project) - self._log_model() + inputs, outputs = self._log_model(with_training_set) + + for i in range(len(self.apps_data)): + if "with_training_set" in self.apps_data[i].kwargs: + self.apps_data[i].kwargs["with_training_set"] = with_training_set with ThreadPoolExecutor() as executor: - executor.submit(self._submit_controller_and_deploy_writer) + executor.submit( + self._submit_controller_and_deploy_writer, + ) executor.submit(self._set_and_deploy_monitoring_apps) - future = executor.submit(self._deploy_model_serving) + future = executor.submit(self._deploy_model_serving, with_training_set) self.serving_fn = future.result() time.sleep(5) - self.serving_fn.invoke(self.infer_path, self.infer_input) + self.serving_fn.invoke( + f"v2/models/{self.model_name}_{with_training_set}/infer", self.infer_input + ) # mark the first window as "done" with another request time.sleep( self.app_interval_seconds + mlrun.mlconf.model_endpoint_monitoring.parquet_batching_timeout_secs + 2 ) - self.serving_fn.invoke(self.infer_path, self.next_window_input) + self.serving_fn.invoke( + f"v2/models/{self.model_name}_{with_training_set}/infer", + self.next_window_input, + ) # wait for the completed window to be processed time.sleep(1.2 * self.app_interval_seconds) - self._test_v3io_records(ep_id=self._get_model_enpoint_id()) + self._test_v3io_records( + ep_id=self._get_model_endpoint_id(), inputs=inputs, outputs=outputs + ) @TestMLRunSystem.skip_test_if_env_not_configured @pytest.mark.enterprise class TestRecordResults(TestMLRunSystem, _V3IORecordsChecker): - project_name = "test-monitoring-record-results" + project_name = "test-mm-record-results" name_prefix = "infer-monitoring" # Set image to "/mlrun:" for local testing image: typing.Optional[str] = None @@ -373,4 +432,201 @@ def test_inference_feature_set(self) -> None: time.sleep(2.4 * self.app_interval_seconds) - self._test_v3io_records(self.endpoint_id) + self._test_v3io_records( + self.endpoint_id, inputs=set(self.columns), outputs=set(self.y_name) + ) + + +@TestMLRunSystem.skip_test_if_env_not_configured +@pytest.mark.enterprise +class TestAllKindOfServing(TestMLRunSystem): + project_name = "test-mm-serving" + # Set image to "/mlrun:" for local testing + image: typing.Optional[str] = None + + @classmethod + def custom_setup_class(cls) -> None: 
+ random_rgb_image_list = ( + np.random.randint(0, 256, (20, 30, 3), dtype=np.uint8) + .reshape(-1, 3) + .tolist() + ) + cls.models = { + "int_one_to_one": { + "name": "serving_1", + "model_name": "int_one_to_one", + "class_name": "OneToOne", + "data_point": [1, 2, 3], + "schema": ["f0", "f1", "f2", "p0"], + }, + "int_one_to_many": { + "name": "serving_2", + "model_name": "int_one_to_many", + "class_name": "OneToMany", + "data_point": [1, 2, 3], + "schema": ["f0", "f1", "f2", "p0", "p1", "p2", "p3", "p4"], + }, + "str_one_to_one": { + "name": "serving_3", + "model_name": "str_one_to_one", + "class_name": "OneToOne", + "data_point": "input_str", + "schema": ["f0", "p0"], + }, + "str_one_to_one_with_train": { + "name": "serving_4", + "model_name": "str_one_to_one_with_train", + "class_name": "OneToOne", + "data_point": "input_str", + "schema": ["str_in", "str_out"], + "training_set": pd.DataFrame( + data={"str_in": ["str_1", "str_2"], "str_out": ["str_3", "str_4"]} + ), + "label_column": "str_out", + }, + "str_one_to_many": { + "name": "serving_5", + "model_name": "str_one_to_many", + "class_name": "OneToMany", + "data_point": "input_str", + "schema": ["f0", "p0", "p1", "p2", "p3", "p4"], + }, + "img_one_to_one": { + "name": "serving_6", + "model_name": "img_one_to_one", + "class_name": "OneToOne", + "data_point": random_rgb_image_list, + "schema": [f"f{i}" for i in range(600)] + ["p0"], + }, + "int_and_str_one_to_one": { + "name": "serving_7", + "model_name": "int_and_str_one_to_one", + "class_name": "OneToOne", + "data_point": [1, "a", 3], + "schema": ["f0", "f1", "f2", "p0"], + }, + } + + def _log_model( + self, + model_name: str, + training_set: pd.DataFrame = None, + label_column: typing.Union[str, list[str]] = None, + ) -> None: + self.project.log_model( + model_name, + model_dir=str((Path(__file__).parent / "assets").absolute()), + model_file="model.pkl", + training_set=training_set, + label_column=label_column, + ) + + @classmethod + def _deploy_model_serving( + cls, name: str, model_name: str, class_name: str, **kwargs + ) -> mlrun.runtimes.serving.ServingRuntime: + serving_fn: mlrun.runtimes.serving.ServingRuntime = mlrun.code_to_function( + project=cls.project_name, + name=name, + filename=f"{str((Path(__file__).parent / 'assets').absolute())}/models.py", + kind="serving", + ) + serving_fn.add_model( + model_name, + model_path=f"store://models/{cls.project_name}/{model_name}:latest", + class_name=class_name, + ) + serving_fn.set_tracking(tracking_policy=TrackingPolicy()) + if cls.image is not None: + for attr in ( + "stream_image", + "default_batch_image", + "default_controller_image", + ): + setattr(serving_fn.spec.tracking_policy, attr, cls.image) + serving_fn.spec.image = serving_fn.spec.build.image = cls.image + + serving_fn.deploy() + return typing.cast(mlrun.runtimes.serving.ServingRuntime, serving_fn) + + def _test_endpoint(self, model_name, feature_set_uri) -> dict[str, typing.Any]: + model_dict = self.models[model_name] + serving_fn = self.project.get_function(model_dict.get("name")) + data_point = model_dict.get("data_point") + + serving_fn.invoke( + f"v2/models/{model_name}/infer", + json.dumps( + {"inputs": [data_point]}, + ), + ) + serving_fn.invoke( + f"v2/models/{model_name}/infer", + json.dumps({"inputs": [data_point, data_point]}), + ) + time.sleep( + mlrun.mlconf.model_endpoint_monitoring.parquet_batching_timeout_secs + 10 + ) + + offline_response_df = ParquetTarget( + name="temp", + path=fstore.get_feature_set(feature_set_uri).spec.targets[0].path, + ).as_df() + 
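+        # The two invocations above send 1 and then 2 data points, so the
+        # parquet target is expected to hold exactly 3 events with the full
+        # schema.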
+ is_schema_saved = set(model_dict.get("schema")).issubset( + offline_response_df.columns + ) + has_all_the_events = offline_response_df.shape[0] == 3 + + return { + "model_name": model_name, + "is_schema_saved": is_schema_saved, + "has_all_the_events": has_all_the_events, + } + + def test_all(self) -> None: + self.project.enable_model_monitoring( + base_period=1, + default_controller_image=self.image or "mlrun/mlrun", + ) + futures = [] + with ThreadPoolExecutor() as executor: + for model_name, model_dict in self.models.items(): + self._log_model( + model_name, + training_set=model_dict.get("training_set"), + label_column=model_dict.get("label_column"), + ) + future = executor.submit(self._deploy_model_serving, **model_dict) + futures.append(future) + + for future in concurrent.futures.as_completed(futures): + future.result() + + futures_2 = [] + with ThreadPoolExecutor() as executor: + self.db = mlrun.model_monitoring.get_model_endpoint_store( + project=self.project_name + ) + endpoints = self.db.list_model_endpoints() + for endpoint in endpoints: + future = executor.submit( + self._test_endpoint, + model_name=endpoint[mm_constants.EventFieldType.MODEL].split(":")[ + 0 + ], + feature_set_uri=endpoint[ + mm_constants.EventFieldType.FEATURE_SET_URI + ], + ) + futures_2.append(future) + + for future in concurrent.futures.as_completed(futures_2): + res_dict = future.result() + assert res_dict[ + "is_schema_saved" + ], f"For {res_dict['model_name']} the schema of parquet is missing columns" + + assert res_dict[ + "has_all_the_events" + ], f"For {res_dict['model_name']} Not all the events were saved" diff --git a/tests/system/model_monitoring/test_model_monitoring.py b/tests/system/model_monitoring/test_model_monitoring.py index 2cbaa2c45f4..ee31fa641bc 100644 --- a/tests/system/model_monitoring/test_model_monitoring.py +++ b/tests/system/model_monitoring/test_model_monitoring.py @@ -32,6 +32,7 @@ import mlrun.artifacts.model import mlrun.common.schemas.model_monitoring +import mlrun.common.schemas.model_monitoring.constants as mm_constants import mlrun.feature_store import mlrun.model_monitoring.api import mlrun.serving.routers @@ -1028,10 +1029,14 @@ def _test_feature_names(self) -> None: feature_set = self._get_monitoring_feature_set() features = feature_set.spec.features feature_names = [feat.name for feat in features] - assert feature_names == [ - mlrun.feature_store.api.norm_column_name(feat) - for feat in self.columns + [self.y_name] - ] + assert ( + feature_names + == [ + mlrun.feature_store.api.norm_column_name(feat) + for feat in self.columns + [self.y_name] + ] + + mm_constants.FeatureSetFeatures.list() + ) def test_inference_feature_set(self) -> None: self.project.log_model( # pyright: ignore[reportOptionalMemberAccess] @@ -1053,7 +1058,6 @@ def test_inference_feature_set(self) -> None: endpoint_id=self.endpoint_id, context=mlrun.get_or_create_ctx(name=f"{self.name_prefix}-context"), # pyright: ignore[reportGeneralTypeIssues] infer_results_df=self.infer_results_df, - trigger_monitoring_job=True, ) self._test_feature_names() From eceb276a25e5230c0c53c660a76ea0d895e972dd Mon Sep 17 00:00:00 2001 From: tomer-mamia <125267619+tomerm-iguazio@users.noreply.github.com> Date: Mon, 1 Apr 2024 12:01:27 +0300 Subject: [PATCH 087/119] [Requirements] Bump v3io to 0.6.4 - 1.6.x (#5355) (#5359) --- requirements.txt | 4 ++-- tests/test_requirements.py | 2 +- 2 files changed, 3 insertions(+), 3 deletions(-) diff --git a/requirements.txt b/requirements.txt index adb19a6e16c..3e2870e7b16 100644 --- 
a/requirements.txt +++ b/requirements.txt @@ -21,7 +21,7 @@ pyyaml~=5.1 requests~=2.31 # >=0.8.6 from kfp 1.6.0 (and still up until 1.8.10) tabulate~=0.8.6 -v3io~=0.6.2 +v3io~=0.6.4 # pydantic 1.10.8 fixes a bug with literal and typing-extension 4.6.0 # https://docs.pydantic.dev/latest/changelog/#v1108-2023-05-23 pydantic~=1.10, >=1.10.8 @@ -32,7 +32,7 @@ dependency-injector~=4.41 # should be identical to gcs and s3fs. fsspec==2023.9.2 v3iofs~=0.1.17 -storey~=1.6.19 +storey~=1.6.20 inflection~=0.5.0 python-dotenv~=0.17.0 # older version of setuptools contains vulnerabilities, see `GHSA-r9hx-vwmv-q579`, so we limit to 65.5 and above diff --git a/tests/test_requirements.py b/tests/test_requirements.py index 62261335823..b267b1dd600 100644 --- a/tests/test_requirements.py +++ b/tests/test_requirements.py @@ -111,7 +111,7 @@ def test_requirement_specifiers_convention(): ignored_invalid_map = { # See comment near requirement for why we're limiting to patch changes only for all of these "aiobotocore": {">=2.5.0,<2.8"}, - "storey": {"~=1.6.19"}, + "storey": {"~=1.6.20"}, "nuclio-sdk": {">=0.5"}, "bokeh": {"~=2.4, >=2.4.2"}, # protobuf is limited just for docs From bcb163bdabcaa33cc56bf957671e8e3dd07331f9 Mon Sep 17 00:00:00 2001 From: Alon Maor <48641682+alonmr@users.noreply.github.com> Date: Mon, 1 Apr 2024 16:33:13 +0300 Subject: [PATCH 088/119] [Pipelines] Fix get workflow id timeouts and make it configurable per run [1.6.x] (#5352) --- mlrun/config.py | 5 +++-- mlrun/db/httpdb.py | 24 +++++++++++++----------- mlrun/projects/pipelines.py | 9 ++++++++- mlrun/projects/project.py | 29 ++++++++++++----------------- mlrun/run.py | 2 ++ 5 files changed, 38 insertions(+), 31 deletions(-) diff --git a/mlrun/config.py b/mlrun/config.py index a9c99ceec29..68dbbc21a79 100644 --- a/mlrun/config.py +++ b/mlrun/config.py @@ -611,8 +611,9 @@ }, "workflows": { "default_workflow_runner_name": "workflow-runner-{}", - # Default timeout seconds for retrieving workflow id after execution: - "timeouts": {"local": 120, "kfp": 30, "remote": 90}, + # Default timeout seconds for retrieving workflow id after execution + # Remote workflow timeout is the maximum between remote and the inner engine timeout + "timeouts": {"local": 120, "kfp": 60, "remote": 60 * 5}, }, "log_collector": { "address": "localhost:8282", diff --git a/mlrun/db/httpdb.py b/mlrun/db/httpdb.py index 7afe7ea0b0e..2236c0b3533 100644 --- a/mlrun/db/httpdb.py +++ b/mlrun/db/httpdb.py @@ -1611,19 +1611,21 @@ def submit_pipeline( artifact_path=None, ops=None, cleanup_ttl=None, + timeout=60, ): """Submit a KFP pipeline for execution. - :param project: The project of the pipeline - :param pipeline: Pipeline function or path to .yaml/.zip pipeline file. - :param arguments: A dictionary of arguments to pass to the pipeline. - :param experiment: A name to assign for the specific experiment. - :param run: A name for this specific run. - :param namespace: Kubernetes namespace to execute the pipeline in. - :param artifact_path: A path to artifacts used by this pipeline. - :param ops: Transformers to apply on all ops in the pipeline. - :param cleanup_ttl: pipeline cleanup ttl in secs (time to wait after workflow completion, at which point the - workflow and all its resources are deleted) + :param project: The project of the pipeline + :param pipeline: Pipeline function or path to .yaml/.zip pipeline file. + :param arguments: A dictionary of arguments to pass to the pipeline. + :param experiment: A name to assign for the specific experiment. 
+ :param run: A name for this specific run. + :param namespace: Kubernetes namespace to execute the pipeline in. + :param artifact_path: A path to artifacts used by this pipeline. + :param ops: Transformers to apply on all ops in the pipeline. + :param cleanup_ttl: Pipeline cleanup ttl in secs (time to wait after workflow completion, at which point the + workflow and all its resources are deleted) + :param timeout: Timeout for the API call. """ if isinstance(pipeline, str): @@ -1665,7 +1667,7 @@ def submit_pipeline( "POST", f"projects/{project}/pipelines", params=params, - timeout=20, + timeout=timeout, body=data, headers=headers, ) diff --git a/mlrun/projects/pipelines.py b/mlrun/projects/pipelines.py index 429b343a75f..37d52ad0c58 100644 --- a/mlrun/projects/pipelines.py +++ b/mlrun/projects/pipelines.py @@ -608,6 +608,7 @@ def run( namespace=namespace, artifact_path=artifact_path, cleanup_ttl=workflow_spec.cleanup_ttl, + timeout=int(mlrun.mlconf.workflows.timeouts.kfp), ) # The user provided workflow code might have made changes to function specs that require cleanup @@ -865,15 +866,21 @@ def run( ) return + get_workflow_id_timeout = max( + int(mlrun.mlconf.workflows.timeouts.remote), + int(getattr(mlrun.mlconf.workflows.timeouts, inner_engine.engine)), + ) + logger.debug( "Workflow submitted, waiting for pipeline run to start", workflow_name=workflow_response.name, + get_workflow_id_timeout=get_workflow_id_timeout, ) # Getting workflow id from run: response = retry_until_successful( 1, - getattr(mlrun.mlconf.workflows.timeouts, inner_engine.engine), + get_workflow_id_timeout, logger, False, run_db.get_workflow_id, diff --git a/mlrun/projects/project.py b/mlrun/projects/project.py index 3e398bd1d12..b3b7e0ebe5d 100644 --- a/mlrun/projects/project.py +++ b/mlrun/projects/project.py @@ -2602,16 +2602,12 @@ def run( ) -> _PipelineRunStatus: """Run a workflow using kubeflow pipelines - :param name: Name of the workflow - :param workflow_path: - URL to a workflow file, if not a project workflow - :param arguments: - Kubeflow pipelines arguments (parameters) - :param artifact_path: - Target path/url for workflow artifacts, the string - '{{workflow.uid}}' will be replaced by workflow id - :param workflow_handler: - Workflow function handler (for running workflow function directly) + :param name: Name of the workflow + :param workflow_path: URL to a workflow file, if not a project workflow + :param arguments: Kubeflow pipelines arguments (parameters) + :param artifact_path: Target path/URL for workflow artifacts, the string '{{workflow.uid}}' will be + replaced by workflow id. + :param workflow_handler: Workflow function handler (for running workflow function directly) :param namespace: Kubernetes namespace if other than default :param sync: Force functions sync before run :param watch: Wait for pipeline completion @@ -2624,7 +2620,7 @@ def run( (which will be converted to the class using its `from_crontab` constructor), see this link for help: https://apscheduler.readthedocs.io/en/3.x/modules/triggers/cron.html#module-apscheduler.triggers.cron - for using the pre-defined workflow's schedule, set `schedule=True` + For using the pre-defined workflow's schedule, set `schedule=True` :param timeout: Timeout in seconds to wait for pipeline completion (watch will be activated) :param source: Source to use instead of the actual `project.spec.source` (used when engine is remote). 
Can be one of:

            1. Remote URL which is loaded dynamically to the workflow runner.
            2. A path to the project's context on the workflow runner's image.
            Path can be absolute or relative to `project.spec.build.source_code_target_dir` if defined
            (enriched when building a project image with source, see `MlrunProject.build_image`).
            For other engines the source is used to validate that the code is up-to-date.
-        :param cleanup_ttl:
-                        Pipeline cleanup ttl in secs (time to wait after workflow completion, at which point the
-                        Workflow and all its resources are deleted)
-        :param notifications:
-                        List of notifications to send for workflow completion
+        :param cleanup_ttl: Pipeline cleanup ttl in secs (time to wait after workflow completion, at which point the
+                            workflow and all its resources are deleted)
+        :param notifications: List of notifications to send for workflow completion
+
+        :returns: :py:class:`~mlrun.projects.pipelines._PipelineRunStatus` instance
         """
 
         arguments = arguments or {}
diff --git a/mlrun/run.py b/mlrun/run.py
index 968e5fad9e3..561997c813b 100644
--- a/mlrun/run.py
+++ b/mlrun/run.py
@@ -851,6 +851,7 @@ def _run_pipeline(
     ops=None,
     url=None,
     cleanup_ttl=None,
+    timeout=60,
 ):
     """remote KubeFlow pipeline execution
 
@@ -888,6 +889,7 @@ def _run_pipeline(
         ops=ops,
         artifact_path=artifact_path,
         cleanup_ttl=cleanup_ttl,
+        timeout=timeout,
     )
     logger.info(f"Pipeline run id={pipeline_run_id}, check UI for progress")
     return pipeline_run_id

From ece9bde36853197a7e41c230282abcf9391ad6db Mon Sep 17 00:00:00 2001
From: Liran BG
Date: Fri, 29 Mar 2024 14:02:03 +0300
Subject: [PATCH 089/119] [MLRun] Finalizing docs for 1.6.2 (#5345)

---
 docs/cli.md                            | 6 +++---
 docs/install/compose.with-jupyter.yaml | 4 ++--
 docs/install/compose.yaml              | 4 ++--
 docs/projects/ci-integration.md        | 2 +-
 docs/runtimes/images.md                | 6 +++---
 hack/local/README.md                   | 8 ++++----
 hack/local/mljupy.yaml                 | 2 +-
 hack/local/mlrun-local.yaml            | 4 ++--
 hack/mlrun-all.yaml                    | 4 ++--
 hack/mlrunapi.yaml                     | 2 +-
 hack/mlrunui.yaml                      | 2 +-
 11 files changed, 22 insertions(+), 22 deletions(-)

diff --git a/docs/cli.md b/docs/cli.md
index 2d6b1648b0f..7095d46110b 100644
--- a/docs/cli.md
+++ b/docs/cli.md
@@ -280,7 +280,7 @@ spec:
   image: .mlrun/func-default-remote-demo-ps-latest
   image_pull_policy: Always
   build:
-    base_image: mlrun/mlrun:1.6.1
+    base_image: mlrun/mlrun:1.6.2
     source: git://github.com/mlrun/mlrun
 ```
 
@@ -310,7 +310,7 @@ spec:
   image_pull_policy: Always
   build:
     commands: []
-    base_image: mlrun/mlrun:1.6.1
+    base_image: mlrun/mlrun:1.6.2
     source: git://github.com/mlrun/ci-demo.git
 ```
 
@@ -338,7 +338,7 @@ spec:
   image_pull_policy: Always
   build:
     commands: []
-    base_image: mlrun/mlrun:1.6.1
+    base_image: mlrun/mlrun:1.6.2
 ```
 
 Next, run the following MLRun CLI command to build the function; replace the `<...>` placeholders to match your configuration:
diff --git a/docs/install/compose.with-jupyter.yaml b/docs/install/compose.with-jupyter.yaml
index b83aed8a449..4a85e612da7 100644
--- a/docs/install/compose.with-jupyter.yaml
+++ b/docs/install/compose.with-jupyter.yaml
@@ -39,7 +39,7 @@ services:
       - nuclio-platform-config:/etc/nuclio/config
 
   jupyter:
-    image: "mlrun/jupyter:${TAG:-1.6.1}"
+    image: "mlrun/jupyter:${TAG:-1.6.2}"
     ports:
       - "8080:8080"
       - "8888:8888"
@@ -61,7 +61,7 @@ services:
       - mlrun
 
   mlrun-ui:
-    image: "mlrun/mlrun-ui:${TAG:-1.6.1}"
+    image: "mlrun/mlrun-ui:${TAG:-1.6.2}"
     ports:
       - "8060:8090"
     environment:
diff --git a/docs/install/compose.yaml b/docs/install/compose.yaml
index 44afb70c4b5..3fcface9cae 100644
--- a/docs/install/compose.yaml
+++ b/docs/install/compose.yaml
@@ -39,7 +39,7 @@ services:
      - 
nuclio-platform-config:/etc/nuclio/config
 
   mlrun-api:
-    image: "mlrun/mlrun-api:${TAG:-1.6.1}"
+    image: "mlrun/mlrun-api:${TAG:-1.6.2}"
     ports:
       - "8080:8080"
     environment:
@@ -61,7 +61,7 @@ services:
       - mlrun
 
   mlrun-ui:
-    image: "mlrun/mlrun-ui:${TAG:-1.6.1}"
+    image: "mlrun/mlrun-ui:${TAG:-1.6.2}"
     ports:
       - "8060:8090"
     environment:
diff --git a/docs/projects/ci-integration.md b/docs/projects/ci-integration.md
index b128571d79f..a4689f0fb50 100644
--- a/docs/projects/ci-integration.md
+++ b/docs/projects/ci-integration.md
@@ -122,7 +122,7 @@ pipeline {
         }
         agent {
             docker {
-                image 'mlrun/mlrun:1.6.1'
+                image 'mlrun/mlrun:1.6.2'
             }
         }
         steps {
diff --git a/docs/runtimes/images.md b/docs/runtimes/images.md
index 8986004dcf9..e72d68c3a6e 100644
--- a/docs/runtimes/images.md
+++ b/docs/runtimes/images.md
@@ -100,8 +100,8 @@ This flow describes how to build the image externally, put it your private repo,
 ## MLRun images and external docker images
 
 There is no difference in the usage between the MLRun images and external docker images. However:
-- MLRun images resolve auto tags: If you specify ```image="mlrun/mlrun"``` the API fills in the tag by the client version, e.g. changes it to `mlrun/mlrun:1.6.1`. So, if the client gets upgraded you'll automatically get a new image tag.
-- Where the data node registry exists, MLRun Appends the registry prefix, so the image loads from the datanode registry. This pulls the image more quickly, and also supports air-gapped sites. When you specify an MLRun image, for example `mlrun/mlrun:1.6.1`, the actual image used is similar to `datanode-registry.iguazio-platform.app.vm/mlrun/mlrun:1.6.1`.
+- MLRun images resolve auto tags: If you specify ```image="mlrun/mlrun"``` the API fills in the tag by the client version, e.g. changes it to `mlrun/mlrun:1.6.2`. So, if the client gets upgraded you'll automatically get a new image tag.
+- Where the data node registry exists, MLRun appends the registry prefix, so the image loads from the datanode registry. This pulls the image more quickly, and also supports air-gapped sites. When you specify an MLRun image, for example `mlrun/mlrun:1.6.2`, the actual image used is similar to `datanode-registry.iguazio-platform.app.vm/mlrun/mlrun:1.6.2`.
 
 These characteristics are great when you’re working in a POC or development environment. But MLRun typically upgrades packages as part of the image, and therefore the default MLRun images can break your product flow.
 
@@ -110,5 +110,5 @@ These characteristics are great when you’re working in a POC or development en
 For production, **create your own images** to ensure that the image is fixed.
 ```
-- Pin the image tag, e.g. `image="mlrun/mlrun:1.6.1"`. This maintains the image tag at the version you specified, even when the client is upgraded. Otherwise, an upgrade of the client would also upgrade the image. (If you specify an external (not MLRun images) docker image, like python, the result is the docker/k8s default behavior, which defaults to `latest` when the tag is not provided.)
+- Pin the image tag, e.g. `image="mlrun/mlrun:1.6.2"`. This maintains the image tag at the version you specified, even when the client is upgraded. Otherwise, an upgrade of the client would also upgrade the image. (If you specify an external (not MLRun images) docker image, like python, the result is the docker/k8s default behavior, which defaults to `latest` when the tag is not provided.)
 - Pin the versions of requirements, again to avoid breakages, e.g. `pandas==1.4.0`. (If you only specify the package name, e.g.
pandas, then pip/conda (python's package managers) just pick up the latest version.) diff --git a/hack/local/README.md b/hack/local/README.md index a4d38936d44..70f72156c28 100644 --- a/hack/local/README.md +++ b/hack/local/README.md @@ -28,12 +28,12 @@ To use MLRun with your local Docker registry, run the MLRun API service, dashboa ``` SHARED_DIR=~/mlrun-data -docker pull mlrun/jupyter:1.6.1 -docker pull mlrun/mlrun-ui:1.6.1 +docker pull mlrun/jupyter:1.6.2 +docker pull mlrun/mlrun-ui:1.6.2 docker network create mlrun-network -docker run -it -p 8080:8080 -p 8888:8888 --rm -d --network mlrun-network --name jupyter -v ${SHARED_DIR}:/home/jovyan/data mlrun/jupyter:1.6.1 -docker run -it -p 4000:80 --rm -d --network mlrun-network --name mlrun-ui -e MLRUN_API_PROXY_URL=http://jupyter:8080 mlrun/mlrun-ui:1.6.1 +docker run -it -p 8080:8080 -p 8888:8888 --rm -d --network mlrun-network --name jupyter -v ${SHARED_DIR}:/home/jovyan/data mlrun/jupyter:1.6.2 +docker run -it -p 4000:80 --rm -d --network mlrun-network --name mlrun-ui -e MLRUN_API_PROXY_URL=http://jupyter:8080 mlrun/mlrun-ui:1.6.2 ``` When the execution completes — diff --git a/hack/local/mljupy.yaml b/hack/local/mljupy.yaml index 5bb48085078..528aef4258f 100644 --- a/hack/local/mljupy.yaml +++ b/hack/local/mljupy.yaml @@ -77,7 +77,7 @@ spec: spec: containers: - name: jupyter-notebook - image: mlrun/jupyter:1.6.1 + image: mlrun/jupyter:1.6.2 env: - name: MLRUN_NAMESPACE valueFrom: diff --git a/hack/local/mlrun-local.yaml b/hack/local/mlrun-local.yaml index 69bf82ff9e4..19bb31fc206 100644 --- a/hack/local/mlrun-local.yaml +++ b/hack/local/mlrun-local.yaml @@ -31,7 +31,7 @@ spec: spec: containers: - name: mlrun-api - image: mlrun/mlrun-api:1.6.1 + image: mlrun/mlrun-api:1.6.2 env: - name: MLRUN_NAMESPACE valueFrom: @@ -86,7 +86,7 @@ spec: spec: containers: - name: mlrun-ui - image: mlrun/mlrun-ui:1.6.1 + image: mlrun/mlrun-ui:1.6.2 env: - name: MLRUN_API_PROXY_URL value: http://mlrun-api:8080 diff --git a/hack/mlrun-all.yaml b/hack/mlrun-all.yaml index a5414424265..de8c9d8be9e 100644 --- a/hack/mlrun-all.yaml +++ b/hack/mlrun-all.yaml @@ -31,7 +31,7 @@ spec: spec: containers: - name: mlrun-api - image: mlrun/mlrun-api:1.6.1 + image: mlrun/mlrun-api:1.6.2 env: - name: MLRUN_NAMESPACE valueFrom: @@ -91,7 +91,7 @@ spec: spec: containers: - name: mlrun-ui - image: mlrun/mlrun-ui:1.6.1 + image: mlrun/mlrun-ui:1.6.2 env: - name: MLRUN_API_PROXY_URL value: http://mlrun-api:8080 diff --git a/hack/mlrunapi.yaml b/hack/mlrunapi.yaml index 64adbdf3b81..9da287f75fb 100644 --- a/hack/mlrunapi.yaml +++ b/hack/mlrunapi.yaml @@ -31,7 +31,7 @@ spec: spec: containers: - name: mlrun-api - image: mlrun/mlrun-api:1.6.1 + image: mlrun/mlrun-api:1.6.2 env: - name: MLRUN_HTTPDB__BUILDER__DOCKER_REGISTRY value: "default registry url e.g. 
index.docker.io/<username>, if repository is not set it will default to mlrun"
diff --git a/hack/mlrunui.yaml b/hack/mlrunui.yaml
index f630cec533f..ac3a76a9be0 100644
--- a/hack/mlrunui.yaml
+++ b/hack/mlrunui.yaml
@@ -30,7 +30,7 @@ spec:
     spec:
       containers:
       - name: mlrun-ui
-        image: mlrun/mlrun-ui:1.6.1
+        image: mlrun/mlrun-ui:1.6.2
         env:
         - name: MLRUN_API_PROXY_URL
           value: http://mlrun-api:8080

From 4577c6fdb26edc287c7f9dd2a1eecbf1183b4652 Mon Sep 17 00:00:00 2001
From: Liran BG
Date: Mon, 1 Apr 2024 13:07:16 +0300
Subject: [PATCH 090/119] [Docs] Update change log [1.6.x] (#5361)

---
 docs/change-log/index.md |  5 +++++
 docs/index.md            | 13 ++++---------
 2 files changed, 9 insertions(+), 9 deletions(-)

diff --git a/docs/change-log/index.md b/docs/change-log/index.md
index 1b4f9461aca..ab77227ad5b 100644
--- a/docs/change-log/index.md
+++ b/docs/change-log/index.md
@@ -75,6 +75,10 @@
 |NA|New: {ref}`project-setup`.|
 |NA|Improved serving function example, and new example of a serving function with Git integration. See {ref}`serving-function`.

+### Breaking Changes
+| ID |Description |
+|---------|-----------------------------------------------------------------------------------------------------|
+|ML-4741 | The default `target_dir` path of `with_source_archive` is now `/home/mlrun_code`. It was previously `/tmp`, which could be randomly deleted. If you are running a Spark job, and cloning the git repo, with mlrun <1.6.0, run `sj.with_source_archive(source=project.source, pull_at_runtime=False)`, then run: `sj.spec.image_pull_policy = "Always"`, `sj.spec.build.commands = ["mkdir -p /mlrun"]`, `sj.with_source_archive(source=project.source, pull_at_runtime=False, target_dir="/mlrun")`|

 ### Closed issues
 | ID |Description |
@@ -883,6 +887,7 @@ with a drill-down to view the steps and their details. [Tech Preview]
 |ML-4956|A function created by SDK is initially in the "initialized" state in the UI and needs to be deployed before running it. | In **Edit**, press **Deploy** | v1.5.1 |
 |ML-5079|Cannot update git remote with `project.create_remote()`| NA | v1.5.1 |
 |ML-5204|The **Projects>Settings** does not validate label names. Errors are generated from the back end. |Use [Kubernetes limitations](https://kubernetes.io/docs/concepts/overview/working-with-objects/labels/#syntax-and-character-set). | v1.6.0 |
+|ML-5573|The default value of feature-set ingest() infer_options is "all" (which includes Preview) and as a result, during ingest, preview is done as well. As a result, if a validator was configured for a feature, each violation causes two messages to be printed.|NA|v1.6.0|
 |ML-5732|When using an MLRun client previous to v1.6.0, the workflow step status might show completed when it is actually aborted.|Abort the job from the SDK instead of from the UI, or upgrade the client. |1.6.0|
 |ML-5776|Concurrent request to project deletion may fail thought first call would gracefully finish the flow, without experiencing any error. Other concurrent requests would not impact the project deletion flow.|NA| v1.6.0|

diff --git a/docs/index.md b/docs/index.md
index 9d708728262..a5fd9ce26b4 100644
--- a/docs/index.md
+++ b/docs/index.md
@@ -1,7 +1,7 @@
 (using-mlrun)=
 # Using MLRun

-```{div} full-width
+

 MLRun is an open MLOps platform for quickly building and managing continuous ML applications across their lifecycle. MLRun integrates into your development and CI/CD environment and automates the delivery of production data, ML pipelines, and online applications.
MLRun significantly reduces engineering efforts, time to production, and computation resources. With MLRun, you can choose any IDE on your local machine or on the cloud. MLRun breaks the silos between data, ML, software, and DevOps/MLOps teams, enabling collaboration and fast continuous improvements. @@ -12,15 +12,12 @@ This page explains how MLRun addresses the [**MLOps tasks**](#mlops-tasks), and See the supported data stores, development tools, services, platforms, etc., supported by MLRun's open architecture in **{ref}`ecosystem`**. -``` - - ## MLOps tasks -`````{div} full-width +`````{div} -````{grid} 4 +````{grid} 4 :gutter: 2 ```{grid-item-card} Project management and CI/CD automation @@ -143,8 +140,6 @@ Observability is built into the different MLRun objects (data, functions, jobs, MLRun includes the following major components: -`````{div} full-width - ````{grid} 6 :gutter: 2 @@ -206,7 +201,7 @@ MLRun includes the following major components: **{ref}`Real-time monitoring `:** Monitors data, models, resources, and production components and provides a feedback loop for exploring production data, identifying drift, alerting on anomalies or data quality issues, triggering retraining jobs, measuring business impact, etc. -````` + ```{toctree} :hidden: From adcd069f5471e204b52663b446dd0f11b5d405f8 Mon Sep 17 00:00:00 2001 From: Liran BG Date: Mon, 1 Apr 2024 13:07:38 +0300 Subject: [PATCH 091/119] [Docs] Update change log for v1.6.2 [1.6.x] (#5362) --- docs/change-log/index.md | 21 ++++++++++++++++++--- docs/conf.py | 2 +- docs/feature-store/transformations.md | 2 +- 3 files changed, 20 insertions(+), 5 deletions(-) diff --git a/docs/change-log/index.md b/docs/change-log/index.md index ab77227ad5b..2da8111e7d5 100644 --- a/docs/change-log/index.md +++ b/docs/change-log/index.md @@ -1,7 +1,7 @@ (change-log)= # Change log -- [v1.6.1](#v1-6-1-29-february-2024) | [v1.6.0](#v1-6-0-22-february-2024) +- [v1.6.2](#v1-6-2-29-march-2024) | [v1.6.1](#v1-6-1-29-february-2024) | [v1.6.0](#v1-6-0-22-february-2024) - [v1.5.2](#v1-5-2-30-november-2023) | [v1.5.1](#v1-5-1-2-november-2023) | [v1.5.0](#v1-5-0-23-october-2023) - [v1.4.1](#v1-4-1-8-august-2023) | [v1.4.0](#v1-4-0-23-july-2023) - [v1.3.4](#v1-3-4-23-august-2023) | [v1.3.3](#v1-3-3-7-jun-2023) | [v1.3.2](#v1-3-2-4-jun-2023) | [v1.3.1](#v1-3-1-18-may-2023) | [v1.3.0](#v1-3-0-22-march-2023) @@ -10,7 +10,20 @@ - [v1.0.6](#v1-0-6-16-august-2022) | [v1.0.5](#v1-0-5-11-august-2022) | [v1.0.4](#v1-0-4-13-june-2022) | [v1.0.3](#v1-0-3-7-june-2022) | [v1.0.2](#v1-0-2-19-may-2022) | [v1.0.0](#v1-0-0-22-april-2022) - [Open issues](#open-issues) - [Limitations](#limitations) -- [Deprecations](#deprecations-and-removed-code) +- [Deprecations and removed code](#deprecations-and-removed-code) + + + +## v1.6.2 (29 March 2024) + +### Closed issues +| ID |Description | +|----------|---------------------------------------------------------------------------| +|ML-5808|Fix selecting the project-owner user.| +|ML-5907|"Invite New Members" now returns the full list of users when there are 100+ users in system.| +|ML-5749, 6037|After the user removes ownership of the currently displayed project, the UI redirects to the Projects page.| +|ML-5977|The 'Members' tab in Project settings is now shown for groups with admin privileges.| + ## v1.6.1 (29 February 2024) @@ -889,7 +902,9 @@ with a drill-down to view the steps and their details. [Tech Preview] |ML-5204|The **Projects>Settings** does not validate label names. Errors are generated from the back end. 
|Use [Kubernetes limitations](https://kubernetes.io/docs/concepts/overview/working-with-objects/labels/#syntax-and-character-set). | v1.6.0 | |ML-5573|The default value of feature-set ingest() infer_options is "all" (which includes Preview) and as a result, during ingest, preview is done as well. As a result, if a validator was configured for a feature, each violation causes two messages to be printed.|NA|v1.6.0| |ML-5732|When using an MLRun client previous to v1.6.0, the workflow step status might show completed when it is actually aborted.|Abort the job from the SDK instead of from the UI, or upgrade the client. |1.6.0| -|ML-5776|Concurrent request to project deletion may fail thought first call would gracefully finish the flow, without experiencing any error. Other concurrent requests would not impact the project deletion flow.|NA| v1.6.0| +|ML-6048|UI: An admin user cannot change its role in the project. | NA |v1.6.2| +|ML-6045|UI: If a user-filter has a large number of matches, it may not display all the matching users. |Narrow your search to be sure you get all the matches.|v1.6.2| +|ML-6051|UI: After an admin user deletes itself from a project, the user stays in the Projects Members page even though it has no permissions and cannot view any aspects of the project.| NA |v1.6.2| ## Limitations diff --git a/docs/conf.py b/docs/conf.py index e2bbb10cd51..c6fa8f920c2 100644 --- a/docs/conf.py +++ b/docs/conf.py @@ -147,7 +147,7 @@ def current_version(): myst_heading_anchors = 2 myst_all_links_external = True -# These substitutions point to the relevant mlrun docs for the currect CE version +# These substitutions point to the relevant mlrun docs for the current CE version myst_substitutions = { "version": "version", "ceversion": "v1.4.0", diff --git a/docs/feature-store/transformations.md b/docs/feature-store/transformations.md index f8dd6989751..3cdf03b738d 100644 --- a/docs/feature-store/transformations.md +++ b/docs/feature-store/transformations.md @@ -215,7 +215,7 @@ steps. When implementing custom transformations, the code has to support all eng ```{admonition} Note The vast majority of MLRun's built-in transformations support all engines. The support matrix is available -[here](../serving/available-steps.html#data-transformations). +[here](../serving/available-steps.html#data-transformation-steps). 
``` The following are the main differences between transformation steps executing on different engines: From e48bcf64d5c6c36239e481620674182e88911c68 Mon Sep 17 00:00:00 2001 From: Liran BG Date: Tue, 2 Apr 2024 12:12:56 +0300 Subject: [PATCH 092/119] [Version] Bump 1.6.x to 1.6.3 (#5366) --- automation/version/unstable_version_prefix | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/automation/version/unstable_version_prefix b/automation/version/unstable_version_prefix index fdd3be6df54..266146b87cb 100644 --- a/automation/version/unstable_version_prefix +++ b/automation/version/unstable_version_prefix @@ -1 +1 @@ -1.6.2 +1.6.3 From a95096ef1c320ccc2f2b969e4b05d6bbeff4f723 Mon Sep 17 00:00:00 2001 From: Gal Topper Date: Mon, 8 Apr 2024 13:23:29 +0800 Subject: [PATCH 093/119] [Requirements] Tighten pydantic upperbound due to breakage upstream (#5391) --- requirements.txt | 4 +++- 1 file changed, 3 insertions(+), 1 deletion(-) diff --git a/requirements.txt b/requirements.txt index 3e2870e7b16..e157b1f4525 100644 --- a/requirements.txt +++ b/requirements.txt @@ -24,7 +24,9 @@ tabulate~=0.8.6 v3io~=0.6.4 # pydantic 1.10.8 fixes a bug with literal and typing-extension 4.6.0 # https://docs.pydantic.dev/latest/changelog/#v1108-2023-05-23 -pydantic~=1.10, >=1.10.8 +# TODO: loosen upperbound and remove the below comment once fixed upstream +# pydantic 1.10.15 breaks backwards compatibility due to https://github.com/pydantic/pydantic/pull/9042 +pydantic>=1.10.8, <1.10.15 mergedeep~=1.3 v3io-frames~=0.10.12 semver~=3.0 From f54975a7cda9c4ada5c560b99e188b521e661a38 Mon Sep 17 00:00:00 2001 From: moranbental <107995850+moranbental@users.noreply.github.com> Date: Mon, 8 Apr 2024 11:57:22 +0300 Subject: [PATCH 094/119] [Requirements] Tighten igz-mgmt to 0.1.0 (#5397) --- dockerfiles/mlrun-api/requirements.txt | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/dockerfiles/mlrun-api/requirements.txt b/dockerfiles/mlrun-api/requirements.txt index 31c7a9dfeaf..83b39f00670 100644 --- a/dockerfiles/mlrun-api/requirements.txt +++ b/dockerfiles/mlrun-api/requirements.txt @@ -4,7 +4,7 @@ dask-kubernetes~=0.11.0 # 3.10.2 is bugged for python 3.9 apscheduler>=3.10.3,<4 objgraph~=3.6 -igz-mgmt~=0.1.0 +igz-mgmt==0.1.0 humanfriendly~=10.0 fastapi~=0.110.0 # in sqlalchemy>=2.0 there is breaking changes (such as in Table class autoload argument is removed) From c47148d3b838a96c185fbb0ee57aeb52c312fb1a Mon Sep 17 00:00:00 2001 From: davesh0812 <85231462+davesh0812@users.noreply.github.com> Date: Tue, 9 Apr 2024 10:41:02 +0300 Subject: [PATCH 095/119] [Model Monitoring] fix test_record, test_batch_drift system test [1.6.x] (#5395) --- mlrun/model_monitoring/features_drift_table.py | 6 ++++++ tests/system/model_monitoring/test_model_monitoring.py | 4 ++++ 2 files changed, 10 insertions(+) diff --git a/mlrun/model_monitoring/features_drift_table.py b/mlrun/model_monitoring/features_drift_table.py index 0c89a6915eb..8083ed244d8 100644 --- a/mlrun/model_monitoring/features_drift_table.py +++ b/mlrun/model_monitoring/features_drift_table.py @@ -19,6 +19,7 @@ from plotly.subplots import make_subplots import mlrun.common.schemas.model_monitoring +import mlrun.common.schemas.model_monitoring.constants as mm_constants # A type for representing a drift result, a tuple of the status and the drift mean: DriftResultType = Tuple[mlrun.common.schemas.model_monitoring.DriftStatus, float] @@ -112,6 +113,11 @@ def produce( :return: The full path to the html file of the plot. 
""" # Plot the drift table: + features = [ + feature + for feature in features + if feature not in mm_constants.FeatureSetFeatures.list() + ] figure = self._plot( features=features, sample_set_statistics=sample_set_statistics, diff --git a/tests/system/model_monitoring/test_model_monitoring.py b/tests/system/model_monitoring/test_model_monitoring.py index ee31fa641bc..dacea6076e0 100644 --- a/tests/system/model_monitoring/test_model_monitoring.py +++ b/tests/system/model_monitoring/test_model_monitoring.py @@ -774,6 +774,7 @@ class TestBatchDrift(TestMLRunSystem): """ project_name = "pr-batch-drift" + image: str = None def custom_setup(self): mlrun.runtimes.utils.global_context.set(None) @@ -842,6 +843,7 @@ def test_batch_drift(self): context=context, infer_results_df=infer_results_df, trigger_monitoring_job=True, + default_batch_image=self.image or "mlrun/mlrun", ) # Test the drift results @@ -1073,6 +1075,7 @@ class TestModelInferenceTSDBRecord(TestMLRunSystem): project_name = "infer-model-tsdb" name_prefix = "infer-model-only" + image: str = None @classmethod def custom_setup_class(cls) -> None: @@ -1134,5 +1137,6 @@ def test_record(self) -> None: trigger_monitoring_job=True, model_endpoint_name=f"{self.name_prefix}-test", context=mlrun.get_or_create_ctx(name=f"{self.name_prefix}-context"), # pyright: ignore[reportGeneralTypeIssues] + default_batch_image=self.image or "mlrun/mlrun", ) self._test_v3io_tsdb_record() From f92c61e36251c44954e73b3b0809b07df0e9833a Mon Sep 17 00:00:00 2001 From: davesh0812 <85231462+davesh0812@users.noreply.github.com> Date: Wed, 10 Apr 2024 15:42:08 +0300 Subject: [PATCH 096/119] [Model Monitoring] Fix security issue - delete pipelines access key from serving env [1.6.x] (#5412) --- mlrun/config.py | 27 ++- mlrun/model_monitoring/helpers.py | 5 +- server/api/api/endpoints/functions.py | 73 ++----- .../api/crud/model_monitoring/deployment.py | 188 ++++++++++++------ server/api/crud/model_monitoring/helpers.py | 26 ++- tests/model_monitoring/test_target_path.py | 4 +- 6 files changed, 183 insertions(+), 140 deletions(-) diff --git a/mlrun/config.py b/mlrun/config.py index 68dbbc21a79..5f9beeb4dfb 100644 --- a/mlrun/config.py +++ b/mlrun/config.py @@ -1062,7 +1062,7 @@ def get_model_monitoring_file_target_path( target: str = "online", artifact_path: str = None, application_name: str = None, - ) -> str: + ) -> typing.Union[str, list[str]]: """Get the full path from the configuration based on the provided project and kind. :param project: Project name. @@ -1078,7 +1078,8 @@ def get_model_monitoring_file_target_path( relative artifact path will be taken from the global MLRun artifact path. :param application_name: Application name, None for model_monitoring_stream. - :return: Full configured path for the provided kind. + :return: Full configured path for the provided kind. Can be either a single path + or a list of paths in the case of the online model monitoring stream path. 
""" if target != "offline": @@ -1099,12 +1100,22 @@ def get_model_monitoring_file_target_path( if application_name is None else f"{kind}-{application_name.lower()}", ) - return mlrun.mlconf.model_endpoint_monitoring.store_prefixes.default.format( - project=project, - kind=kind - if application_name is None - else f"{kind}-{application_name.lower()}", - ) + elif kind == "stream": # return list for mlrun<1.6.3 BC + return [ + mlrun.mlconf.model_endpoint_monitoring.store_prefixes.default.format( + project=project, + kind=kind, + ), # old stream uri (pipelines) for BC ML-6043 + mlrun.mlconf.model_endpoint_monitoring.store_prefixes.user_space.format( + project=project, + kind=kind, + ), # new stream uri (projects) + ] + else: + return mlrun.mlconf.model_endpoint_monitoring.store_prefixes.default.format( + project=project, + kind=kind, + ) # Get the current offline path from the configuration file_path = mlrun.mlconf.model_endpoint_monitoring.offline_storage_path.format( diff --git a/mlrun/model_monitoring/helpers.py b/mlrun/model_monitoring/helpers.py index 55992df9d66..e8d57debaf9 100644 --- a/mlrun/model_monitoring/helpers.py +++ b/mlrun/model_monitoring/helpers.py @@ -41,7 +41,7 @@ class _MLRunNoRunsFoundError(Exception): pass -def get_stream_path(project: str = None, application_name: str = None): +def get_stream_path(project: str = None, application_name: str = None) -> str: """ Get stream path from the project secret. If wasn't set, take it from the system configurations @@ -62,6 +62,9 @@ def get_stream_path(project: str = None, application_name: str = None): application_name=application_name, ) + if isinstance(stream_uri, list): # ML-6043 - user side gets only the new stream uri + stream_uri = stream_uri[1] + return mlrun.common.model_monitoring.helpers.parse_monitoring_stream_path( stream_uri=stream_uri, project=project, application_name=application_name ) diff --git a/server/api/api/endpoints/functions.py b/server/api/api/endpoints/functions.py index 4a7f4e31b0b..964f3ce2d41 100644 --- a/server/api/api/endpoints/functions.py +++ b/server/api/api/endpoints/functions.py @@ -793,7 +793,6 @@ def _deploy_nuclio_runtime( db_session, fn, model_monitoring_access_key, - monitoring_application, ) if monitoring_application: fn = _deploy_monitoring_application( @@ -814,7 +813,6 @@ def _deploy_serving_monitoring( db_session, fn, model_monitoring_access_key, - monitoring_application, ): try: # Handle model monitoring @@ -827,28 +825,26 @@ def _deploy_serving_monitoring( # Initialize tracking policy with default values fn.spec.tracking_policy = TrackingPolicy() - if not mlrun.mlconf.is_ce_mode(): - # create v3io stream for model_monitoring_stream - create_model_monitoring_stream( - project=fn.metadata.project, - function=fn, - monitoring_application=monitoring_application, - stream_path=server.api.crud.model_monitoring.get_stream_path( - project=fn.metadata.project, - application_name=mm_constants.MonitoringFunctionNames.STREAM, - ), - ) - # deploy model monitoring stream, model monitoring batch job, - monitoring_deploy = ( + monitoring_deployment = ( server.api.crud.model_monitoring.deployment.MonitoringDeployment() ) - monitoring_deploy.deploy_monitoring_functions( + + overwrite_stream = False + if not mlrun.mlconf.is_ce_mode(): + if not monitoring_deployment.is_monitoring_stream_has_the_new_stream_trigger( + project=fn.metadata.project, + db_session=db_session, + ): + overwrite_stream = True + + monitoring_deployment.deploy_monitoring_functions( project=fn.metadata.project, db_session=db_session, 
auth_info=auth_info, tracking_policy=fn.spec.tracking_policy, model_monitoring_access_key=model_monitoring_access_key, + overwrite_stream=overwrite_stream, ) except Exception as exc: @@ -863,23 +859,11 @@ def _deploy_serving_monitoring( def _deploy_monitoring_application( auth_info, fn, model_monitoring_access_key, monitoring_application ): - if not mlrun.mlconf.is_ce_mode(): - # create v3io stream for model monitoring application - create_model_monitoring_stream( - project=fn.metadata.project, - function=fn, - monitoring_application=monitoring_application, - stream_path=server.api.crud.model_monitoring.get_stream_path( - project=fn.metadata.project, - application_name=fn.metadata.name, - ), - access_key=model_monitoring_access_key, - ) # apply stream trigger to monitoring application monitoring_deploy = ( server.api.crud.model_monitoring.deployment.MonitoringDeployment() ) - fn = monitoring_deploy._apply_stream_trigger( + fn = monitoring_deploy._apply_and_create_stream_trigger( project=fn.metadata.project, function=fn, model_monitoring_access_key=model_monitoring_access_key, @@ -1086,16 +1070,13 @@ def _is_nuclio_deploy_status_changed( def create_model_monitoring_stream( project: str, - function, stream_path: str, - monitoring_application: bool = None, access_key: str = None, + stream_args: dict = None, ): if stream_path.startswith("v3io://"): import v3io.dataplane - _init_serving_function_stream_args(fn=function) - _, container, stream_path = parse_model_endpoint_store_prefix(stream_path) # TODO: How should we configure sharding here? @@ -1108,13 +1089,9 @@ def create_model_monitoring_stream( ) v3io_client = v3io.dataplane.Client( - endpoint=config.v3io_api, access_key=os.environ.get("V3IO_ACCESS_KEY") - ) - stream_args = ( - config.model_endpoint_monitoring.application_stream_args - if monitoring_application - else config.model_endpoint_monitoring.serving_stream_args + endpoint=config.v3io_api, access_key=access_key ) + response = v3io_client.stream.create( container=container, stream_path=stream_path, @@ -1126,21 +1103,3 @@ def create_model_monitoring_stream( if not (response.status_code == 400 and "ResourceInUse" in str(response.body)): response.raise_for_status([409, 204]) - - -def _init_serving_function_stream_args(fn: ServingRuntime): - logger.debug("Initializing serving function stream args") - if "stream_args" in fn.spec.parameters: - logger.debug("Adding access key to pipelines stream args") - if "access_key" not in fn.spec.parameters["stream_args"]: - logger.debug("pipelines access key added to stream args") - fn.spec.parameters["stream_args"]["access_key"] = os.environ.get( - "V3IO_ACCESS_KEY" - ) - else: - logger.debug("pipelines access key added to stream args") - fn.spec.parameters["stream_args"] = { - "access_key": os.environ.get("V3IO_ACCESS_KEY") - } - - fn.save(versioned=True) diff --git a/server/api/crud/model_monitoring/deployment.py b/server/api/crud/model_monitoring/deployment.py index b91428ed7a1..35c59bdff67 100644 --- a/server/api/crud/model_monitoring/deployment.py +++ b/server/api/crud/model_monitoring/deployment.py @@ -11,7 +11,7 @@ # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. # See the License for the specific language governing permissions and # limitations under the License. 
-
+import os
 import pathlib
 import typing
 
@@ -28,6 +28,7 @@
 import server.api.utils.singletons.db
 import server.api.utils.singletons.k8s
 from mlrun import feature_store as fstore
+from mlrun.config import config
 from mlrun.model_monitoring.writer import ModelMonitoringWriter
 from mlrun.utils import logger
 from server.api.api import deps
@@ -78,6 +79,7 @@ def deploy_monitoring_functions(
         db_session: sqlalchemy.orm.Session,
         auth_info: mlrun.common.schemas.AuthInfo,
         tracking_policy: mlrun.model_monitoring.tracking_policy.TrackingPolicy,
+        overwrite_stream: bool = False,
     ):
         """
         Invoking monitoring deploying functions.
@@ -87,6 +89,8 @@
         :param db_session: A session that manages the current dialog with the database.
         :param auth_info: The auth info of the request.
        :param tracking_policy: Model monitoring configurations.
+        :param overwrite_stream: If true, overwrite the existing model monitoring stream function.
+                                 Default is False.
         """
         self.deploy_model_monitoring_stream_processing(
             project=project,
@@ -94,6 +98,7 @@
             db_session=db_session,
             auth_info=auth_info,
             tracking_policy=tracking_policy,
+            overwrite=overwrite_stream,
         )
         self.deploy_model_monitoring_batch_processing(
             project=project,
@@ -112,6 +117,7 @@ def deploy_model_monitoring_stream_processing(
         db_session: sqlalchemy.orm.Session,
         auth_info: mlrun.common.schemas.AuthInfo,
         tracking_policy: mlrun.model_monitoring.tracking_policy.TrackingPolicy,
+        overwrite: bool = False,
     ) -> None:
         """
         Deploying model monitoring stream real time nuclio function. The goal of this real time function is
@@ -123,29 +129,41 @@
         :param db_session: A session that manages the current dialog with the database.
         :param auth_info: The auth info of the request.
         :param tracking_policy: Model monitoring configurations.
+        :param overwrite: If true, overwrite the existing model monitoring stream function.
+                          Default is False.
""" - + if not overwrite: + logger.info( + "Checking if model monitoring stream is already deployed", + project=project, + ) + try: + # validate that the model monitoring stream has not yet been deployed + mlrun.runtimes.function.get_nuclio_deploy_status( + name="model-monitoring-stream", + project=project, + tag="", + auth_info=auth_info, + ) + logger.info( + "Detected model monitoring stream processing function already deployed", + project=project, + ) + return + except mlrun.errors.MLRunNotFoundError: + pass logger.info( - "Checking if model monitoring stream is already deployed", - project=project, + "Deploying model monitoring stream processing function", project=project ) - try: - # validate that the model monitoring stream has not yet been deployed - mlrun.runtimes.function.get_nuclio_deploy_status( + if overwrite and not self.is_monitoring_stream_has_the_new_stream_trigger( + project=project, db_session=db_session + ): # in case of only adding the new stream trigger + prev_stream_function = server.api.crud.Functions().get_function( name="model-monitoring-stream", + db_session=db_session, project=project, - tag="", - auth_info=auth_info, - ) - logger.info( - "Detected model monitoring stream processing function already deployed", - project=project, - ) - return - except mlrun.errors.MLRunNotFoundError: - logger.info( - "Deploying model monitoring stream processing function", project=project ) + tracking_policy.stream_image = prev_stream_function["spec"]["image"] # Get parquet target value for model monitoring stream function parquet_target = ( @@ -447,7 +465,7 @@ def _initial_model_monitoring_stream_processing_function( function.metadata.project = project # Add stream triggers - function = self._apply_stream_trigger( + function = self._apply_and_create_stream_trigger( project=project, function=function, model_monitoring_access_key=model_monitoring_access_key, @@ -622,7 +640,7 @@ def _generate_schedule_and_interval_dict( } return schedule, batch_dict - def _apply_stream_trigger( + def _apply_and_create_stream_trigger( self, project: str, function: mlrun.runtimes.ServingRuntime, @@ -647,41 +665,55 @@ def _apply_stream_trigger( """ # Get the stream path from the configuration - # stream_path = mlrun.mlconf.get_file_target_path(project=project, kind="stream", target="stream") - stream_path = server.api.crud.model_monitoring.get_stream_path( + stream_paths = server.api.crud.model_monitoring.get_stream_path( project=project, application_name=function_name ) + for i, stream_path in enumerate(stream_paths): + if stream_path.startswith("kafka://"): + topic, brokers = mlrun.datastore.utils.parse_kafka_url(url=stream_path) + # Generate Kafka stream source + stream_source = mlrun.datastore.sources.KafkaSource( + brokers=brokers, + topics=[topic], + ) + function = stream_source.add_nuclio_trigger(function) - if stream_path.startswith("kafka://"): - topic, brokers = mlrun.datastore.utils.parse_kafka_url(url=stream_path) - # Generate Kafka stream source - stream_source = mlrun.datastore.sources.KafkaSource( - brokers=brokers, - topics=[topic], - ) - function = stream_source.add_nuclio_trigger(function) - - if not mlrun.mlconf.is_ce_mode(): - function = self._apply_access_key_and_mount_function( - project=project, - function=function, - model_monitoring_access_key=model_monitoring_access_key, - auth_info=auth_info, - function_name=function_name, - ) - if stream_path.startswith("v3io://"): - kwargs = {} - if function_name != mm_constants.MonitoringFunctionNames.STREAM: - kwargs["access_key"] = 
model_monitoring_access_key - if mlrun.mlconf.is_explicit_ack(version=resolve_nuclio_version()): - kwargs["explicit_ack_mode"] = "explicitOnly" - kwargs["worker_allocation_mode"] = "static" - - # Generate V3IO stream trigger - function.add_v3io_stream_trigger( - stream_path=stream_path, - name=f"monitoring_{function_name or 'stream'}_trigger", - **kwargs, + if not mlrun.mlconf.is_ce_mode(): + if stream_path.startswith("v3io://"): + if "projects" in stream_path: + stream_args = ( + config.model_endpoint_monitoring.application_stream_args + ) + access_key = model_monitoring_access_key + kwargs = {"access_key": model_monitoring_access_key} + else: + stream_args = ( + config.model_endpoint_monitoring.serving_stream_args + ) + access_key = os.environ.get("V3IO_ACCESS_KEY") + kwargs = {} + if mlrun.mlconf.is_explicit_ack(version=resolve_nuclio_version()): + kwargs["explicit_ack_mode"] = "explicitOnly" + kwargs["worker_allocation_mode"] = "static" + server.api.api.endpoints.functions.create_model_monitoring_stream( + project=project, + stream_path=stream_path, + access_key=access_key, + stream_args=stream_args, + ) + # Generate V3IO stream trigger + function.add_v3io_stream_trigger( + stream_path=stream_path, + name=f"monitoring_{function_name or 'model-monitoring-stream'}" + f"_trigger{f'_{i}' if i != 0 else ''}", + **kwargs, + ) + function = self._apply_access_key_and_mount_function( + project=project, + function=function, + model_monitoring_access_key=model_monitoring_access_key, + auth_info=auth_info, + function_name=function_name, ) # Add the default HTTP source http_source = mlrun.datastore.sources.HttpSource() @@ -764,20 +796,8 @@ def _initial_model_monitoring_writer_function( # Set the project to the serving function function.metadata.project = project - # create v3io stream for model_monitoring_writer | model monitoring application - server.api.api.endpoints.functions.create_model_monitoring_stream( - project=project, - function=function, - monitoring_application=mm_constants.MonitoringFunctionNames.WRITER, - stream_path=server.api.crud.model_monitoring.get_stream_path( - project=project, - application_name=mm_constants.MonitoringFunctionNames.WRITER, - ), - access_key=model_monitoring_access_key, - ) - # Add stream triggers - function = self._apply_stream_trigger( + function = self._apply_and_create_stream_trigger( project=project, function=function, model_monitoring_access_key=model_monitoring_access_key, @@ -791,6 +811,44 @@ def _initial_model_monitoring_writer_function( return function + @staticmethod + def is_monitoring_stream_has_the_new_stream_trigger( + project: str, + db_session: sqlalchemy.orm.Session, + ) -> bool: + """ + Check if the monitoring stream function has the new stream trigger. + + :return: True if the monitoring stream function has the new stream trigger, otherwise False. 
+        """
+
+        try:
+            function = server.api.crud.Functions().get_function(
+                name="model-monitoring-stream",
+                db_session=db_session,
+                project=project,
+            )
+        except mlrun.errors.MLRunNotFoundError:
+            logger.info(
+                "The stream function is not deployed yet. When the user runs `enable_model_monitoring`, "
+                "the stream function will be deployed with both the new and the old stream triggers",
+                project=project,
+            )
+            return True
+
+        if (
+            function["spec"]["config"].get(
+                "spec.triggers.monitoring_model-monitoring-stream_trigger_1"
+            )
+            is None
+        ):
+            logger.info(
+                "The stream function needs to be updated with the new stream trigger",
+                project=project,
+            )
+            return False
+        return True
+

 def get_endpoint_features(
     feature_names: typing.List[str],
diff --git a/server/api/crud/model_monitoring/helpers.py b/server/api/crud/model_monitoring/helpers.py
index fac9b232276..7a39562cff2 100644
--- a/server/api/crud/model_monitoring/helpers.py
+++ b/server/api/crud/model_monitoring/helpers.py
@@ -139,14 +139,16 @@ def get_monitoring_parquet_path(
     return parquet_path
 
 
-def get_stream_path(project: str = None, application_name: str = None):
+def get_stream_path(
+    project: str = None, application_name: str = None
+) -> list[str]:
     """
     Get stream path from the project secret. If wasn't set, take it from the system configurations
 
     :param project:          Project name.
     :param application_name: Application name, None for model_monitoring_stream.
 
-    :return: Monitoring stream path to the relevant application.
+    :return: Monitoring stream paths to the relevant application.
     """
 
     stream_uri = server.api.crud.secrets.Secrets().get_project_secret(
@@ -162,7 +164,19 @@ def get_stream_path(
         target="online",
         application_name=application_name,
     )
-
-    return mlrun.common.model_monitoring.helpers.parse_monitoring_stream_path(
-        stream_uri=stream_uri, project=project, application_name=application_name
-    )
+    if isinstance(
+        stream_uri, list
+    ):  # ML-6043 - server side gets the new and the old stream uris.
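+        # For example (illustrative), the secret may hold both the old URI,
+        # "v3io:///users/pipelines/{project}/model-endpoints/stream", and the
+        # new one, "v3io:///projects/{project}/model-endpoints/stream"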
+ return [ + mlrun.common.model_monitoring.helpers.parse_monitoring_stream_path( + stream_uri=stream_uri_item, + project=project, + application_name=application_name, + ) + for stream_uri_item in stream_uri + ] + return [ + mlrun.common.model_monitoring.helpers.parse_monitoring_stream_path( + stream_uri=stream_uri, project=project, application_name=application_name + ) + ] diff --git a/tests/model_monitoring/test_target_path.py b/tests/model_monitoring/test_target_path.py index 0c03b98db36..ed3d7581b1d 100644 --- a/tests/model_monitoring/test_target_path.py +++ b/tests/model_monitoring/test_target_path.py @@ -59,9 +59,7 @@ def test_get_file_target_path(): def test_get_stream_path(): # default stream path stream_path = mlrun.model_monitoring.get_stream_path(project=TEST_PROJECT) - assert ( - stream_path == f"v3io:///users/pipelines/{TEST_PROJECT}/model-endpoints/stream" - ) + assert stream_path == f"v3io:///projects/{TEST_PROJECT}/model-endpoints/stream" mlrun.mlconf.ce.mode = "full" stream_path = mlrun.model_monitoring.get_stream_path(project=TEST_PROJECT) From a931aae0c0467328da1dd412d2ce5ef2faeb6485 Mon Sep 17 00:00:00 2001 From: davesh0812 <85231462+davesh0812@users.noreply.github.com> Date: Mon, 15 Apr 2024 12:31:43 +0300 Subject: [PATCH 097/119] [Model Monitoring] Delete pipelines `access_key` from SERVING_SPEC_ENV [1.6.x] (#5425) --- mlrun/model_monitoring/stream_processing.py | 1 - 1 file changed, 1 deletion(-) diff --git a/mlrun/model_monitoring/stream_processing.py b/mlrun/model_monitoring/stream_processing.py index 090276bf745..0a2ff24fc64 100644 --- a/mlrun/model_monitoring/stream_processing.py +++ b/mlrun/model_monitoring/stream_processing.py @@ -350,7 +350,6 @@ def apply_tsdb_target(name, after): rate="10/m", time_col=EventFieldType.TIMESTAMP, container=self.tsdb_container, - access_key=self.v3io_access_key, v3io_frames=self.v3io_framesd, infer_columns_from_data=True, index_cols=[ From 29fa3b6016d5efbd5a69843c6376e66551126d41 Mon Sep 17 00:00:00 2001 From: Laury Bueno Date: Wed, 17 Apr 2024 10:46:08 -0300 Subject: [PATCH 098/119] [Pipelines] Isolate version-specific KFP code on its own modules (#5417) --- dockerfiles/mlrun-api/Dockerfile | 4 +- dockerfiles/test/Dockerfile | 4 +- mlrun/__init__.py | 11 +- mlrun/__main__.py | 22 +- mlrun/db/httpdb.py | 13 +- mlrun/launcher/base.py | 5 +- mlrun/platforms/__init__.py | 12 - mlrun/platforms/iguazio.py | 201 ------ mlrun/projects/operations.py | 18 +- mlrun/projects/pipelines.py | 89 +-- mlrun/projects/project.py | 16 +- mlrun/run.py | 43 +- mlrun/runtimes/base.py | 6 +- mlrun/runtimes/function.py | 12 +- mlrun/runtimes/kubejob.py | 3 +- mlrun/runtimes/pod.py | 54 +- mlrun/runtimes/remotesparkjob.py | 2 +- mlrun/runtimes/sparkjob/spark3job.py | 2 +- mlrun/runtimes/utils.py | 38 -- mlrun/utils/helpers.py | 33 +- pipeline-adapters/.gitignore | 1 + .../mlrun-pipelines-kfp-common/setup.py | 42 ++ .../src/mlrun_pipelines/common/__init__.py | 14 + .../src/mlrun_pipelines/common/helpers.py | 127 ++++ .../src/mlrun_pipelines/common/models.py | 104 ++++ .../src/mlrun_pipelines/common/mounts.py | 49 ++ .../src/mlrun_pipelines/common/ops.py | 580 ++++++------------ .../mlrun-pipelines-kfp-v1-8/setup.py | 45 ++ .../src/mlrun_pipelines/helpers.py | 46 ++ .../src/mlrun_pipelines/mixins.py | 93 +++ .../src/mlrun_pipelines/models.py | 113 ++++ .../src/mlrun_pipelines/mounts.py | 382 ++++++++---- .../src/mlrun_pipelines/ops.py | 290 +++++++++ .../src/mlrun_pipelines/patcher.py | 95 +++ .../src/mlrun_pipelines/utils.py | 69 +++ pyproject.toml | 2 - 
requirements.txt | 6 +- server/api/api/endpoints/feature_store.py | 2 +- server/api/api/endpoints/pipelines.py | 3 +- server/api/api/endpoints/workflows.py | 5 +- .../api/crud/model_monitoring/deployment.py | 3 +- server/api/crud/pipelines.py | 109 +--- server/api/crud/projects.py | 6 +- server/api/crud/workflows.py | 4 +- tests/api/api/test_pipelines.py | 12 +- tests/api/api/test_projects.py | 9 +- tests/api/crud/test_pipelines.py | 10 +- tests/api/runtimes/test_dask.py | 2 +- tests/api/runtimes/test_kubejob.py | 2 +- tests/platforms/test_iguazio.py | 21 +- tests/platforms/test_other.py | 13 +- tests/runtimes/test_base.py | 5 +- tests/system/demos/churn/assets/workflow.py | 3 +- tests/system/demos/churn/test_churn.py | 3 +- tests/system/demos/horovod/assets/workflow.py | 3 +- tests/system/demos/horovod/test_horovod.py | 3 +- tests/system/demos/sklearn/assets/workflow.py | 3 +- tests/system/examples/dask/test_dask.py | 4 +- tests/system/examples/jobs/test_jobs.py | 2 +- .../feature_store/test_feature_store.py | 23 +- .../model_monitoring/test_model_monitoring.py | 7 +- tests/system/projects/test_project.py | 55 +- tests/system/runtimes/test_kfp.py | 3 +- tests/system/runtimes/test_nuclio.py | 3 +- tests/test_execution.py | 5 +- tests/test_kfp.py | 8 +- 66 files changed, 1828 insertions(+), 1149 deletions(-) create mode 100644 pipeline-adapters/.gitignore create mode 100644 pipeline-adapters/mlrun-pipelines-kfp-common/setup.py create mode 100644 pipeline-adapters/mlrun-pipelines-kfp-common/src/mlrun_pipelines/common/__init__.py create mode 100644 pipeline-adapters/mlrun-pipelines-kfp-common/src/mlrun_pipelines/common/helpers.py create mode 100644 pipeline-adapters/mlrun-pipelines-kfp-common/src/mlrun_pipelines/common/models.py create mode 100644 pipeline-adapters/mlrun-pipelines-kfp-common/src/mlrun_pipelines/common/mounts.py rename mlrun/kfpops.py => pipeline-adapters/mlrun-pipelines-kfp-common/src/mlrun_pipelines/common/ops.py (71%) create mode 100644 pipeline-adapters/mlrun-pipelines-kfp-v1-8/setup.py create mode 100644 pipeline-adapters/mlrun-pipelines-kfp-v1-8/src/mlrun_pipelines/helpers.py create mode 100644 pipeline-adapters/mlrun-pipelines-kfp-v1-8/src/mlrun_pipelines/mixins.py create mode 100644 pipeline-adapters/mlrun-pipelines-kfp-v1-8/src/mlrun_pipelines/models.py rename mlrun/platforms/other.py => pipeline-adapters/mlrun-pipelines-kfp-v1-8/src/mlrun_pipelines/mounts.py (65%) create mode 100644 pipeline-adapters/mlrun-pipelines-kfp-v1-8/src/mlrun_pipelines/ops.py create mode 100644 pipeline-adapters/mlrun-pipelines-kfp-v1-8/src/mlrun_pipelines/patcher.py create mode 100644 pipeline-adapters/mlrun-pipelines-kfp-v1-8/src/mlrun_pipelines/utils.py diff --git a/dockerfiles/mlrun-api/Dockerfile b/dockerfiles/mlrun-api/Dockerfile index 639a01337bb..a208d705b47 100644 --- a/dockerfiles/mlrun-api/Dockerfile +++ b/dockerfiles/mlrun-api/Dockerfile @@ -65,7 +65,9 @@ ENV MLRUN_httpdb__dirpath=/mlrun/db ENV MLRUN_httpdb__port=8080 COPY . . -RUN python -m pip install .[complete-api] +RUN python -m pip install .[complete-api] &&\ + pip install ./pipeline-adapters/mlrun-pipelines-kfp-common &&\ + pip install ./pipeline-adapters/mlrun-pipelines-kfp-v1-8 VOLUME /mlrun/db CMD ["python", "-m", "server.api", "db"] diff --git a/dockerfiles/test/Dockerfile b/dockerfiles/test/Dockerfile index c4aabc5684c..adc8086020c 100644 --- a/dockerfiles/test/Dockerfile +++ b/dockerfiles/test/Dockerfile @@ -73,6 +73,8 @@ RUN python -m pip install \ COPY . . 
-RUN pip install -e .[complete] +RUN pip install -e .[complete] &&\ + pip install -e ./pipeline-adapters/mlrun-pipelines-kfp-common &&\ + pip install -e ./pipeline-adapters/mlrun-pipelines-kfp-v1-8 ENV NO_COLOR=1 diff --git a/mlrun/__init__.py b/mlrun/__init__.py index ee2711a7b33..9a198336486 100644 --- a/mlrun/__init__.py +++ b/mlrun/__init__.py @@ -22,11 +22,16 @@ "handler", "ArtifactType", "get_secret_or_env", + "mount_v3io", + "v3io_cred", + "auto_mount", + "VolumeMount", ] from os import environ, path import dotenv +import mlrun_pipelines from .config import config as mlconf from .datastore import DataItem, store_manager @@ -35,7 +40,6 @@ from .execution import MLClientCtx from .model import RunObject, RunTemplate, new_task from .package import ArtifactType, DefaultPackager, Packager, handler -from .platforms import VolumeMount, auto_mount, mount_v3io, v3io_cred from .projects import ( ProjectMetadata, build_function, @@ -65,6 +69,11 @@ __version__ = Version().get()["version"] +VolumeMount = mlrun_pipelines.common.mounts.VolumeMount +mount_v3io = mlrun_pipelines.mounts.mount_v3io +v3io_cred = mlrun_pipelines.mounts.v3io_cred +auto_mount = mlrun_pipelines.mounts.auto_mount + def get_version(): """get current mlrun version""" diff --git a/mlrun/__main__.py b/mlrun/__main__.py index ad456bc576f..c0a75248e7e 100644 --- a/mlrun/__main__.py +++ b/mlrun/__main__.py @@ -30,6 +30,7 @@ import dotenv import pandas as pd import yaml +from mlrun_pipelines.mounts import auto_mount as auto_mount_modifier from tabulate import tabulate import mlrun @@ -40,7 +41,6 @@ from .db import get_run_db from .errors import err_to_str from .model import RunTemplate -from .platforms import auto_mount as auto_mount_modifier from .projects import load_project from .run import ( get_object, @@ -469,6 +469,17 @@ def run( is_flag=True, help="ensure the project exists, if not, create project", ) +@click.option( + "--state-file-path", default="/tmp/state", help="path to file with state data" +) +@click.option( + "--image-file-path", default="/tmp/image", help="path to file with image data" +) +@click.option( + "--full-image-file-path", + default="/tmp/fullimage", + help="path to file with full image data", +) def build( func_url, name, @@ -488,6 +499,9 @@ def build( skip, env_file, ensure_project, + state_file_path, + image_file_path, + full_image_file_path, ): """Build a container image from code and requirements.""" @@ -575,12 +589,12 @@ def build( state = func.status.state image = func.spec.image if kfp: - with open("/tmp/state", "w") as fp: + with open(state_file_path, "w") as fp: fp.write(state or "none") full_image = func.full_image_path(image) or "" - with open("/tmp/image", "w") as fp: + with open(image_file_path, "w") as fp: fp.write(image) - with open("/tmp/fullimage", "w") as fp: + with open(full_image_file_path, "w") as fp: fp.write(full_image) print("Full image path = ", full_image) diff --git a/mlrun/db/httpdb.py b/mlrun/db/httpdb.py index 2236c0b3533..b43be77472d 100644 --- a/mlrun/db/httpdb.py +++ b/mlrun/db/httpdb.py @@ -14,7 +14,6 @@ import enum import http import re -import tempfile import time import traceback import typing @@ -24,9 +23,9 @@ from typing import Dict, List, Optional, Union from urllib.parse import urlparse -import kfp import requests import semver +from mlrun_pipelines.utils import compile_pipeline import mlrun import mlrun.common.schemas @@ -45,7 +44,6 @@ datetime_to_iso, dict_to_json, logger, - new_pipe_metadata, normalize_name, version, ) @@ -1631,14 +1629,11 @@ def submit_pipeline( 
if isinstance(pipeline, str): pipe_file = pipeline else: - pipe_file = tempfile.NamedTemporaryFile(suffix=".yaml", delete=False).name - conf = new_pipe_metadata( + pipe_file = compile_pipeline( artifact_path=artifact_path, cleanup_ttl=cleanup_ttl, - op_transformers=ops, - ) - kfp.compiler.Compiler().compile( - pipeline, pipe_file, type_check=False, pipeline_conf=conf + ops=ops, + pipeline=pipeline, ) if pipe_file.endswith(".yaml"): diff --git a/mlrun/launcher/base.py b/mlrun/launcher/base.py index 2a327172512..60b4a6168ea 100644 --- a/mlrun/launcher/base.py +++ b/mlrun/launcher/base.py @@ -18,10 +18,11 @@ import uuid from typing import Any, Callable, Dict, List, Optional, Union +import mlrun_pipelines.common.ops + import mlrun.common.schemas import mlrun.config import mlrun.errors -import mlrun.kfpops import mlrun.lists import mlrun.model import mlrun.runtimes @@ -390,7 +391,7 @@ def _wrap_run_result( return if result and runtime.kfp and err is None: - mlrun.kfpops.write_kfpmeta(result) + mlrun_pipelines.common.ops.write_kfpmeta(result) self._log_track_results(runtime.is_child, result, run) diff --git a/mlrun/platforms/__init__.py b/mlrun/platforms/__init__.py index 56e5967c40d..9ff4b22c07a 100644 --- a/mlrun/platforms/__init__.py +++ b/mlrun/platforms/__init__.py @@ -19,20 +19,8 @@ from .iguazio import ( V3ioStreamClient, - VolumeMount, add_or_refresh_credentials, is_iguazio_session_cookie, - mount_v3io, - v3io_cred, -) -from .other import ( - auto_mount, - mount_configmap, - mount_hostpath, - mount_pvc, - mount_s3, - mount_secret, - set_env_variables, ) diff --git a/mlrun/platforms/iguazio.py b/mlrun/platforms/iguazio.py index ac7116c8e48..ba9b64bf33a 100644 --- a/mlrun/platforms/iguazio.py +++ b/mlrun/platforms/iguazio.py @@ -15,217 +15,16 @@ import json import os import urllib -from collections import namedtuple from urllib.parse import urlparse -import kfp.dsl import requests -import semver import v3io import mlrun.errors -from mlrun.config import config as mlconf from mlrun.utils import dict_to_json _cached_control_session = None -VolumeMount = namedtuple("Mount", ["path", "sub_path"]) - - -def mount_v3io( - name="v3io", - remote="", - access_key="", - user="", - secret=None, - volume_mounts=None, -): - """Modifier function to apply to a Container Op to volume mount a v3io path - - :param name: the volume name - :param remote: the v3io path to use for the volume. ~/ prefix will be replaced with /users// - :param access_key: the access key used to auth against v3io. if not given V3IO_ACCESS_KEY env var will be used - :param user: the username used to auth against v3io. if not given V3IO_USERNAME env var will be used - :param secret: k8s secret name which would be used to get the username and access key to auth against v3io. - :param volume_mounts: list of VolumeMount. empty volume mounts & remote will default to mount /v3io & /User. 
- """ - volume_mounts, user = _enrich_and_validate_v3io_mounts( - remote=remote, - volume_mounts=volume_mounts, - user=user, - ) - - def _attach_volume_mounts_and_creds(container_op: kfp.dsl.ContainerOp): - from kubernetes import client as k8s_client - - vol = v3io_to_vol(name, remote, access_key, user, secret=secret) - container_op.add_volume(vol) - for volume_mount in volume_mounts: - container_op.container.add_volume_mount( - k8s_client.V1VolumeMount( - mount_path=volume_mount.path, - sub_path=volume_mount.sub_path, - name=name, - ) - ) - - if not secret: - container_op = v3io_cred(access_key=access_key, user=user)(container_op) - return container_op - - return _attach_volume_mounts_and_creds - - -def _enrich_and_validate_v3io_mounts(remote="", volume_mounts=None, user=""): - if remote and not volume_mounts: - raise mlrun.errors.MLRunInvalidArgumentError( - "volume_mounts must be specified when remote is given" - ) - - # Empty remote & volume_mounts defaults are volume mounts of /v3io and /User - if not remote and not volume_mounts: - user = _resolve_mount_user(user) - if not user: - raise mlrun.errors.MLRunInvalidArgumentError( - "user name/env must be specified when using empty remote and volume_mounts" - ) - volume_mounts = [ - VolumeMount(path="/v3io", sub_path=""), - VolumeMount(path="/User", sub_path="users/" + user), - ] - - if not isinstance(volume_mounts, list) and any( - [not isinstance(x, VolumeMount) for x in volume_mounts] - ): - raise TypeError("mounts should be a list of Mount") - - return volume_mounts, user - - -def _resolve_mount_user(user=None): - return user or os.environ.get("V3IO_USERNAME") - - -def mount_spark_conf(): - def _mount_spark(container_op: kfp.dsl.ContainerOp): - from kubernetes import client as k8s_client - - container_op.container.add_volume_mount( - k8s_client.V1VolumeMount( - name="spark-master-config", mount_path="/etc/config/spark" - ) - ) - return container_op - - return _mount_spark - - -def mount_v3iod(namespace, v3io_config_configmap): - def _mount_v3iod(container_op: kfp.dsl.ContainerOp): - from kubernetes import client as k8s_client - - def add_vol(name, mount_path, host_path): - vol = k8s_client.V1Volume( - name=name, - host_path=k8s_client.V1HostPathVolumeSource(path=host_path, type=""), - ) - container_op.add_volume(vol) - container_op.container.add_volume_mount( - k8s_client.V1VolumeMount(mount_path=mount_path, name=name) - ) - - # this is a legacy path for the daemon shared memory - host_path = "/dev/shm/" - - # path to shared memory for daemon was changed in Iguazio 3.2.3-b1 - igz_version = mlrun.mlconf.get_parsed_igz_version() - if igz_version and igz_version >= semver.VersionInfo.parse("3.2.3-b1"): - host_path = "/var/run/iguazio/dayman-shm/" - add_vol(name="shm", mount_path="/dev/shm", host_path=host_path + namespace) - - add_vol( - name="v3iod-comm", - mount_path="/var/run/iguazio/dayman", - host_path="/var/run/iguazio/dayman/" + namespace, - ) - - vol = k8s_client.V1Volume( - name="daemon-health", empty_dir=k8s_client.V1EmptyDirVolumeSource() - ) - container_op.add_volume(vol) - container_op.container.add_volume_mount( - k8s_client.V1VolumeMount( - mount_path="/var/run/iguazio/daemon_health", name="daemon-health" - ) - ) - - vol = k8s_client.V1Volume( - name="v3io-config", - config_map=k8s_client.V1ConfigMapVolumeSource( - name=v3io_config_configmap, default_mode=420 - ), - ) - container_op.add_volume(vol) - container_op.container.add_volume_mount( - k8s_client.V1VolumeMount(mount_path="/etc/config/v3io", name="v3io-config") - ) - 
- container_op.container.add_env_variable( - k8s_client.V1EnvVar( - name="CURRENT_NODE_IP", - value_from=k8s_client.V1EnvVarSource( - field_ref=k8s_client.V1ObjectFieldSelector( - api_version="v1", field_path="status.hostIP" - ) - ), - ) - ) - container_op.container.add_env_variable( - k8s_client.V1EnvVar( - name="IGZ_DATA_CONFIG_FILE", value="/igz/java/conf/v3io.conf" - ) - ) - - return container_op - - return _mount_v3iod - - -def v3io_cred(api="", user="", access_key=""): - """ - Modifier function to copy local v3io env vars to container - - Usage:: - - train = train_op(...) - train.apply(use_v3io_cred()) - """ - - def _use_v3io_cred(container_op: kfp.dsl.ContainerOp): - from os import environ - - from kubernetes import client as k8s_client - - web_api = api or environ.get("V3IO_API") or mlconf.v3io_api - _user = user or environ.get("V3IO_USERNAME") - _access_key = access_key or environ.get("V3IO_ACCESS_KEY") - v3io_framesd = mlconf.v3io_framesd or environ.get("V3IO_FRAMESD") - - return ( - container_op.container.add_env_variable( - k8s_client.V1EnvVar(name="V3IO_API", value=web_api) - ) - .add_env_variable(k8s_client.V1EnvVar(name="V3IO_USERNAME", value=_user)) - .add_env_variable( - k8s_client.V1EnvVar(name="V3IO_ACCESS_KEY", value=_access_key) - ) - .add_env_variable( - k8s_client.V1EnvVar(name="V3IO_FRAMESD", value=v3io_framesd) - ) - ) - - return _use_v3io_cred - def split_path(mntpath=""): if mntpath[0] == "/": diff --git a/mlrun/projects/operations.py b/mlrun/projects/operations.py index c96aee3751c..03f8101fe6b 100644 --- a/mlrun/projects/operations.py +++ b/mlrun/projects/operations.py @@ -15,7 +15,7 @@ import warnings from typing import Dict, List, Optional, Union -import kfp +from mlrun_pipelines.models import PipelineNodeWrapper import mlrun from mlrun.utils import hub_prefix @@ -76,7 +76,7 @@ def run_function( notifications: List[mlrun.model.Notification] = None, returns: Optional[List[Union[str, Dict[str, str]]]] = None, builder_env: Optional[list] = None, -) -> Union[mlrun.model.RunObject, kfp.dsl.ContainerOp]: +) -> Union[mlrun.model.RunObject, PipelineNodeWrapper]: """Run a local or remote task as part of a local/kubeflow pipeline run_function() allow you to execute a function locally, on a remote cluster, or as part of an automated workflow @@ -86,7 +86,7 @@ def run_function( when functions run as part of a workflow/pipeline (project.run()) some attributes can be set at the run level, e.g. local=True will run all the functions locally, setting artifact_path will direct all outputs to the same path. project runs provide additional notifications/reporting and exception handling. - inside a Kubeflow pipeline (KFP) run_function() generates KFP "ContainerOps" which are used to form a DAG + inside a Kubeflow pipeline (KFP) run_function() generates KFP node (see PipelineNodeWrapper) which forms a DAG some behavior may differ between regular runs and deferred KFP runs. example (use with function object):: @@ -159,7 +159,7 @@ def my_pipe(url=""): artifact type can be given there. The artifact key must appear in the dictionary as "key": "the_key". :param builder_env: env vars dict for source archive config/credentials e.g. 
builder_env={"GIT_TOKEN": token} - :return: MLRun RunObject or KubeFlow containerOp + :return: MLRun RunObject or PipelineNodeWrapper """ engine, function = _get_engine_and_function(function, project_object) task = mlrun.new_task( @@ -247,7 +247,7 @@ def build_function( overwrite_build_params: bool = False, extra_args: str = None, force_build: bool = False, -) -> Union[BuildStatus, kfp.dsl.ContainerOp]: +) -> Union[BuildStatus, PipelineNodeWrapper]: """deploy ML function, build container with its dependencies :param function: Name of the function (in the project) or function object @@ -287,7 +287,11 @@ def build_function( if overwrite_build_params: function.spec.build.commands = None if requirements or requirements_file: - function.with_requirements(requirements, requirements_file, overwrite=True) + function.with_requirements( + requirements=requirements, + requirements_file=requirements_file, + overwrite=True, + ) if commands: function.with_commands(commands) return function.deploy_step( @@ -351,7 +355,7 @@ def deploy_function( builder_env: dict = None, project_object=None, mock: bool = None, -) -> Union[DeployStatus, kfp.dsl.ContainerOp]: +) -> Union[DeployStatus, PipelineNodeWrapper]: """deploy real-time (nuclio based) functions :param function: name of the function (in the project) or function object diff --git a/mlrun/projects/pipelines.py b/mlrun/projects/pipelines.py index 37d52ad0c58..2f2d70caa0c 100644 --- a/mlrun/projects/pipelines.py +++ b/mlrun/projects/pipelines.py @@ -19,9 +19,10 @@ import typing import uuid -import kfp.compiler -from kfp import dsl +import mlrun_pipelines.common.models +import mlrun_pipelines.patcher from kfp.compiler import compiler +from mlrun_pipelines.helpers import new_pipe_metadata import mlrun import mlrun.common.schemas @@ -30,7 +31,6 @@ from mlrun.utils import ( get_ui_url, logger, - new_pipe_metadata, normalize_workflow_name, retry_until_successful, ) @@ -300,72 +300,6 @@ def _enrich_kfp_pod_security_context(kfp_pod_template, function): } -# When we run pipelines, the kfp.compile.Compile.compile() method takes the decorated function with @dsl.pipeline and -# converts it to a k8s object. As part of the flow in the Compile.compile() method, -# we call _create_and_write_workflow, which builds a dictionary from the workflow and then writes it to a file. -# Unfortunately, the kfp sdk does not provide an API for configuring priority_class_name and other attributes. 
-# I ran across the following problem when seeking for a method to set the priority_class_name: -# https://github.com/kubeflow/pipelines/issues/3594 -# When we patch the _create_and_write_workflow, we can eventually obtain the dictionary right before we write it -# to a file and enrich it with argo compatible fields, make sure you looking for the same argo version we use -# https://github.com/argoproj/argo-workflows/blob/release-2.7/pkg/apis/workflow/v1alpha1/workflow_types.go -def _create_enriched_mlrun_workflow( - self, - pipeline_func: typing.Callable, - pipeline_name: typing.Optional[typing.Text] = None, - pipeline_description: typing.Optional[typing.Text] = None, - params_list: typing.Optional[typing.List[dsl.PipelineParam]] = None, - pipeline_conf: typing.Optional[dsl.PipelineConf] = None, -): - """Call internal implementation of create_workflow and enrich with mlrun functions attributes""" - workflow = self._original_create_workflow( - pipeline_func, pipeline_name, pipeline_description, params_list, pipeline_conf - ) - # We don't want to interrupt the original flow and don't know all the scenarios the function could be called. - # that's why we have try/except on all the code of the enrichment and also specific try/except for errors that - # we know can be raised. - try: - functions = [] - if pipeline_context.functions: - try: - functions = pipeline_context.functions.values() - except Exception as err: - logger.debug( - "Unable to retrieve project functions, not enriching workflow with mlrun", - error=err_to_str(err), - ) - return workflow - - # enrich each pipeline step with your desire k8s attribute - for kfp_step_template in workflow["spec"]["templates"]: - if kfp_step_template.get("container"): - for function_obj in functions: - # we condition within each function since the comparison between the function and - # the kfp pod may change depending on the attribute type. 
- _set_function_attribute_on_kfp_pod( - kfp_step_template, - function_obj, - "PriorityClassName", - "priority_class_name", - ) - _enrich_kfp_pod_security_context( - kfp_step_template, - function_obj, - ) - except mlrun.errors.MLRunInvalidArgumentError: - raise - except Exception as err: - logger.debug( - "Something in the enrichment of kfp pods failed", error=err_to_str(err) - ) - return workflow - - -# patching function as class method -kfp.compiler.Compiler._original_create_workflow = kfp.compiler.Compiler._create_workflow -kfp.compiler.Compiler._create_workflow = _create_enriched_mlrun_workflow - - def get_db_function(project, key) -> mlrun.runtimes.BaseRuntime: project_instance, name, tag, hash_key = parse_versioned_object_uri( key, project.metadata.name @@ -451,7 +385,10 @@ def __init__( @property def state(self): - if self._state not in mlrun.run.RunStatuses.stable_statuses(): + if ( + self._state + not in mlrun_pipelines.common.models.RunStatuses.stable_statuses() + ): self._state = self._engine.get_state(self.run_id, self.project) return self._state @@ -748,7 +685,7 @@ def run( err = None try: workflow_handler(**workflow_spec.args) - state = mlrun.run.RunStatuses.succeeded + state = mlrun_pipelines.common.models.RunStatuses.succeeded except Exception as exc: err = exc logger.exception("Workflow run failed") @@ -756,7 +693,7 @@ def run( f":x: Workflow {workflow_id} run failed!, error: {err_to_str(exc)}", mlrun.common.schemas.NotificationSeverity.ERROR, ) - state = mlrun.run.RunStatuses.failed + state = mlrun_pipelines.common.models.RunStatuses.failed mlrun.run.wait_for_runs_completion(pipeline_context.runs_map.values()) project.notifiers.push_pipeline_run_results( pipeline_context.runs_map.values(), state=state @@ -899,9 +836,9 @@ def run( f":x: Workflow {workflow_name} run failed!, error: {err_to_str(exc)}", mlrun.common.schemas.NotificationSeverity.ERROR, ) - state = mlrun.run.RunStatuses.failed + state = mlrun_pipelines.common.models.RunStatuses.failed else: - state = mlrun.run.RunStatuses.succeeded + state = mlrun_pipelines.common.models.RunStatuses.succeeded project.notifiers.push_pipeline_start_message( project.metadata.name, ) @@ -1094,7 +1031,7 @@ def load_and_run( context.log_result(key="workflow_id", value=run.run_id) context.log_result(key="engine", value=run._engine.engine, commit=True) - if run.state == mlrun.run.RunStatuses.failed: + if run.state == mlrun_pipelines.common.models.RunStatuses.failed: raise RuntimeError(f"Workflow {workflow_log_message} failed") from run.exc if wait_for_completion: @@ -1109,7 +1046,7 @@ def load_and_run( pipeline_state, _, _ = project.get_run_status(run) context.log_result(key="workflow_state", value=pipeline_state, commit=True) - if pipeline_state != mlrun.run.RunStatuses.succeeded: + if pipeline_state != mlrun_pipelines.common.models.RunStatuses.succeeded: raise RuntimeError( f"Workflow {workflow_log_message} failed, state={pipeline_state}" ) diff --git a/mlrun/projects/project.py b/mlrun/projects/project.py index b3b7e0ebe5d..34657dbb5bf 100644 --- a/mlrun/projects/project.py +++ b/mlrun/projects/project.py @@ -30,10 +30,12 @@ import dotenv import git import git.exc -import kfp +import mlrun_pipelines.common.models +import mlrun_pipelines.mounts import nuclio import requests import yaml +from mlrun_pipelines.models import PipelineNodeWrapper import mlrun.common.helpers import mlrun.common.schemas.model_monitoring @@ -2705,7 +2707,7 @@ def run( notifications=notifications, ) # run is None when scheduling - if run and run.state == 
mlrun.run.RunStatuses.failed: + if run and run.state == mlrun_pipelines.common.models.RunStatuses.failed: return run if not workflow_spec.schedule: # Failure and schedule messages already logged @@ -2882,7 +2884,7 @@ def run_function( notifications: typing.List[mlrun.model.Notification] = None, returns: Optional[List[Union[str, Dict[str, str]]]] = None, builder_env: Optional[dict] = None, - ) -> typing.Union[mlrun.model.RunObject, kfp.dsl.ContainerOp]: + ) -> typing.Union[mlrun.model.RunObject, PipelineNodeWrapper]: """Run a local or remote task as part of a local/kubeflow pipeline example (use with project):: @@ -2935,7 +2937,7 @@ def run_function( artifact type can be given there. The artifact key must appear in the dictionary as "key": "the_key". :param builder_env: env vars dict for source archive config/credentials e.g. builder_env={"GIT_TOKEN": token} - :return: MLRun RunObject or KubeFlow containerOp + :return: MLRun RunObject or PipelineNodeWrapper """ return run_function( function, @@ -2978,7 +2980,7 @@ def build_function( requirements_file: str = None, extra_args: str = None, force_build: bool = False, - ) -> typing.Union[BuildStatus, kfp.dsl.ContainerOp]: + ) -> typing.Union[BuildStatus, PipelineNodeWrapper]: """deploy ML function, build container with its dependencies :param function: name of the function (in the project) or function object @@ -3100,7 +3102,7 @@ def build_image( requirements_file: str = None, extra_args: str = None, target_dir: str = None, - ) -> typing.Union[BuildStatus, kfp.dsl.ContainerOp]: + ) -> typing.Union[BuildStatus, PipelineNodeWrapper]: """Builder docker image for the project, based on the project's build config. Parameters allow to override the build config. If the project has a source configured and pull_at_runtime is not configured, this source will be cloned to the @@ -3220,7 +3222,7 @@ def deploy_function( verbose: bool = None, builder_env: dict = None, mock: bool = None, - ) -> typing.Union[DeployStatus, kfp.dsl.ContainerOp]: + ) -> typing.Union[DeployStatus, PipelineNodeWrapper]: """deploy real-time (nuclio based) functions :param function: name of the function (in the project) or function object diff --git a/mlrun/run.py b/mlrun/run.py index 561997c813b..b83fc431aac 100644 --- a/mlrun/run.py +++ b/mlrun/run.py @@ -29,11 +29,13 @@ import nuclio import yaml from kfp import Client +from mlrun_pipelines.common.models import RunStatuses +from mlrun_pipelines.common.ops import format_summary_from_kfp_run, show_kfp_run +from mlrun_pipelines.models import PipelineRun import mlrun.common.schemas import mlrun.errors import mlrun.utils.helpers -from mlrun.kfpops import format_summary_from_kfp_run, show_kfp_run from .common.helpers import parse_versioned_object_uri from .config import config as mlconf @@ -69,41 +71,6 @@ ) -class RunStatuses(object): - succeeded = "Succeeded" - failed = "Failed" - skipped = "Skipped" - error = "Error" - running = "Running" - - @staticmethod - def all(): - return [ - RunStatuses.succeeded, - RunStatuses.failed, - RunStatuses.skipped, - RunStatuses.error, - RunStatuses.running, - ] - - @staticmethod - def stable_statuses(): - return [ - RunStatuses.succeeded, - RunStatuses.failed, - RunStatuses.skipped, - RunStatuses.error, - ] - - @staticmethod - def transient_statuses(): - return [ - status - for status in RunStatuses.all() - if status not in RunStatuses.stable_statuses() - ] - - def function_to_module(code="", workdir=None, secrets=None, silent=False): """Load code, notebook or mlrun function as .py module this function 
can import a local/remote py file or notebook @@ -1004,7 +971,7 @@ def get_pipeline( :param project: the project of the pipeline run :param remote: read kfp data from mlrun service (default=True) - :return: kfp run dict + :return: kfp run """ namespace = namespace or mlconf.namespace if remote: @@ -1028,7 +995,7 @@ def get_pipeline( not format_ or format_ == mlrun.common.schemas.PipelinesFormat.summary.value ): - resp = format_summary_from_kfp_run(resp) + resp = format_summary_from_kfp_run(PipelineRun(resp)) show_kfp_run(resp) return resp diff --git a/mlrun/runtimes/base.py b/mlrun/runtimes/base.py index 96f547b92c1..62670eb6ab2 100644 --- a/mlrun/runtimes/base.py +++ b/mlrun/runtimes/base.py @@ -21,6 +21,7 @@ from typing import Callable, Dict, List, Optional, Union import requests.exceptions +from mlrun_pipelines.common.ops import mlrun_op from nuclio.build import mlrun_footer import mlrun.common.schemas @@ -36,7 +37,6 @@ from ..config import config from ..datastore import store_manager from ..errors import err_to_str -from ..kfpops import mlrun_op from ..lists import RunList from ..model import BaseMetadata, HyperParamOptions, ImageBuilder, ModelObj, RunObject from ..utils import ( @@ -700,11 +700,11 @@ def as_step( "key": "the_key". :param auto_build: when set to True and the function require build it will be built on the first function run, use only if you dont plan on changing the build config between runs - :return: KubeFlow containerOp + :return: mlrun_pipelines.models.PipelineNodeWrapper """ # if the function contain KFP PipelineParams (futures) pass the full spec to the - # ContainerOp this way KFP will substitute the params with previous step outputs + # PipelineNodeWrapper this way KFP will substitute the params with previous step outputs if use_db and not self._has_pipeline_param(): # if the same function is built as part of the pipeline we do not use the versioned function # rather the latest function w the same tag so we can pick up the updated image/status diff --git a/mlrun/runtimes/function.py b/mlrun/runtimes/function.py index 78e58a06f5b..aea654b55e6 100644 --- a/mlrun/runtimes/function.py +++ b/mlrun/runtimes/function.py @@ -25,6 +25,9 @@ import semver from aiohttp.client import ClientSession from kubernetes import client +from mlrun_pipelines.common.mounts import VolumeMount +from mlrun_pipelines.common.ops import deploy_op +from mlrun_pipelines.mounts import mount_v3io, v3io_cred from nuclio.deploy import find_dashboard_url, get_deploy_status from nuclio.triggers import V3IOStreamTrigger @@ -37,16 +40,9 @@ from ..config import config as mlconf from ..errors import err_to_str -from ..kfpops import deploy_op from ..lists import RunList from ..model import RunObject -from ..platforms.iguazio import ( - VolumeMount, - mount_v3io, - parse_path, - split_path, - v3io_cred, -) +from ..platforms.iguazio import parse_path, split_path from ..utils import get_in, logger, update_in from .base import FunctionStatus, RunError from .pod import KubeResource, KubeResourceSpec diff --git a/mlrun/runtimes/kubejob.py b/mlrun/runtimes/kubejob.py index 9035ef82d8a..534f906b53d 100644 --- a/mlrun/runtimes/kubejob.py +++ b/mlrun/runtimes/kubejob.py @@ -15,11 +15,12 @@ import time import warnings +from mlrun_pipelines.common.ops import build_op + import mlrun.common.schemas import mlrun.db import mlrun.errors -from ..kfpops import build_op from ..model import RunObject from ..utils import get_in, logger from .pod import KubeResource diff --git a/mlrun/runtimes/pod.py b/mlrun/runtimes/pod.py 
index d162ebbef4c..f6b201d4944 100644 --- a/mlrun/runtimes/pod.py +++ b/mlrun/runtimes/pod.py @@ -19,8 +19,9 @@ from enum import Enum import dotenv -import kfp.dsl import kubernetes.client as k8s_client +import mlrun_pipelines.mounts +from mlrun_pipelines.mixins import KfpAdapterMixin import mlrun.errors import mlrun.utils.regex @@ -40,7 +41,6 @@ from ..utils import logger, update_in from .base import BaseRuntime, FunctionSpec, spec_fields from .utils import ( - apply_kfp, get_gpu_from_resource_requirement, get_item_name, set_named_item, @@ -865,12 +865,12 @@ def default(): @classmethod def all_mount_modifiers(cls): return [ - mlrun.v3io_cred.__name__, - mlrun.mount_v3io.__name__, - mlrun.platforms.other.mount_pvc.__name__, - mlrun.auto_mount.__name__, - mlrun.platforms.mount_s3.__name__, - mlrun.platforms.set_env_variables.__name__, + mlrun_pipelines.mounts.v3io_cred.__name__, + mlrun_pipelines.mounts.mount_v3io.__name__, + mlrun_pipelines.mounts.mount_pvc.__name__, + mlrun_pipelines.mounts.auto_mount.__name__, + mlrun_pipelines.mounts.mount_s3.__name__, + mlrun_pipelines.mounts.set_env_variables.__name__, ] @classmethod @@ -887,27 +887,27 @@ def is_auto_modifier(cls, modifier): def _get_auto_modifier(): # If we're running on Iguazio - use v3io_cred if mlconf.igz_version != "": - return mlrun.v3io_cred + return mlrun_pipelines.mounts.v3io_cred # Else, either pvc mount if it's configured or do nothing otherwise pvc_configured = ( "MLRUN_PVC_MOUNT" in os.environ or "pvc_name" in mlconf.get_storage_auto_mount_params() ) - return mlrun.platforms.other.mount_pvc if pvc_configured else None + return mlrun_pipelines.mounts.mount_pvc if pvc_configured else None def get_modifier(self): return { AutoMountType.none: None, - AutoMountType.v3io_credentials: mlrun.v3io_cred, - AutoMountType.v3io_fuse: mlrun.mount_v3io, - AutoMountType.pvc: mlrun.platforms.other.mount_pvc, + AutoMountType.v3io_credentials: mlrun_pipelines.mounts.v3io_cred, + AutoMountType.v3io_fuse: mlrun_pipelines.mounts.mount_v3io, + AutoMountType.pvc: mlrun_pipelines.mounts.mount_pvc, AutoMountType.auto: self._get_auto_modifier(), - AutoMountType.s3: mlrun.platforms.mount_s3, - AutoMountType.env: mlrun.platforms.set_env_variables, + AutoMountType.s3: mlrun_pipelines.mounts.mount_s3, + AutoMountType.env: mlrun_pipelines.mounts.set_env_variables, }[self] -class KubeResource(BaseRuntime): +class KubeResource(BaseRuntime, KfpAdapterMixin): """ A parent class for runtimes that generate k8s resources when executing. """ @@ -916,7 +916,7 @@ class KubeResource(BaseRuntime): _is_nested = True def __init__(self, spec=None, metadata=None): - super().__init__(metadata, spec) + super().__init__(metadata=metadata, spec=spec) self.verbose = False @property @@ -949,26 +949,6 @@ def to_dict(self, fields=None, exclude=None, strip=False): spec["disable_auto_mount"] = False return struct - def apply(self, modify): - """ - Apply a modifier to the runtime which is used to change the runtimes k8s object's spec. - Modifiers can be either KFP modifiers or MLRun modifiers (which are compatible with KFP). All modifiers accept - a `kfp.dsl.ContainerOp` object, apply some changes on its spec and return it so modifiers can be chained - one after the other. 
- - :param modify: a modifier runnable object - :return: the runtime (self) after the modifications - """ - - # Kubeflow pipeline have a hook to add the component to the DAG on ContainerOp init - # we remove the hook to suppress kubeflow op registration and return it after the apply() - old_op_handler = kfp.dsl._container_op._register_op_handler - kfp.dsl._container_op._register_op_handler = lambda x: self.metadata.name - cop = kfp.dsl.ContainerOp("name", "image") - kfp.dsl._container_op._register_op_handler = old_op_handler - - return apply_kfp(modify, cop, self) - def set_env_from_secret(self, name, secret=None, secret_key=None): """set pod environment var from secret""" secret_key = secret_key or name diff --git a/mlrun/runtimes/remotesparkjob.py b/mlrun/runtimes/remotesparkjob.py index 2980f31c4b4..e5e1320a7d7 100644 --- a/mlrun/runtimes/remotesparkjob.py +++ b/mlrun/runtimes/remotesparkjob.py @@ -15,11 +15,11 @@ from subprocess import run import kubernetes.client +from mlrun_pipelines.mounts import mount_v3io, mount_v3iod import mlrun.errors from mlrun.config import config -from ..platforms.iguazio import mount_v3io, mount_v3iod from .kubejob import KubejobRuntime from .pod import KubeResourceSpec diff --git a/mlrun/runtimes/sparkjob/spark3job.py b/mlrun/runtimes/sparkjob/spark3job.py index 4a9051e1922..fa8e2c25134 100644 --- a/mlrun/runtimes/sparkjob/spark3job.py +++ b/mlrun/runtimes/sparkjob/spark3job.py @@ -15,6 +15,7 @@ import typing import kubernetes.client +from mlrun_pipelines.mounts import mount_v3io, mount_v3iod import mlrun.common.schemas.function import mlrun.errors @@ -23,7 +24,6 @@ from ...execution import MLClientCtx from ...model import RunObject -from ...platforms.iguazio import mount_v3io, mount_v3iod from ...utils import update_in, verify_field_regex from ..kubejob import KubejobRuntime from ..pod import KubeResourceSpec diff --git a/mlrun/runtimes/utils.py b/mlrun/runtimes/utils.py index 22b8eab3ca9..95929ff065b 100644 --- a/mlrun/runtimes/utils.py +++ b/mlrun/runtimes/utils.py @@ -21,7 +21,6 @@ from sys import stderr import pandas as pd -from kubernetes import client import mlrun import mlrun.common.constants @@ -281,43 +280,6 @@ def get_item_name(item, attr="name"): return getattr(item, attr, None) -def apply_kfp(modify, cop, runtime): - modify(cop) - - # Have to do it here to avoid circular dependencies - from .pod import AutoMountType - - if AutoMountType.is_auto_modifier(modify): - runtime.spec.disable_auto_mount = True - - api = client.ApiClient() - for k, v in cop.pod_labels.items(): - runtime.metadata.labels[k] = v - for k, v in cop.pod_annotations.items(): - runtime.metadata.annotations[k] = v - if cop.container.env: - env_names = [ - e.name if hasattr(e, "name") else e["name"] for e in runtime.spec.env - ] - for e in api.sanitize_for_serialization(cop.container.env): - name = e["name"] - if name in env_names: - runtime.spec.env[env_names.index(name)] = e - else: - runtime.spec.env.append(e) - env_names.append(name) - cop.container.env.clear() - - if cop.volumes and cop.container.volume_mounts: - vols = api.sanitize_for_serialization(cop.volumes) - mounts = api.sanitize_for_serialization(cop.container.volume_mounts) - runtime.spec.update_vols_and_mounts(vols, mounts) - cop.volumes.clear() - cop.container.volume_mounts.clear() - - return runtime - - def verify_limits( resources_field_name, mem=None, diff --git a/mlrun/utils/helpers.py b/mlrun/utils/helpers.py index 48af29b417d..ce385408a02 100644 --- a/mlrun/utils/helpers.py +++ b/mlrun/utils/helpers.py @@ 
-41,6 +41,7 @@
 import yaml
 from dateutil import parser
 from deprecated import deprecated
+from mlrun_pipelines.models import PipelineRun
 from pandas._libs.tslibs.timestamps import Timedelta, Timestamp
 from yaml.representer import RepresenterError
 
@@ -784,34 +785,6 @@ def gen_list(items=None, tag="td"):
     return style + '<table class="tg">\n' + out + "</table>\n\n"
 
 
-def new_pipe_metadata(
-    artifact_path: str = None,
-    cleanup_ttl: int = None,
-    op_transformers: typing.List[typing.Callable] = None,
-):
-    from kfp.dsl import PipelineConf
-
-    def _set_artifact_path(task):
-        from kubernetes import client as k8s_client
-
-        task.add_env_variable(
-            k8s_client.V1EnvVar(name="MLRUN_ARTIFACT_PATH", value=artifact_path)
-        )
-        return task
-
-    conf = PipelineConf()
-    cleanup_ttl = cleanup_ttl or int(config.kfp_ttl)
-
-    if cleanup_ttl:
-        conf.set_ttl_seconds_after_finished(cleanup_ttl)
-    if artifact_path:
-        conf.add_op_transformer(_set_artifact_path)
-    if op_transformers:
-        for op_transformer in op_transformers:
-            conf.add_op_transformer(op_transformer)
-    return conf
-
-
 def _convert_python_package_version_to_image_tag(version: typing.Optional[str]):
     return (
         version.replace("+", "-").replace("0.0.0-", "") if version is not None else None
@@ -1377,7 +1350,7 @@ def is_link_artifact(artifact):
     return artifact.kind == mlrun.common.schemas.ArtifactCategories.link.value
 
 
-def format_run(run: dict, with_project=False) -> dict:
+def format_run(run: PipelineRun, with_project=False) -> dict:
     fields = [
         "id",
         "name",
@@ -1414,7 +1387,7 @@ def format_run(run: PipelineRun, with_project=False) -> dict:
     # pipelines are yet to populate the status or workflow has failed
     # as observed https://jira.iguazeng.com/browse/ML-5195
     # set to unknown to ensure a status is returned
-    if run["status"] is None:
+    if run.get("status", None) is None:
         run["status"] = inflection.titleize(mlrun.runtimes.constants.RunStates.unknown)
     return run
diff --git a/pipeline-adapters/.gitignore b/pipeline-adapters/.gitignore
new file mode 100644
index 00000000000..17c666c0d75
--- /dev/null
+++ b/pipeline-adapters/.gitignore
@@ -0,0 +1 @@
+**/*.egg-info
diff --git a/pipeline-adapters/mlrun-pipelines-kfp-common/setup.py b/pipeline-adapters/mlrun-pipelines-kfp-common/setup.py
new file mode 100644
index 00000000000..b142b79f473
--- /dev/null
+++ b/pipeline-adapters/mlrun-pipelines-kfp-common/setup.py
@@ -0,0 +1,42 @@
+# Copyright 2024 Iguazio
+#
+# Licensed under the Apache License, Version 2.0 (the "License");
+# you may not use this file except in compliance with the License.
+# You may obtain a copy of the License at
+#
+# http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS,
+# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+# See the License for the specific language governing permissions and
+# limitations under the License.
+ +import logging + +from setuptools import find_namespace_packages, setup + +logging.basicConfig(level=logging.INFO) +logger = logging.getLogger("mlrun-kfp-setup") + +setup( + name="mlrun-pipelines-kfp-common-experiment", + version="0.1.3", + description="MLRun Pipelines package for providing KFP 1.8 compatibility", + author="Yaron Haviv", + author_email="yaronh@iguazio.com", + license="Apache License 2.0", + url="https://github.com/mlrun/mlrun", + packages=find_namespace_packages( + where="src/", + include=[ + "mlrun_pipelines.common", + ], + ), + package_dir={"": "src"}, + keywords=[ + "mlrun", + "kfp", + ], + python_requires=">=3.9, <3.12", +) diff --git a/pipeline-adapters/mlrun-pipelines-kfp-common/src/mlrun_pipelines/common/__init__.py b/pipeline-adapters/mlrun-pipelines-kfp-common/src/mlrun_pipelines/common/__init__.py new file mode 100644 index 00000000000..99be6280fc3 --- /dev/null +++ b/pipeline-adapters/mlrun-pipelines-kfp-common/src/mlrun_pipelines/common/__init__.py @@ -0,0 +1,14 @@ +# Copyright 2024 Iguazio +# +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. +# You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. +# diff --git a/pipeline-adapters/mlrun-pipelines-kfp-common/src/mlrun_pipelines/common/helpers.py b/pipeline-adapters/mlrun-pipelines-kfp-common/src/mlrun_pipelines/common/helpers.py new file mode 100644 index 00000000000..e30a4f4f498 --- /dev/null +++ b/pipeline-adapters/mlrun-pipelines-kfp-common/src/mlrun_pipelines/common/helpers.py @@ -0,0 +1,127 @@ +# Copyright 2024 Iguazio +# +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. +# You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. +# + +import inspect +from collections.abc import Iterator, MutableMapping +from typing import Any, NoReturn + +PROJECT_ANNOTATION = "mlrun/project" +RUN_ANNOTATION = "mlrun/pipeline-step-type" +FUNCTION_ANNOTATION = "mlrun/function-uri" + + +class FlexibleMapper(MutableMapping): + """ + Custom mapper implementation that provides flexibility in handling the mapping + of data between class attributes and a dictionary. + + This implementation includes compatibility with dictionary-like objects through + get, set, and delete methods. This allows the class to be handled like a native dict, + making it highly flexible. + + Inheritors of this class encapsulate KFP data models and abstract away from MLRun their + differences across different versions + """ + + _external_data: dict + + def __init__(self, external_data: Any): + """ + Constructs a FlexibleMapper from the given external_data source. + + :param external_data: the initial data source. Can be a dict or any object with a 'to_dict' method. 
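+
+        Note that a source which is neither a dict nor exposes ``to_dict`` is
+        silently ignored here, leaving the internal mapping unset.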
+ """ + if isinstance(external_data, dict): + self._external_data = external_data + elif hasattr(external_data, "to_dict"): + self._external_data = external_data.to_dict() + + def __getitem__(self, key: str) -> Any: + """ + Gets the value for the given key. If the key is not a class attribute, + it looks for it in the _external_data dict. + + :param key: the key to look up. + :return: the value associated with the key. + + :raises KeyError: if the key is not found. + """ + try: + return getattr(self, key) + except AttributeError: + return self._external_data[key] + + def __setitem__(self, key, value) -> NoReturn: + """ + Sets the value for the given key. If the key isn't a class attribute, + it sets it in the _external_data dict. + + :param key: the key to set. + :param value: the value to set for the key. + """ + try: + setattr(self, key, value) + except AttributeError: + self._external_data[key] = value + + def __delitem__(self, key) -> NoReturn: + """ + Deletes the item associated with the given key. If the key isn't a class attribute, + it deletes it in the _external_data dict. + + :param key: the key to delete. + :raises KeyError: if the key is not found. + """ + try: + delattr(self, key) + except AttributeError: + del self._external_data[key] + + def __len__(self) -> int: + """ + Returns the sum of the number of class attributes and items in the _external_data dict. + + :return: the length of the mapping. + """ + return len(self._external_data) + len(vars(self)) - 1 + + def __iter__(self) -> Iterator[str]: + """ + Returns an iterator over the keys of the mapping. It yields keys only from the class + attributes and not the _external_data dict. + + :return: an iterator over the object properties. + """ + yield from [ + m[0] + for m in inspect.getmembers(self) + if not callable(m[1]) and not m[0].startswith("_") + ] + + def __bool__(self) -> bool: + """ + Determines the boolean value of the mapping. The mapping is True if the _external_data dict is non-empty. + + :return: True if the external data mapping is non-empty; False otherwise. + """ + return bool(self._external_data) + + def to_dict(self) -> dict: + """ + Converts the mapping to a dict. This method follows the attribute rules defined on __iter__ + + :returns: a dict representation of the mapping. + """ + return {a: getattr(self, a, None) for a in self} diff --git a/pipeline-adapters/mlrun-pipelines-kfp-common/src/mlrun_pipelines/common/models.py b/pipeline-adapters/mlrun-pipelines-kfp-common/src/mlrun_pipelines/common/models.py new file mode 100644 index 00000000000..07492c5943c --- /dev/null +++ b/pipeline-adapters/mlrun-pipelines-kfp-common/src/mlrun_pipelines/common/models.py @@ -0,0 +1,104 @@ +# Copyright 2024 Iguazio +# +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. +# You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. +# + +from enum import Enum + + +class RunStatuses(Enum): + """ + Class for different types of statuses a 'PipelineRun' can have using an enum type. + Beyond enumerating all possible statuses, this class ensures comparisons are case-insensitive. 
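+
+    An illustrative comparison (matching is case-insensitive):
+
+        >>> RunStatuses("succeeded") == "SUCCEEDED"
+        True
+        >>> RunStatuses.failed == "Succeeded"
+        False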
+ + Statuses commonly used by MLRun and associated with KFP 1.8: + - succeeded: Indicates that the run has successfully completed. + - failed: Indicates that the run has failed to complete. + - skipped: Indicates that the run was skipped. + - error: Indicates that an error occurred during the run. + - running: Indicates that the run is currently ongoing. + + The statuses specific to KFP 2.0: + - runtime_state_unspecified: Indicates that the run's status is not specified; similar to "" (Unknown) on Argo. + - pending: Indicates that the run is pending, waiting to be executed. + - canceling: Indicates that a cancel request has been made for the run. + - canceled: Indicates that the run has been canceled. + - paused: Indicates that the run is currently paused or on hold. + + This class also includes methods for computing all statuses, stable statuses + (ones that will no longer change), and transient statuses (ones that may still change). + """ + + # States available on KFP 1.8 and traditionally used by MLRun + succeeded = "Succeeded" + failed = "Failed" + skipped = "Skipped" + error = "Error" # available only on KFP 1.8 or lower + running = "Running" + + # States available only on KFP 2.0 + runtime_state_unspecified = "Runtime_State_Unspecified" + pending = "Pending" + canceling = "Canceling" + canceled = "Canceled" + paused = "Paused" + + def __eq__(self, other): + return self.value.casefold() == str(other).casefold() + + def __hash__(self): + return hash(self.value) + + def __str__(self): + return self.value + + @classmethod + def _missing_(cls, value: str): + value = value.casefold() + for member in cls: + if member.value.casefold() == value: + return member + return None + + @staticmethod + def all(): + return [ + RunStatuses.succeeded, + RunStatuses.failed, + RunStatuses.skipped, + RunStatuses.error, + RunStatuses.running, + RunStatuses.runtime_state_unspecified, + RunStatuses.pending, + RunStatuses.canceling, + RunStatuses.canceled, + RunStatuses.paused, + ] + + @staticmethod + def stable_statuses(): + return [ + RunStatuses.succeeded, + RunStatuses.failed, + RunStatuses.skipped, + RunStatuses.error, + RunStatuses.canceled, + ] + + @staticmethod + def transient_statuses(): + return [ + status + for status in RunStatuses.all() + if status not in RunStatuses.stable_statuses() + ] diff --git a/pipeline-adapters/mlrun-pipelines-kfp-common/src/mlrun_pipelines/common/mounts.py b/pipeline-adapters/mlrun-pipelines-kfp-common/src/mlrun_pipelines/common/mounts.py new file mode 100644 index 00000000000..1ce8c017d43 --- /dev/null +++ b/pipeline-adapters/mlrun-pipelines-kfp-common/src/mlrun_pipelines/common/mounts.py @@ -0,0 +1,49 @@ +# Copyright 2024 Iguazio +# +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. +# You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. 
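+#
+# Illustrative usage of the helpers below (the real call sites are the
+# per-version mount modules, e.g. mlrun_pipelines.mounts.mount_v3io):
+#
+#     # empty remote/volume_mounts default to mounting /v3io and /User,
+#     # resolving the user from V3IO_USERNAME when not passed explicitly
+#     mounts, user = _enrich_and_validate_v3io_mounts(user="admin")
+#
+#     # a non-empty remote requires explicit volume mounts
+#     mounts, user = _enrich_and_validate_v3io_mounts(
+#         remote="users/admin",
+#         volume_mounts=[VolumeMount(path="/User", sub_path="")],
+#     )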
+import os
+from collections import namedtuple
+
+import mlrun.errors
+
+VolumeMount = namedtuple("Mount", ["path", "sub_path"])
+
+
+def _enrich_and_validate_v3io_mounts(remote="", volume_mounts=None, user=""):
+    if remote and not volume_mounts:
+        raise mlrun.errors.MLRunInvalidArgumentError(
+            "volume_mounts must be specified when remote is given"
+        )
+
+    # Empty remote & volume_mounts defaults are volume mounts of /v3io and /User
+    if not remote and not volume_mounts:
+        user = _resolve_mount_user(user)
+        if not user:
+            raise mlrun.errors.MLRunInvalidArgumentError(
+                "user name/env must be specified when using empty remote and volume_mounts"
+            )
+        volume_mounts = [
+            VolumeMount(path="/v3io", sub_path=""),
+            VolumeMount(path="/User", sub_path="users/" + user),
+        ]
+
+    if not isinstance(volume_mounts, list) or any(
+        not isinstance(x, VolumeMount) for x in volume_mounts
+    ):
+        raise TypeError("volume_mounts should be a list of VolumeMount entries")
+
+    return volume_mounts, user
+
+
+def _resolve_mount_user(user=None):
+    return user or os.environ.get("V3IO_USERNAME")
diff --git a/mlrun/kfpops.py b/pipeline-adapters/mlrun-pipelines-kfp-common/src/mlrun_pipelines/common/ops.py
similarity index 71%
rename from mlrun/kfpops.py
rename to pipeline-adapters/mlrun-pipelines-kfp-common/src/mlrun_pipelines/common/ops.py
index 8e11e203939..e37152802d5 100644
--- a/mlrun/kfpops.py
+++ b/pipeline-adapters/mlrun-pipelines-kfp-common/src/mlrun_pipelines/common/ops.py
@@ -1,4 +1,4 @@
-# Copyright 2023 Iguazio
+# Copyright 2024 Iguazio
 #
 # Licensed under the Apache License, Version 2.0 (the "License");
 # you may not use this file except in compliance with the License.
@@ -11,22 +11,20 @@
 # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
 # See the License for the specific language governing permissions and
 # limitations under the License.
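A minimal sketch of the fallback behavior of the mounts helper above, assuming `V3IO_USERNAME` is set (the username is hypothetical):

    import os
    from mlrun_pipelines.common.mounts import _enrich_and_validate_v3io_mounts

    os.environ["V3IO_USERNAME"] = "jane"  # hypothetical user
    mounts, user = _enrich_and_validate_v3io_mounts()
    # mounts == [Mount(path="/v3io", sub_path=""),
    #            Mount(path="/User", sub_path="users/jane")]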
+# + import json import os -import os.path from copy import deepcopy -from typing import Dict, List, Union +from typing import Union -import inflection -from kfp import dsl -from kubernetes import client as k8s_client +import mlrun_pipelines.common.models import mlrun +from mlrun.config import config from mlrun.errors import err_to_str - -from .config import config -from .model import HyperParamOptions, RunSpec -from .utils import ( +from mlrun.model import HyperParamOptions, RunSpec +from mlrun.utils import ( dict_to_yaml, gen_md_table, get_artifact_target, @@ -44,12 +42,6 @@ KFPMETA_DIR = "/tmp" KFP_ARTIFACTS_DIR = "/tmp" -project_annotation = "mlrun/project" -run_annotation = "mlrun/pipeline-step-type" -function_annotation = "mlrun/function-uri" - -dsl.ContainerOp._DISABLE_REUSABLE_COMPONENT_WARNING = True - class PipelineRunType: run = "run" @@ -57,125 +49,6 @@ class PipelineRunType: deploy = "deploy" -def is_num(v): - return isinstance(v, (int, float, complex)) - - -def write_kfpmeta(struct): - if "status" not in struct: - return - - results = struct["status"].get("results", {}) - metrics = { - "metrics": [ - {"name": k, "numberValue": v} for k, v in results.items() if is_num(v) - ], - } - with open(os.path.join(KFPMETA_DIR, "mlpipeline-metrics.json"), "w") as f: - json.dump(metrics, f) - - struct = deepcopy(struct) - uid = struct["metadata"].get("uid") - project = struct["metadata"].get("project", config.default_project) - output_artifacts, out_dict = get_kfp_outputs( - struct["status"].get(run_keys.artifacts, []), - struct["metadata"].get("labels", {}), - project, - ) - - results["run_id"] = results.get("run_id", "/".join([project, uid])) - for key in struct["spec"].get(run_keys.outputs, []): - val = "None" - if key in out_dict: - val = out_dict[key] - elif key in results: - val = results[key] - try: - # NOTE: if key has "../x", it would fail on path traversal - path = os.path.join(KFP_ARTIFACTS_DIR, key) - if not mlrun.utils.helpers.is_safe_path(KFP_ARTIFACTS_DIR, path): - logger.warning( - "Path traversal is not allowed ignoring", path=path, key=key - ) - continue - path = os.path.abspath(path) - logger.info("Writing artifact output", path=path, val=val) - with open(path, "w") as fp: - fp.write(str(val)) - except Exception as exc: - logger.warning("Failed writing to temp file. Ignoring", exc=repr(exc)) - pass - - text = "# Run Report\n" - if "iterations" in struct["status"]: - del struct["status"]["iterations"] - - text += "## Metadata\n```yaml\n" + dict_to_yaml(struct) + "```\n" - - metadata = { - "outputs": output_artifacts - + [{"type": "markdown", "storage": "inline", "source": text}] - } - with open(os.path.join(KFPMETA_DIR, "mlpipeline-ui-metadata.json"), "w") as f: - json.dump(metadata, f) - - -def get_kfp_outputs(artifacts, labels, project): - outputs = [] - out_dict = {} - for output in artifacts: - if is_legacy_artifact(output): - key = output["key"] - # The spec in a legacy artifact is contained in the main object, so using this assignment saves us a lot - # of if/else in the rest of this function. 
- output_spec = output - else: - key = output.get("metadata")["key"] - output_spec = output.get("spec", {}) - - target = output_spec.get("target_path", "") - target = output_spec.get("inline", target) - - out_dict[key] = get_artifact_target(output, project=project) - - if target.startswith("v3io:///"): - target = target.replace("v3io:///", "http://v3io-webapi:8081/") - - user = labels.get("v3io_user", "") or os.environ.get("V3IO_USERNAME", "") - if target.startswith("/User/"): - user = user or "admin" - target = "http://v3io-webapi:8081/users/" + user + target[5:] - - viewer = output_spec.get("viewer", "") - if viewer in ["web-app", "chart"]: - meta = {"type": "web-app", "source": target} - outputs += [meta] - - elif viewer == "table": - header = output_spec.get("header", None) - if header and target.endswith(".csv"): - meta = { - "type": "table", - "format": "csv", - "header": header, - "source": target, - } - outputs += [meta] - - elif output.get("kind") == "dataset": - header = output_spec.get("header") - preview = output_spec.get("preview") - if preview: - tbl_md = gen_md_table(header, preview) - text = f"## Dataset: {key} \n\n" + tbl_md - del output_spec["preview"] - - meta = {"type": "markdown", "storage": "inline", "source": text} - outputs += [meta] - - return outputs, out_dict - - def mlrun_op( name: str = "", project: str = "", @@ -202,7 +75,7 @@ def mlrun_op( hyper_param_options=None, verbose=None, scrape_metrics=False, - returns: List[Union[str, Dict[str, str]]] = None, + returns: list[Union[str, dict[str, str]]] = None, auto_build: bool = False, ): """mlrun KubeFlow pipelines operator, use to form pipeline steps @@ -292,6 +165,8 @@ def mlrun_pipeline( train.outputs['model-txt']).apply(mount_v3io()) """ + from mlrun_pipelines.ops import generate_pipeline_node + secrets = [] if secrets is None else secrets params = {} if params is None else params hyperparams = {} if hyperparams is None else hyperparams @@ -451,110 +326,18 @@ def mlrun_pipeline( image, mlrun.get_version(), str(version.Version().get_python_version()) ) - cop = dsl.ContainerOp( - name=name, - image=image, - command=cmd + [command], - file_outputs=file_outputs, - output_artifact_paths={ - "mlpipeline-ui-metadata": os.path.join( - KFPMETA_DIR, "mlpipeline-ui-metadata.json" - ), - "mlpipeline-metrics": os.path.join(KFPMETA_DIR, "mlpipeline-metrics.json"), - }, - ) - cop = add_default_function_resources(cop) - cop = add_function_node_selection_attributes(container_op=cop, function=function) - - add_annotations(cop, PipelineRunType.run, function, func_url, project) - add_labels(cop, function, scrape_metrics) - if code_env: - cop.container.add_env_variable( - k8s_client.V1EnvVar(name="MLRUN_EXEC_CODE", value=code_env) - ) - if registry: - cop.container.add_env_variable( - k8s_client.V1EnvVar( - name="MLRUN_HTTPDB__BUILDER__DOCKER_REGISTRY", value=registry - ) - ) - - add_default_env(k8s_client, cop) - - return cop - - -def deploy_op( - name, - function, - func_url=None, - source="", - project="", - models: list = None, - env: dict = None, - tag="", - verbose=False, -): - cmd = ["python", "-m", "mlrun", "deploy"] - if source: - cmd += ["-s", source] - if tag: - cmd += ["--tag", tag] - if verbose: - cmd += ["--verbose"] - if project: - cmd += ["-p", project] - - if models: - for m in models: - for key in ["key", "model_path", "model_url", "class_name", "model_url"]: - if key in m: - m[key] = str(m[key]) # verify we stringify pipeline params - if function.kind == mlrun.runtimes.RuntimeKinds.serving: - cmd += ["-m", 
json.dumps(m)] - else: - cmd += ["-m", f"{m['key']}={m['model_path']}"] - - if env: - for key, val in env.items(): - cmd += ["--env", f"{key}={val}"] - - if func_url: - cmd += ["-f", func_url] - else: - runtime = f"{function.to_dict()}" - cmd += [runtime] - - cop = dsl.ContainerOp( - name=name, - image=config.kfp_image, - command=cmd, - file_outputs={"endpoint": "/tmp/output", "name": "/tmp/name"}, + return generate_pipeline_node( + project, + name, + image, + cmd + [command], + file_outputs, + function, + func_url, + scrape_metrics, + code_env, + registry, ) - cop = add_default_function_resources(cop) - cop = add_function_node_selection_attributes(container_op=cop, function=function) - - add_annotations(cop, PipelineRunType.deploy, function, func_url) - add_default_env(k8s_client, cop) - return cop - - -def add_env(env=None): - """ - Modifier function to add env vars from dict - Usage: - train = train_op(...) - train.apply(add_env({'MY_ENV':'123'})) - """ - - env = {} if env is None else env - - def _add_env(task): - for k, v in env.items(): - task.add_env_variable(k8s_client.V1EnvVar(name=k, value=v)) - return task - - return _add_env def build_op( @@ -569,6 +352,7 @@ def build_op( skip_deployed=False, ): """build Docker image.""" + from mlrun_pipelines.ops import generate_image_builder_pipeline_node cmd = ["python", "-m", "mlrun", "build", "--kfp"] if function: @@ -594,78 +378,58 @@ def build_op( if func_url and not function: cmd += [func_url] - cop = dsl.ContainerOp( - name=name, - image=config.kfp_image, - command=cmd, - file_outputs={"state": "/tmp/state", "image": "/tmp/image"}, - ) - cop = add_default_function_resources(cop) - cop = add_function_node_selection_attributes(container_op=cop, function=function) - - add_annotations(cop, PipelineRunType.build, function, func_url) - if config.httpdb.builder.docker_registry: - cop.container.add_env_variable( - k8s_client.V1EnvVar( - name="MLRUN_HTTPDB__BUILDER__DOCKER_REGISTRY", - value=config.httpdb.builder.docker_registry, - ) - ) - if "IGZ_NAMESPACE_DOMAIN" in os.environ: - cop.container.add_env_variable( - k8s_client.V1EnvVar( - name="IGZ_NAMESPACE_DOMAIN", - value=os.environ.get("IGZ_NAMESPACE_DOMAIN"), - ) - ) - - is_v3io = function.spec.build.source and function.spec.build.source.startswith( - "v3io" - ) - if "V3IO_ACCESS_KEY" in os.environ and is_v3io: - cop.container.add_env_variable( - k8s_client.V1EnvVar( - name="V3IO_ACCESS_KEY", value=os.environ.get("V3IO_ACCESS_KEY") - ) - ) + return generate_image_builder_pipeline_node(name, function, func_url, cmd) - add_default_env(k8s_client, cop) - return cop +def deploy_op( + name, + function, + func_url=None, + source="", + project="", + models: list = None, + env: dict = None, + tag="", + verbose=False, +): + from mlrun_pipelines.ops import generate_deployer_pipeline_node + cmd = ["python", "-m", "mlrun", "deploy"] + if source: + cmd += ["-s", source] + if tag: + cmd += ["--tag", tag] + if verbose: + cmd += ["--verbose"] + if project: + cmd += ["-p", project] -def add_default_env(k8s_client, cop): - cop.container.add_env_variable( - k8s_client.V1EnvVar( - "MLRUN_NAMESPACE", - value_from=k8s_client.V1EnvVarSource( - field_ref=k8s_client.V1ObjectFieldSelector( - field_path="metadata.namespace" - ) - ), - ) - ) + if models: + for m in models: + for key in ["key", "model_path", "model_url", "class_name", "model_url"]: + if key in m: + m[key] = str(m[key]) # verify we stringify pipeline params + if function.kind == mlrun.runtimes.RuntimeKinds.serving: + cmd += ["-m", json.dumps(m)] + else: 
+ cmd += ["-m", f"{m['key']}={m['model_path']}"] - if config.httpdb.api_url: - cop.container.add_env_variable( - k8s_client.V1EnvVar(name="MLRUN_DBPATH", value=config.httpdb.api_url) - ) + if env: + for key, val in env.items(): + cmd += ["--env", f"{key}={val}"] - if config.mpijob_crd_version: - cop.container.add_env_variable( - k8s_client.V1EnvVar( - name="MLRUN_MPIJOB_CRD_VERSION", value=config.mpijob_crd_version - ) - ) + if func_url: + cmd += ["-f", func_url] + else: + runtime = f"{function.to_dict()}" + cmd += [runtime] - auth_env_var = mlrun.runtimes.constants.FunctionEnvironmentVariables.auth_session - if auth_env_var in os.environ or "V3IO_ACCESS_KEY" in os.environ: - cop.container.add_env_variable( - k8s_client.V1EnvVar( - name=auth_env_var, - value=os.environ.get(auth_env_var) or os.environ.get("V3IO_ACCESS_KEY"), - ) - ) + return generate_deployer_pipeline_node( + name, + function, + func_url, + cmd, + ) def get_default_reg(): @@ -677,79 +441,16 @@ def get_default_reg(): return "" -def add_annotations(cop, kind, function, func_url=None, project=None): - if func_url and func_url.startswith("db://"): - func_url = func_url[len("db://") :] - cop.add_pod_annotation(run_annotation, kind) - cop.add_pod_annotation(project_annotation, project or function.metadata.project) - cop.add_pod_annotation(function_annotation, func_url or function.uri) - - -def add_labels(cop, function, scrape_metrics=False): - prefix = mlrun.runtimes.utils.mlrun_key - cop.add_pod_label(prefix + "class", function.kind) - cop.add_pod_label(prefix + "function", function.metadata.name) - cop.add_pod_label(prefix + "name", cop.human_name) - cop.add_pod_label(prefix + "project", function.metadata.project) - cop.add_pod_label(prefix + "tag", function.metadata.tag or "latest") - cop.add_pod_label(prefix + "scrape-metrics", "True" if scrape_metrics else "False") - - -def generate_kfp_dag_and_resolve_project(run, project=None): - workflow = run.get("pipeline_runtime", {}).get("workflow_manifest") - if not workflow: - return None, project, None - workflow = json.loads(workflow) - - templates = {} - for template in workflow["spec"]["templates"]: - project = project or get_in( - template, ["metadata", "annotations", project_annotation], "" - ) - name = template["name"] - templates[name] = { - "run_type": get_in( - template, ["metadata", "annotations", run_annotation], "" - ), - "function": get_in( - template, ["metadata", "annotations", function_annotation], "" - ), - } - - nodes = workflow["status"].get("nodes", {}) - dag = {} - for node in nodes.values(): - name = node["displayName"] - record = { - k: node[k] for k in ["phase", "startedAt", "finishedAt", "type", "id"] - } - - # snake case - # align kfp fields to mlrun snake case convention - # create snake_case for consistency. 
- # retain the camelCase for compatibility - for key in list(record.keys()): - record[inflection.underscore(key)] = record[key] - - record["parent"] = node.get("boundaryID", "") - record["name"] = name - record["children"] = node.get("children", []) - if name in templates: - record["function"] = templates[name].get("function") - record["run_type"] = templates[name].get("run_type") - dag[node["id"]] = record - - return dag, project, workflow["status"].get("message", "") - - def format_summary_from_kfp_run( kfp_run, project=None, run_db: "mlrun.db.RunDBInterface" = None ): + from mlrun_pipelines.ops import generate_kfp_dag_and_resolve_project + override_project = project if project and project != "*" else None dag, project, message = generate_kfp_dag_and_resolve_project( kfp_run, override_project ) - run_id = get_in(kfp_run, "run.id") + run_id = kfp_run.id logger.debug("Formatting summary from KFP run", run_id=run_id, project=project) # run db parameter allows us to use the same db session for the whole flow and avoid session isolation issues @@ -770,7 +471,7 @@ def format_summary_from_kfp_run( short_run = { "graph": dag, - "run": mlrun.utils.helpers.format_run(kfp_run["run"]), + "run": mlrun.utils.helpers.format_run(kfp_run), } short_run["run"]["project"] = project short_run["run"]["message"] = message @@ -780,9 +481,9 @@ def format_summary_from_kfp_run( def show_kfp_run(run, clear_output=False): phase_to_color = { - mlrun.run.RunStatuses.failed: "red", - mlrun.run.RunStatuses.succeeded: "green", - mlrun.run.RunStatuses.skipped: "white", + mlrun_pipelines.common.models.RunStatuses.failed: "red", + mlrun_pipelines.common.models.RunStatuses.succeeded: "green", + mlrun_pipelines.common.models.RunStatuses.skipped: "white", } runtype_to_shape = { PipelineRunType.run: "ellipse", @@ -838,31 +539,118 @@ def show_kfp_run(run, clear_output=False): logger.warning(f"failed to plot graph, {err_to_str(exc)}") -def add_default_function_resources( - container_op: dsl.ContainerOp, -) -> dsl.ContainerOp: - default_resources = config.get_default_function_pod_resources() - for resource_name, resource_value in default_resources["requests"].items(): - if resource_value: - container_op.container.add_resource_request(resource_name, resource_value) +def is_num(v): + return isinstance(v, (int, float, complex)) - for resource_name, resource_value in default_resources["limits"].items(): - if resource_value: - container_op.container.add_resource_limit(resource_name, resource_value) - return container_op +def write_kfpmeta(struct): + if "status" not in struct: + return -def add_function_node_selection_attributes( - function, container_op: dsl.ContainerOp -) -> dsl.ContainerOp: - if not mlrun.runtimes.RuntimeKinds.is_local_runtime(function.kind): - if getattr(function.spec, "node_selector"): - container_op.node_selector = function.spec.node_selector + results = struct["status"].get("results", {}) + metrics = { + "metrics": [ + {"name": k, "numberValue": v} for k, v in results.items() if is_num(v) + ], + } + with open(os.path.join(KFPMETA_DIR, "mlpipeline-metrics.json"), "w") as f: + json.dump(metrics, f) - if getattr(function.spec, "tolerations"): - container_op.tolerations = function.spec.tolerations + struct = deepcopy(struct) + uid = struct["metadata"].get("uid") + project = struct["metadata"].get("project", config.default_project) + output_artifacts, out_dict = get_kfp_outputs( + struct["status"].get(run_keys.artifacts, []), + struct["metadata"].get("labels", {}), + project, + ) + + # /tmp/run_id + results["run_id"] 
= results.get("run_id", "/".join([project, uid])) + for key in struct["spec"].get(run_keys.outputs, []): + val = "None" + if key in out_dict: + val = out_dict[key] + elif key in results: + val = results[key] + try: + # NOTE: if key has "../x", it would fail on path traversal + path = os.path.join(KFP_ARTIFACTS_DIR, key) + if not mlrun.utils.helpers.is_safe_path(KFP_ARTIFACTS_DIR, path): + logger.warning( + "Path traversal is not allowed ignoring", path=path, key=key + ) + continue + path = os.path.abspath(path) + logger.info("Writing artifact output", path=path, val=val) + with open(path, "w") as fp: + fp.write(str(val)) + except Exception as exc: + logger.warning("Failed writing to temp file. Ignoring", exc=err_to_str(exc)) + pass + + text = "# Run Report\n" + if "iterations" in struct["status"]: + del struct["status"]["iterations"] + + text += "## Metadata\n```yaml\n" + dict_to_yaml(struct) + "```\n" + + metadata = {"outputs": [{"type": "markdown", "storage": "inline", "source": text}]} + with open(os.path.join(KFPMETA_DIR, "mlpipeline-ui-metadata.json"), "w") as f: + json.dump(metadata, f) - if getattr(function.spec, "affinity"): - container_op.affinity = function.spec.affinity - return container_op +def get_kfp_outputs(artifacts, labels, project): + outputs = [] + out_dict = {} + for output in artifacts: + if is_legacy_artifact(output): + key = output["key"] + # The spec in a legacy artifact is contained in the main object, so using this assignment saves us a lot + # of if/else in the rest of this function. + output_spec = output + else: + key = output.get("metadata")["key"] + output_spec = output.get("spec", {}) + + target = output_spec.get("target_path", "") + target = output_spec.get("inline", target) + + out_dict[key] = get_artifact_target(output, project=project) + + if target.startswith("v3io:///"): + target = target.replace("v3io:///", "http://v3io-webapi:8081/") + + user = labels.get("v3io_user", "") or os.environ.get("V3IO_USERNAME", "") + if target.startswith("/User/"): + user = user or "admin" + target = "http://v3io-webapi:8081/users/" + user + target[5:] + + viewer = output_spec.get("viewer", "") + if viewer in ["web-app", "chart"]: + meta = {"type": "web-app", "source": target} + outputs += [meta] + + elif viewer == "table": + header = output_spec.get("header", None) + if header and target.endswith(".csv"): + meta = { + "type": "table", + "format": "csv", + "header": header, + "source": target, + } + outputs += [meta] + + elif output.get("kind") == "dataset": + header = output_spec.get("header") + preview = output_spec.get("preview") + if preview: + tbl_md = gen_md_table(header, preview) + text = f"## Dataset: {key} \n\n" + tbl_md + del output_spec["preview"] + + meta = {"type": "markdown", "storage": "inline", "source": text} + outputs += [meta] + + return outputs, out_dict diff --git a/pipeline-adapters/mlrun-pipelines-kfp-v1-8/setup.py b/pipeline-adapters/mlrun-pipelines-kfp-v1-8/setup.py new file mode 100644 index 00000000000..9783092adee --- /dev/null +++ b/pipeline-adapters/mlrun-pipelines-kfp-v1-8/setup.py @@ -0,0 +1,45 @@ +# Copyright 2024 Iguazio +# +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. 
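For orientation, this is roughly what `write_kfpmeta` above leaves behind for a run with a single numeric result (a sketch with hypothetical values, trimmed to the fields the function reads):

    struct = {
        "metadata": {"uid": "uid-1", "project": "demo"},
        "status": {"results": {"accuracy": 0.93}},
        "spec": {},
    }
    # after write_kfpmeta(struct):
    #   /tmp/mlpipeline-metrics.json     -> {"metrics": [{"name": "accuracy", "numberValue": 0.93}]}
    #   /tmp/mlpipeline-ui-metadata.json -> {"outputs": [{"type": "markdown", "storage": "inline",
    #                                                     "source": "# Run Report\n## Metadata\n..."}]}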
+# You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. + +import logging + +from setuptools import find_namespace_packages, setup + +logging.basicConfig(level=logging.INFO) +logger = logging.getLogger("mlrun-kfp-setup") + +setup( + name="mlrun-pipelines-kfp-v1-8-experiment", + version="0.1.3", + description="MLRun Pipelines package for providing KFP 1.8 compatibility", + author="Yaron Haviv", + author_email="yaronh@iguazio.com", + license="Apache License 2.0", + url="https://github.com/mlrun/mlrun", + packages=find_namespace_packages( + where="src/", + include=[ + "mlrun_pipelines", + ], + ), + package_dir={"": "src"}, + keywords=[ + "mlrun", + "kfp", + ], + python_requires=">=3.9, <3.12", + install_requires=[ + "kfp~=1.8", + ], +) diff --git a/pipeline-adapters/mlrun-pipelines-kfp-v1-8/src/mlrun_pipelines/helpers.py b/pipeline-adapters/mlrun-pipelines-kfp-v1-8/src/mlrun_pipelines/helpers.py new file mode 100644 index 00000000000..9da06df8ce5 --- /dev/null +++ b/pipeline-adapters/mlrun-pipelines-kfp-v1-8/src/mlrun_pipelines/helpers.py @@ -0,0 +1,46 @@ +# Copyright 2024 Iguazio +# +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. +# You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. +# + +import typing + +from mlrun.config import config + + +def new_pipe_metadata( + artifact_path: str = None, + cleanup_ttl: int = None, + op_transformers: list[typing.Callable] = None, +): + from kfp.dsl import PipelineConf + + def _set_artifact_path(task): + from kubernetes import client as k8s_client + + task.add_env_variable( + k8s_client.V1EnvVar(name="MLRUN_ARTIFACT_PATH", value=artifact_path) + ) + return task + + conf = PipelineConf() + cleanup_ttl = cleanup_ttl or int(config.kfp_ttl) + + if cleanup_ttl: + conf.set_ttl_seconds_after_finished(cleanup_ttl) + if artifact_path: + conf.add_op_transformer(_set_artifact_path) + if op_transformers: + for op_transformer in op_transformers: + conf.add_op_transformer(op_transformer) + return conf diff --git a/pipeline-adapters/mlrun-pipelines-kfp-v1-8/src/mlrun_pipelines/mixins.py b/pipeline-adapters/mlrun-pipelines-kfp-v1-8/src/mlrun_pipelines/mixins.py new file mode 100644 index 00000000000..607829a49ca --- /dev/null +++ b/pipeline-adapters/mlrun-pipelines-kfp-v1-8/src/mlrun_pipelines/mixins.py @@ -0,0 +1,93 @@ +# Copyright 2024 Iguazio +# +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. 
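A short sketch of how `new_pipe_metadata` above is meant to be wired into compilation; the artifact path and pipeline are hypothetical, and `compile_pipeline` later in this patch follows the same pattern:

    import kfp.compiler
    from kfp import dsl
    from mlrun_pipelines.helpers import new_pipe_metadata

    @dsl.pipeline(name="demo")
    def pipeline():
        pass

    conf = new_pipe_metadata(
        artifact_path="v3io:///projects/demo/artifacts",  # hypothetical path
        cleanup_ttl=3600,
    )
    kfp.compiler.Compiler().compile(pipeline, "workflow.yaml", pipeline_conf=conf)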
+# You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. +# + +import kfp +from mlrun_pipelines.common.helpers import PROJECT_ANNOTATION +from mlrun_pipelines.utils import apply_kfp + +import mlrun + + +class KfpAdapterMixin: + def apply(self, modify): + """ + Apply a modifier to the runtime which is used to change the runtimes k8s object's spec. + Modifiers can be either KFP modifiers or MLRun modifiers (which are compatible with KFP). All modifiers accept + a `kfp.dsl.ContainerOp` object, apply some changes on its spec and return it so modifiers can be chained + one after the other. + + :param modify: a modifier runnable object + :return: the runtime (self) after the modifications + """ + + # Kubeflow pipeline have a hook to add the component to the DAG on ContainerOp init + # we remove the hook to suppress kubeflow op registration and return it after the apply() + old_op_handler = kfp.dsl._container_op._register_op_handler + kfp.dsl._container_op._register_op_handler = lambda x: self.metadata.name + cop = kfp.dsl.ContainerOp("name", "image") + kfp.dsl._container_op._register_op_handler = old_op_handler + + return apply_kfp(modify, cop, self) + + +class PipelineProviderMixin: + def resolve_project_from_workflow_manifest(self, workflow_manifest): + templates = workflow_manifest.get("spec", {}).get("templates", []) + for template in templates: + project_from_annotation = ( + template.get("metadata", {}) + .get("annotations", {}) + .get(PROJECT_ANNOTATION) + ) + if project_from_annotation: + return project_from_annotation + command = template.get("container", {}).get("command", []) + action = None + for index, argument in enumerate(command): + if argument == "mlrun" and index + 1 < len(command): + action = command[index + 1] + break + if action: + if action == "deploy": + project = self._resolve_project_from_command( + command, + hyphen_p_is_also_project=True, + has_func_url_flags=True, + has_runtime_flags=False, + ) + if project: + return project + elif action == "run": + project = self._resolve_project_from_command( + command, + hyphen_p_is_also_project=False, + has_func_url_flags=True, + has_runtime_flags=True, + ) + if project: + return project + elif action == "build": + project = self._resolve_project_from_command( + command, + hyphen_p_is_also_project=False, + has_func_url_flags=False, + has_runtime_flags=True, + ) + if project: + return project + else: + raise NotImplementedError(f"Unknown action: {action}") + + return mlrun.mlconf.default_project diff --git a/pipeline-adapters/mlrun-pipelines-kfp-v1-8/src/mlrun_pipelines/models.py b/pipeline-adapters/mlrun-pipelines-kfp-v1-8/src/mlrun_pipelines/models.py new file mode 100644 index 00000000000..0cc61bc715b --- /dev/null +++ b/pipeline-adapters/mlrun-pipelines-kfp-v1-8/src/mlrun_pipelines/models.py @@ -0,0 +1,113 @@ +# Copyright 2024 Iguazio +# +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. 
+# You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. +# + +import json +from typing import Any, Union + +from kfp.dsl import ContainerOp +from kfp_server_api.models.api_run_detail import ApiRunDetail +from mlrun_pipelines.common.helpers import FlexibleMapper + +# class pointer for type checking on the main MLRun codebase +PipelineNodeWrapper = ContainerOp + + +class PipelineManifest(FlexibleMapper): + def __init__( + self, workflow_manifest: Union[str, dict] = "{}", pipeline_manifest: str = "{}" + ): + try: + main_manifest = json.loads(workflow_manifest) + except TypeError: + main_manifest = workflow_manifest + if pipeline_manifest: + pipeline_manifest = json.loads(pipeline_manifest) + main_manifest["status"] = pipeline_manifest.get("status", {}) + super().__init__(main_manifest) + + +class PipelineRun(FlexibleMapper): + _workflow_manifest: PipelineManifest + + def __init__(self, external_data: Any): + if isinstance(external_data, ApiRunDetail): + super().__init__(external_data.run) + self._workflow_manifest = PipelineManifest( + self._external_data.get("pipeline_spec", {}).get("workflow_manifest"), + external_data.pipeline_runtime.workflow_manifest, + ) + else: + super().__init__(external_data) + pipeline_spec = self._external_data.get("pipeline_spec", None) or {} + workflow_manifest = pipeline_spec.get("workflow_manifest", None) or {} + self._workflow_manifest = PipelineManifest(workflow_manifest) + + @property + def id(self): + return self._external_data["id"] + + @property + def name(self): + return self._external_data["name"] + + @name.setter + def name(self, name): + self._external_data["name"] = name + + @property + def status(self): + return self._external_data["status"] + + @status.setter + def status(self, status): + self._external_data["status"] = status + + @property + def description(self): + return self._external_data["description"] + + @description.setter + def description(self, description): + self._external_data["description"] = description + + @property + def created_at(self): + return self._external_data["created_at"] + + @created_at.setter + def created_at(self, created_at): + self._external_data["created_at"] = created_at + + @property + def scheduled_at(self): + return self._external_data["scheduled_at"] + + @scheduled_at.setter + def scheduled_at(self, scheduled_at): + self._external_data["scheduled_at"] = scheduled_at + + @property + def finished_at(self): + return self._external_data["finished_at"] + + @finished_at.setter + def finished_at(self, finished_at): + self._external_data["finished_at"] = finished_at + + +class PipelineExperiment(FlexibleMapper): + @property + def id(self): + return self._external_data["id"] diff --git a/mlrun/platforms/other.py b/pipeline-adapters/mlrun-pipelines-kfp-v1-8/src/mlrun_pipelines/mounts.py similarity index 65% rename from mlrun/platforms/other.py rename to pipeline-adapters/mlrun-pipelines-kfp-v1-8/src/mlrun_pipelines/mounts.py index bfb73cd0300..8a6318cb14d 100644 --- a/mlrun/platforms/other.py +++ b/pipeline-adapters/mlrun-pipelines-kfp-v1-8/src/mlrun_pipelines/mounts.py @@ -1,4 +1,4 @@ -# Copyright 2023 Iguazio +# Copyright 2024 Iguazio # # Licensed under the Apache 
License, Version 2.0 (the "License"); # you may not use this file except in compliance with the License. @@ -12,18 +12,284 @@ # See the License for the specific language governing permissions and # limitations under the License. # -# this file is based on the code from kubeflow pipelines git import os -from typing import Dict import kfp.dsl +import semver +from mlrun_pipelines.common.mounts import _enrich_and_validate_v3io_mounts -import mlrun from mlrun.config import config +from mlrun.config import config as mlconf from mlrun.errors import MLRunInvalidArgumentError -from mlrun.utils.helpers import logger +from mlrun.platforms.iguazio import v3io_to_vol +from mlrun.utils import logger -from .iguazio import mount_v3io + +def v3io_cred(api="", user="", access_key=""): + """ + Modifier function to copy local v3io env vars to container + + Usage:: + + train = train_op(...) + train.apply(use_v3io_cred()) + """ + + def _use_v3io_cred(container_op: kfp.dsl.ContainerOp): + from os import environ + + from kubernetes import client as k8s_client + + web_api = api or environ.get("V3IO_API") or mlconf.v3io_api + _user = user or environ.get("V3IO_USERNAME") + _access_key = access_key or environ.get("V3IO_ACCESS_KEY") + v3io_framesd = mlconf.v3io_framesd or environ.get("V3IO_FRAMESD") + + return ( + container_op.container.add_env_variable( + k8s_client.V1EnvVar(name="V3IO_API", value=web_api) + ) + .add_env_variable(k8s_client.V1EnvVar(name="V3IO_USERNAME", value=_user)) + .add_env_variable( + k8s_client.V1EnvVar(name="V3IO_ACCESS_KEY", value=_access_key) + ) + .add_env_variable( + k8s_client.V1EnvVar(name="V3IO_FRAMESD", value=v3io_framesd) + ) + ) + + return _use_v3io_cred + + +def mount_v3io( + name="v3io", + remote="", + access_key="", + user="", + secret=None, + volume_mounts=None, +): + """Modifier function to apply to a Container Op to volume mount a v3io path + + :param name: the volume name + :param remote: the v3io path to use for the volume. ~/ prefix will be replaced with /users// + :param access_key: the access key used to auth against v3io. if not given V3IO_ACCESS_KEY env var will be used + :param user: the username used to auth against v3io. if not given V3IO_USERNAME env var will be used + :param secret: k8s secret name which would be used to get the username and access key to auth against v3io. + :param volume_mounts: list of VolumeMount. empty volume mounts & remote will default to mount /v3io & /User. 
+ """ + volume_mounts, user = _enrich_and_validate_v3io_mounts( + remote=remote, + volume_mounts=volume_mounts, + user=user, + ) + + def _attach_volume_mounts_and_creds(container_op: kfp.dsl.ContainerOp): + from kubernetes import client as k8s_client + + vol = v3io_to_vol(name, remote, access_key, user, secret=secret) + container_op.add_volume(vol) + for volume_mount in volume_mounts: + container_op.container.add_volume_mount( + k8s_client.V1VolumeMount( + mount_path=volume_mount.path, + sub_path=volume_mount.sub_path, + name=name, + ) + ) + + if not secret: + container_op = v3io_cred(access_key=access_key, user=user)(container_op) + return container_op + + return _attach_volume_mounts_and_creds + + +def mount_spark_conf(): + def _mount_spark(container_op: kfp.dsl.ContainerOp): + from kubernetes import client as k8s_client + + container_op.container.add_volume_mount( + k8s_client.V1VolumeMount( + name="spark-master-config", mount_path="/etc/config/spark" + ) + ) + return container_op + + return _mount_spark + + +def mount_v3iod(namespace, v3io_config_configmap): + def _mount_v3iod(container_op: kfp.dsl.ContainerOp): + from kubernetes import client as k8s_client + + def add_vol(name, mount_path, host_path): + vol = k8s_client.V1Volume( + name=name, + host_path=k8s_client.V1HostPathVolumeSource(path=host_path, type=""), + ) + container_op.add_volume(vol) + container_op.container.add_volume_mount( + k8s_client.V1VolumeMount(mount_path=mount_path, name=name) + ) + + # this is a legacy path for the daemon shared memory + host_path = "/dev/shm/" + + # path to shared memory for daemon was changed in Iguazio 3.2.3-b1 + igz_version = mlconf.get_parsed_igz_version() + if igz_version and igz_version >= semver.VersionInfo.parse("3.2.3-b1"): + host_path = "/var/run/iguazio/dayman-shm/" + add_vol(name="shm", mount_path="/dev/shm", host_path=host_path + namespace) + + add_vol( + name="v3iod-comm", + mount_path="/var/run/iguazio/dayman", + host_path="/var/run/iguazio/dayman/" + namespace, + ) + + vol = k8s_client.V1Volume( + name="daemon-health", empty_dir=k8s_client.V1EmptyDirVolumeSource() + ) + container_op.add_volume(vol) + container_op.container.add_volume_mount( + k8s_client.V1VolumeMount( + mount_path="/var/run/iguazio/daemon_health", name="daemon-health" + ) + ) + + vol = k8s_client.V1Volume( + name="v3io-config", + config_map=k8s_client.V1ConfigMapVolumeSource( + name=v3io_config_configmap, default_mode=420 + ), + ) + container_op.add_volume(vol) + container_op.container.add_volume_mount( + k8s_client.V1VolumeMount(mount_path="/etc/config/v3io", name="v3io-config") + ) + + container_op.container.add_env_variable( + k8s_client.V1EnvVar( + name="CURRENT_NODE_IP", + value_from=k8s_client.V1EnvVarSource( + field_ref=k8s_client.V1ObjectFieldSelector( + api_version="v1", field_path="status.hostIP" + ) + ), + ) + ) + container_op.container.add_env_variable( + k8s_client.V1EnvVar( + name="IGZ_DATA_CONFIG_FILE", value="/igz/java/conf/v3io.conf" + ) + ) + + return container_op + + return _mount_v3iod + + +def mount_s3( + secret_name=None, + aws_access_key="", + aws_secret_key="", + endpoint_url=None, + prefix="", + aws_region=None, + non_anonymous=False, +): + """Modifier function to add s3 env vars or secrets to container + + **Warning:** + Using this function to configure AWS credentials will expose these credentials in the pod spec of the runtime + created. It is recommended to use the `secret_name` parameter, or set the credentials as project-secrets and avoid + using this function. 
+ + :param secret_name: kubernetes secret name (storing the access/secret keys) + :param aws_access_key: AWS_ACCESS_KEY_ID value. If this parameter is not specified and AWS_ACCESS_KEY_ID env. + variable is defined, the value will be taken from the env. variable + :param aws_secret_key: AWS_SECRET_ACCESS_KEY value. If this parameter is not specified and AWS_SECRET_ACCESS_KEY + env. variable is defined, the value will be taken from the env. variable + :param endpoint_url: s3 endpoint address (for non AWS s3) + :param prefix: string prefix to add before the env var name (for working with multiple s3 data stores) + :param aws_region: amazon region + :param non_anonymous: force the S3 API to use non-anonymous connection, even if no credentials are provided + (for authenticating externally, such as through IAM instance-roles) + """ + + if secret_name and (aws_access_key or aws_secret_key): + raise MLRunInvalidArgumentError( + "can use k8s_secret for credentials or specify them (aws_access_key, aws_secret_key) not both" + ) + + if not secret_name and ( + aws_access_key + or os.environ.get(prefix + "AWS_ACCESS_KEY_ID") + or aws_secret_key + or os.environ.get(prefix + "AWS_SECRET_ACCESS_KEY") + ): + logger.warning( + "it is recommended to use k8s secret (specify secret_name), " + "specifying the aws_access_key/aws_secret_key directly is unsafe" + ) + + def _use_s3_cred(container_op): + from os import environ + + from kubernetes import client as k8s_client + + _access_key = aws_access_key or environ.get(prefix + "AWS_ACCESS_KEY_ID") + _secret_key = aws_secret_key or environ.get(prefix + "AWS_SECRET_ACCESS_KEY") + _endpoint_url = endpoint_url or environ.get(prefix + "S3_ENDPOINT_URL") + + container = container_op.container + if _endpoint_url: + container.add_env_variable( + k8s_client.V1EnvVar(name=prefix + "S3_ENDPOINT_URL", value=endpoint_url) + ) + if aws_region: + container.add_env_variable( + k8s_client.V1EnvVar(name=prefix + "AWS_REGION", value=aws_region) + ) + if non_anonymous: + container.add_env_variable( + k8s_client.V1EnvVar(name=prefix + "S3_NON_ANONYMOUS", value="true") + ) + + if secret_name: + container.add_env_variable( + k8s_client.V1EnvVar( + name=prefix + "AWS_ACCESS_KEY_ID", + value_from=k8s_client.V1EnvVarSource( + secret_key_ref=k8s_client.V1SecretKeySelector( + name=secret_name, key="AWS_ACCESS_KEY_ID" + ) + ), + ) + ).add_env_variable( + k8s_client.V1EnvVar( + name=prefix + "AWS_SECRET_ACCESS_KEY", + value_from=k8s_client.V1EnvVarSource( + secret_key_ref=k8s_client.V1SecretKeySelector( + name=secret_name, key="AWS_SECRET_ACCESS_KEY" + ) + ), + ) + ) + + else: + return container_op.add_env_variable( + k8s_client.V1EnvVar( + name=prefix + "AWS_ACCESS_KEY_ID", value=_access_key + ) + ).add_env_variable( + k8s_client.V1EnvVar( + name=prefix + "AWS_SECRET_ACCESS_KEY", value=_secret_key + ) + ) + + return _use_s3_cred def mount_pvc(pvc_name=None, volume_name="pipeline", volume_mount_path="/mnt/pipeline"): @@ -172,109 +438,7 @@ def _mount_hostpath(task): return _mount_hostpath -def mount_s3( - secret_name=None, - aws_access_key="", - aws_secret_key="", - endpoint_url=None, - prefix="", - aws_region=None, - non_anonymous=False, -): - """Modifier function to add s3 env vars or secrets to container - - **Warning:** - Using this function to configure AWS credentials will expose these credentials in the pod spec of the runtime - created. It is recommended to use the `secret_name` parameter, or set the credentials as project-secrets and avoid - using this function. 
- - :param secret_name: kubernetes secret name (storing the access/secret keys) - :param aws_access_key: AWS_ACCESS_KEY_ID value. If this parameter is not specified and AWS_ACCESS_KEY_ID env. - variable is defined, the value will be taken from the env. variable - :param aws_secret_key: AWS_SECRET_ACCESS_KEY value. If this parameter is not specified and AWS_SECRET_ACCESS_KEY - env. variable is defined, the value will be taken from the env. variable - :param endpoint_url: s3 endpoint address (for non AWS s3) - :param prefix: string prefix to add before the env var name (for working with multiple s3 data stores) - :param aws_region: amazon region - :param non_anonymous: force the S3 API to use non-anonymous connection, even if no credentials are provided - (for authenticating externally, such as through IAM instance-roles) - """ - - if secret_name and (aws_access_key or aws_secret_key): - raise mlrun.errors.MLRunInvalidArgumentError( - "can use k8s_secret for credentials or specify them (aws_access_key, aws_secret_key) not both" - ) - - if not secret_name and ( - aws_access_key - or os.environ.get(prefix + "AWS_ACCESS_KEY_ID") - or aws_secret_key - or os.environ.get(prefix + "AWS_SECRET_ACCESS_KEY") - ): - logger.warning( - "it is recommended to use k8s secret (specify secret_name), " - "specifying the aws_access_key/aws_secret_key directly is unsafe" - ) - - def _use_s3_cred(container_op): - from os import environ - - from kubernetes import client as k8s_client - - _access_key = aws_access_key or environ.get(prefix + "AWS_ACCESS_KEY_ID") - _secret_key = aws_secret_key or environ.get(prefix + "AWS_SECRET_ACCESS_KEY") - _endpoint_url = endpoint_url or environ.get(prefix + "S3_ENDPOINT_URL") - - container = container_op.container - if _endpoint_url: - container.add_env_variable( - k8s_client.V1EnvVar(name=prefix + "S3_ENDPOINT_URL", value=endpoint_url) - ) - if aws_region: - container.add_env_variable( - k8s_client.V1EnvVar(name=prefix + "AWS_REGION", value=aws_region) - ) - if non_anonymous: - container.add_env_variable( - k8s_client.V1EnvVar(name=prefix + "S3_NON_ANONYMOUS", value="true") - ) - - if secret_name: - container.add_env_variable( - k8s_client.V1EnvVar( - name=prefix + "AWS_ACCESS_KEY_ID", - value_from=k8s_client.V1EnvVarSource( - secret_key_ref=k8s_client.V1SecretKeySelector( - name=secret_name, key="AWS_ACCESS_KEY_ID" - ) - ), - ) - ).add_env_variable( - k8s_client.V1EnvVar( - name=prefix + "AWS_SECRET_ACCESS_KEY", - value_from=k8s_client.V1EnvVarSource( - secret_key_ref=k8s_client.V1SecretKeySelector( - name=secret_name, key="AWS_SECRET_ACCESS_KEY" - ) - ), - ) - ) - - else: - return container_op.add_env_variable( - k8s_client.V1EnvVar( - name=prefix + "AWS_ACCESS_KEY_ID", value=_access_key - ) - ).add_env_variable( - k8s_client.V1EnvVar( - name=prefix + "AWS_SECRET_ACCESS_KEY", value=_secret_key - ) - ) - - return _use_s3_cred - - -def set_env_variables(env_vars_dict: Dict[str, str] = None, **kwargs): +def set_env_variables(env_vars_dict: dict[str, str] = None, **kwargs): """ Modifier function to apply a set of environment variables to a runtime. Variables may be passed as either a dictionary of name-value pairs, or as arguments to the function. 
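End to end, the modifiers in this file are applied to a runtime through `apply`. A sketch under the assumption that the function name, image, and secret name are placeholders:

    import mlrun
    from mlrun_pipelines.mounts import mount_s3, mount_v3io, set_env_variables

    fn = mlrun.new_function("trainer", kind="job", image="mlrun/mlrun")
    fn.apply(mount_v3io())                          # default /v3io and /User mounts
    fn.apply(mount_s3(secret_name="s3-creds"))      # credentials via a k8s secret
    fn.apply(set_env_variables({"MY_ENV": "123"}))  # plain env vars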
diff --git a/pipeline-adapters/mlrun-pipelines-kfp-v1-8/src/mlrun_pipelines/ops.py b/pipeline-adapters/mlrun-pipelines-kfp-v1-8/src/mlrun_pipelines/ops.py new file mode 100644 index 00000000000..964b4bd3c46 --- /dev/null +++ b/pipeline-adapters/mlrun-pipelines-kfp-v1-8/src/mlrun_pipelines/ops.py @@ -0,0 +1,290 @@ +# Copyright 2024 Iguazio +# +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. +# You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. +# + +import os +import os.path + +import inflection +from kfp import dsl +from kubernetes import client as k8s_client +from mlrun_pipelines.common.helpers import ( + FUNCTION_ANNOTATION, + PROJECT_ANNOTATION, + RUN_ANNOTATION, +) +from mlrun_pipelines.common.ops import KFPMETA_DIR, PipelineRunType + +import mlrun +from mlrun.config import config +from mlrun.utils import get_in + +dsl.ContainerOp._DISABLE_REUSABLE_COMPONENT_WARNING = True + + +def generate_deployer_pipeline_node( + name, + function, + func_url=None, + cmd=None, +): + cop = dsl.ContainerOp( + name=name, + image=config.kfp_image, + command=cmd, + file_outputs={"endpoint": "/tmp/output", "name": "/tmp/name"}, + ) + cop = add_default_function_resources(cop) + cop = add_function_node_selection_attributes(container_op=cop, function=function) + + add_annotations(cop, PipelineRunType.deploy, function, func_url) + add_default_env(k8s_client, cop) + return cop + + +def add_env(env=None): + """ + Modifier function to add env vars from dict + Usage: + train = train_op(...) 
+ train.apply(add_env({'MY_ENV':'123'})) + """ + + env = {} if env is None else env + + def _add_env(task): + for k, v in env.items(): + task.add_env_variable(k8s_client.V1EnvVar(name=k, value=v)) + return task + + return _add_env + + +def generate_image_builder_pipeline_node( + name, + function=None, + func_url=None, + cmd=None, +): + cop = dsl.ContainerOp( + name=name, + image=config.kfp_image, + command=cmd, + file_outputs={"state": "/tmp/state", "image": "/tmp/image"}, + ) + cop = add_default_function_resources(cop) + cop = add_function_node_selection_attributes(container_op=cop, function=function) + + add_annotations(cop, PipelineRunType.build, function, func_url) + if config.httpdb.builder.docker_registry: + cop.container.add_env_variable( + k8s_client.V1EnvVar( + name="MLRUN_HTTPDB__BUILDER__DOCKER_REGISTRY", + value=config.httpdb.builder.docker_registry, + ) + ) + if "IGZ_NAMESPACE_DOMAIN" in os.environ: + cop.container.add_env_variable( + k8s_client.V1EnvVar( + name="IGZ_NAMESPACE_DOMAIN", + value=os.environ.get("IGZ_NAMESPACE_DOMAIN"), + ) + ) + + is_v3io = function.spec.build.source and function.spec.build.source.startswith( + "v3io" + ) + if "V3IO_ACCESS_KEY" in os.environ and is_v3io: + cop.container.add_env_variable( + k8s_client.V1EnvVar( + name="V3IO_ACCESS_KEY", value=os.environ.get("V3IO_ACCESS_KEY") + ) + ) + + add_default_env(k8s_client, cop) + + return cop + + +def generate_pipeline_node( + project_name: str, + name: str, + image: str, + command: list, + file_outputs: dict, + function, + func_url: str, + scrape_metrics: bool, + code_env: str, + registry: str, +): + cop = dsl.ContainerOp( + name=name, + image=image, + command=command, + file_outputs=file_outputs, + output_artifact_paths={ + "mlpipeline-ui-metadata": os.path.join( + KFPMETA_DIR, "mlpipeline-ui-metadata.json" + ), + "mlpipeline-metrics": os.path.join(KFPMETA_DIR, "mlpipeline-metrics.json"), + }, + ) + cop = add_default_function_resources(cop) + cop = add_function_node_selection_attributes(container_op=cop, function=function) + + add_annotations(cop, PipelineRunType.run, function, func_url, project_name) + add_labels(cop, function, scrape_metrics) + if code_env: + cop.container.add_env_variable( + k8s_client.V1EnvVar(name="MLRUN_EXEC_CODE", value=code_env) + ) + if registry: + cop.container.add_env_variable( + k8s_client.V1EnvVar( + name="MLRUN_HTTPDB__BUILDER__DOCKER_REGISTRY", value=registry + ) + ) + + add_default_env(k8s_client, cop) + + return cop + + +def add_default_env(k8s_client, cop): + cop.container.add_env_variable( + k8s_client.V1EnvVar( + "MLRUN_NAMESPACE", + value_from=k8s_client.V1EnvVarSource( + field_ref=k8s_client.V1ObjectFieldSelector( + field_path="metadata.namespace" + ) + ), + ) + ) + + if config.httpdb.api_url: + cop.container.add_env_variable( + k8s_client.V1EnvVar(name="MLRUN_DBPATH", value=config.httpdb.api_url) + ) + + if config.mpijob_crd_version: + cop.container.add_env_variable( + k8s_client.V1EnvVar( + name="MLRUN_MPIJOB_CRD_VERSION", value=config.mpijob_crd_version + ) + ) + + auth_env_var = mlrun.runtimes.constants.FunctionEnvironmentVariables.auth_session + if auth_env_var in os.environ or "V3IO_ACCESS_KEY" in os.environ: + cop.container.add_env_variable( + k8s_client.V1EnvVar( + name=auth_env_var, + value=os.environ.get(auth_env_var) or os.environ.get("V3IO_ACCESS_KEY"), + ) + ) + + +def add_annotations(cop, kind, function, func_url=None, project=None): + if func_url and func_url.startswith("db://"): + func_url = func_url[len("db://") :] + 
cop.add_pod_annotation(RUN_ANNOTATION, kind) + cop.add_pod_annotation(PROJECT_ANNOTATION, project or function.metadata.project) + cop.add_pod_annotation(FUNCTION_ANNOTATION, func_url or function.uri) + + +def add_labels(cop, function, scrape_metrics=False): + prefix = mlrun.runtimes.utils.mlrun_key + cop.add_pod_label(prefix + "class", function.kind) + cop.add_pod_label(prefix + "function", function.metadata.name) + cop.add_pod_label(prefix + "name", cop.human_name) + cop.add_pod_label(prefix + "project", function.metadata.project) + cop.add_pod_label(prefix + "tag", function.metadata.tag or "latest") + cop.add_pod_label(prefix + "scrape-metrics", "True" if scrape_metrics else "False") + + +def add_default_function_resources( + container_op: dsl.ContainerOp, +) -> dsl.ContainerOp: + default_resources = config.get_default_function_pod_resources() + for resource_name, resource_value in default_resources["requests"].items(): + if resource_value: + container_op.container.add_resource_request(resource_name, resource_value) + + for resource_name, resource_value in default_resources["limits"].items(): + if resource_value: + container_op.container.add_resource_limit(resource_name, resource_value) + return container_op + + +def add_function_node_selection_attributes( + function, container_op: dsl.ContainerOp +) -> dsl.ContainerOp: + if not mlrun.runtimes.RuntimeKinds.is_local_runtime(function.kind): + if getattr(function.spec, "node_selector"): + container_op.node_selector = function.spec.node_selector + + if getattr(function.spec, "tolerations"): + container_op.tolerations = function.spec.tolerations + + if getattr(function.spec, "affinity"): + container_op.affinity = function.spec.affinity + + return container_op + + +def generate_kfp_dag_and_resolve_project(run, project=None): + workflow = run._workflow_manifest + if not workflow: + return None, project, None + + templates = {} + for template in workflow["spec"]["templates"]: + project = project or get_in( + template, ["metadata", "annotations", PROJECT_ANNOTATION], "" + ) + name = template["name"] + templates[name] = { + "run_type": get_in( + template, ["metadata", "annotations", RUN_ANNOTATION], "" + ), + "function": get_in( + template, ["metadata", "annotations", FUNCTION_ANNOTATION], "" + ), + } + + nodes = workflow["status"].get("nodes", {}) + dag = {} + for node in nodes.values(): + name = node["displayName"] + record = { + k: node[k] for k in ["phase", "startedAt", "finishedAt", "type", "id"] + } + + # snake case + # align kfp fields to mlrun snake case convention + # create snake_case for consistency. 
+            # retain the camelCase for compatibility
+            for key in list(record.keys()):
+                record[inflection.underscore(key)] = record[key]
+
+            record["parent"] = node.get("boundaryID", "")
+            record["name"] = name
+            record["children"] = node.get("children", [])
+            if name in templates:
+                record["function"] = templates[name].get("function")
+                record["run_type"] = templates[name].get("run_type")
+            dag[node["id"]] = record
+
+    return dag, project, workflow["status"].get("message", "")
diff --git a/pipeline-adapters/mlrun-pipelines-kfp-v1-8/src/mlrun_pipelines/patcher.py b/pipeline-adapters/mlrun-pipelines-kfp-v1-8/src/mlrun_pipelines/patcher.py
new file mode 100644
index 00000000000..2b3c6eef45e
--- /dev/null
+++ b/pipeline-adapters/mlrun-pipelines-kfp-v1-8/src/mlrun_pipelines/patcher.py
@@ -0,0 +1,95 @@
+# Copyright 2024 Iguazio
+#
+# Licensed under the Apache License, Version 2.0 (the "License");
+# you may not use this file except in compliance with the License.
+# You may obtain a copy of the License at
+#
+# http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS,
+# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+# See the License for the specific language governing permissions and
+# limitations under the License.
+#
+
+import typing
+
+import kfp.compiler
+from kfp import dsl
+
+import mlrun
+from mlrun.errors import err_to_str
+from mlrun.utils import logger
+
+
+# When we run pipelines, the kfp.compiler.Compiler.compile() method takes the function decorated with @dsl.pipeline
+# and converts it to a k8s object. As part of the flow in the Compiler.compile() method,
+# we call _create_and_write_workflow, which builds a dictionary from the workflow and then writes it to a file.
+# Unfortunately, the kfp sdk does not provide an API for configuring priority_class_name and other attributes.
+# The following issue came up while looking for a way to set the priority_class_name:
+# https://github.com/kubeflow/pipelines/issues/3594
+# By patching _create_and_write_workflow, we can eventually obtain the dictionary right before it is written
+# to a file and enrich it with Argo-compatible fields; make sure to reference the same Argo version we use:
+# https://github.com/argoproj/argo-workflows/blob/release-2.7/pkg/apis/workflow/v1alpha1/workflow_types.go
+def _create_enriched_mlrun_workflow(
+    self,
+    pipeline_func: typing.Callable,
+    pipeline_name: typing.Optional[str] = None,
+    pipeline_description: typing.Optional[str] = None,
+    params_list: typing.Optional[list[dsl.PipelineParam]] = None,
+    pipeline_conf: typing.Optional[dsl.PipelineConf] = None,
+):
+    """Call internal implementation of create_workflow and enrich with mlrun functions attributes"""
+    from mlrun import pipeline_context
+    from mlrun.projects.pipelines import (
+        _enrich_kfp_pod_security_context,
+        _set_function_attribute_on_kfp_pod,
+    )
+
+    workflow = self._original_create_workflow(
+        pipeline_func, pipeline_name, pipeline_description, params_list, pipeline_conf
+    )
+    # We don't want to interrupt the original flow, and we don't know all the scenarios in which this function
+    # could be called. That's why all of the enrichment code is wrapped in try/except, with specific handling
+    # for errors that we know can be raised.
+ try: + functions = [] + if pipeline_context.functions: + try: + functions = pipeline_context.functions.values() + except Exception as err: + logger.debug( + "Unable to retrieve project functions, not enriching workflow with mlrun", + error=err_to_str(err), + ) + return workflow + + # enrich each pipeline step with your desire k8s attribute + for kfp_step_template in workflow["spec"]["templates"]: + if kfp_step_template.get("container"): + for function_obj in functions: + # we condition within each function since the comparison between the function and + # the kfp pod may change depending on the attribute type. + _set_function_attribute_on_kfp_pod( + kfp_step_template, + function_obj, + "PriorityClassName", + "priority_class_name", + ) + _enrich_kfp_pod_security_context( + kfp_step_template, + function_obj, + ) + except mlrun.errors.MLRunInvalidArgumentError: + raise + except Exception as err: + logger.debug( + "Something in the enrichment of kfp pods failed", error=err_to_str(err) + ) + return workflow + + +# patching function as class method +kfp.compiler.Compiler._original_create_workflow = kfp.compiler.Compiler._create_workflow +kfp.compiler.Compiler._create_workflow = _create_enriched_mlrun_workflow diff --git a/pipeline-adapters/mlrun-pipelines-kfp-v1-8/src/mlrun_pipelines/utils.py b/pipeline-adapters/mlrun-pipelines-kfp-v1-8/src/mlrun_pipelines/utils.py new file mode 100644 index 00000000000..926e5e0f78b --- /dev/null +++ b/pipeline-adapters/mlrun-pipelines-kfp-v1-8/src/mlrun_pipelines/utils.py @@ -0,0 +1,69 @@ +# Copyright 2024 Iguazio +# +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. +# You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. 
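The patcher above boils down to a wrap-and-delegate monkey-patch. The same pattern in miniature (illustrative only; the real enrichment lives in `_create_enriched_mlrun_workflow` above):

    import kfp.compiler

    _original = kfp.compiler.Compiler._create_workflow

    def _wrapped(self, *args, **kwargs):
        workflow = _original(self, *args, **kwargs)
        # enrich the workflow dict here, e.g. per-template pod attributes
        return workflow

    kfp.compiler.Compiler._create_workflow = _wrapped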
+# +import tempfile + +import kfp +from kubernetes import client +from mlrun_pipelines.helpers import new_pipe_metadata + + +def apply_kfp(modify, cop, runtime): + modify(cop) + + # Have to do it here to avoid circular dependencies + from mlrun.runtimes.pod import AutoMountType + + if AutoMountType.is_auto_modifier(modify): + runtime.spec.disable_auto_mount = True + + api = client.ApiClient() + for k, v in cop.pod_labels.items(): + runtime.metadata.labels[k] = v + for k, v in cop.pod_annotations.items(): + runtime.metadata.annotations[k] = v + if cop.container.env: + env_names = [ + e.name if hasattr(e, "name") else e["name"] for e in runtime.spec.env + ] + for e in api.sanitize_for_serialization(cop.container.env): + name = e["name"] + if name in env_names: + runtime.spec.env[env_names.index(name)] = e + else: + runtime.spec.env.append(e) + env_names.append(name) + cop.container.env.clear() + + if cop.volumes and cop.container.volume_mounts: + vols = api.sanitize_for_serialization(cop.volumes) + mounts = api.sanitize_for_serialization(cop.container.volume_mounts) + runtime.spec.update_vols_and_mounts(vols, mounts) + cop.volumes.clear() + cop.container.volume_mounts.clear() + + return runtime + + +def compile_pipeline(artifact_path, cleanup_ttl, ops, pipeline): + pipe_file = tempfile.NamedTemporaryFile(suffix=".yaml", delete=False).name + conf = new_pipe_metadata( + artifact_path=artifact_path, + cleanup_ttl=cleanup_ttl, + op_transformers=ops, + ) + kfp.compiler.Compiler().compile( + pipeline, pipe_file, type_check=False, pipeline_conf=conf + ) + return pipe_file diff --git a/pyproject.toml b/pyproject.toml index fb0bbfcc850..5551152e960 100644 --- a/pyproject.toml +++ b/pyproject.toml @@ -59,7 +59,6 @@ forbidden_modules = [ "mlrun.execution", "mlrun.features", "mlrun.k8s_utils", - "mlrun.kfpops", "mlrun.lists", "mlrun.model", "mlrun.render", @@ -103,7 +102,6 @@ source_modules = [ "mlrun.execution", "mlrun.features", "mlrun.k8s_utils", - "mlrun.kfpops", "mlrun.lists", "mlrun.model", "mlrun.render", diff --git a/requirements.txt b/requirements.txt index 3e2870e7b16..9939b9b738b 100644 --- a/requirements.txt +++ b/requirements.txt @@ -4,9 +4,6 @@ GitPython~=3.1, >=3.1.41 aiohttp~=3.9 aiohttp-retry~=2.8 click~=8.1 -# 3.0/3.2 iguazio system uses 1.0.1, but we needed >=1.6.0 to be compatible with k8s>=12.0 to fix scurity issue -# since the sdk is still mark as beta (and not stable) I'm limiting to only patch changes -kfp~=1.8 nest-asyncio~=1.0 ipython~=8.10 nuclio-jupyter~=0.9.15 @@ -41,3 +38,6 @@ deprecated~=1.2 jinja2~=3.1, >=3.1.3 anyio~=3.7 orjson~=3.9 +# mlrun pipeline adapters +mlrun-pipelines-kfp-common-experiment~=0.1.0 +mlrun-pipelines-kfp-v1-8-experiment~=0.1.0 diff --git a/server/api/api/endpoints/feature_store.py b/server/api/api/endpoints/feature_store.py index b56fc0f92ff..b1f356963f4 100644 --- a/server/api/api/endpoints/feature_store.py +++ b/server/api/api/endpoints/feature_store.py @@ -18,6 +18,7 @@ from fastapi import APIRouter, Depends, Header, Query, Response from fastapi.concurrency import run_in_threadpool +from mlrun_pipelines.mounts import v3io_cred from sqlalchemy.orm import Session import mlrun.common.schemas @@ -26,7 +27,6 @@ import server.api.crud import server.api.utils.auth.verifier import server.api.utils.singletons.project_member -from mlrun import v3io_cred from mlrun.data_types import InferOptions from mlrun.datastore.targets import get_default_prefix_for_target from mlrun.feature_store.api import RunConfig, ingest diff --git 
a/server/api/api/endpoints/pipelines.py b/server/api/api/endpoints/pipelines.py index 20d9046143d..3c8623dd4f2 100644 --- a/server/api/api/endpoints/pipelines.py +++ b/server/api/api/endpoints/pipelines.py @@ -20,6 +20,7 @@ import yaml from fastapi import APIRouter, Depends, Query, Request from fastapi.concurrency import run_in_threadpool +from mlrun_pipelines.models import PipelineManifest from sqlalchemy.orm import Session import mlrun.common.schemas @@ -258,5 +259,5 @@ def _try_resolve_project_from_body( return None workflow_manifest = yaml.load(data, Loader=yaml.FullLoader) return server.api.crud.Pipelines().resolve_project_from_workflow_manifest( - workflow_manifest + PipelineManifest(workflow_manifest) ) diff --git a/server/api/api/endpoints/workflows.py b/server/api/api/endpoints/workflows.py index 5c07c058e40..f5b5ab43faf 100644 --- a/server/api/api/endpoints/workflows.py +++ b/server/api/api/endpoints/workflows.py @@ -20,6 +20,7 @@ from typing import Dict, Optional import fastapi +import mlrun_pipelines.common.models from fastapi.concurrency import run_in_threadpool from sqlalchemy.orm import Session @@ -217,7 +218,7 @@ async def submit_workflow( workflow_request=updated_request, auth_info=auth_info, ) - status = mlrun.run.RunStatuses.running + status = mlrun_pipelines.common.models.RunStatuses.running run_uid = run.uid() except Exception as error: logger.error(traceback.format_exc()) @@ -231,7 +232,7 @@ async def submit_workflow( return mlrun.common.schemas.WorkflowResponse( project=project.metadata.name, name=workflow_spec.name, - status=status, + status=str(status), run_id=run_uid, schedule=workflow_spec.schedule, ) diff --git a/server/api/crud/model_monitoring/deployment.py b/server/api/crud/model_monitoring/deployment.py index b91428ed7a1..2d67ebdeeac 100644 --- a/server/api/crud/model_monitoring/deployment.py +++ b/server/api/crud/model_monitoring/deployment.py @@ -15,6 +15,7 @@ import pathlib import typing +import mlrun_pipelines.mounts import sqlalchemy.orm from fastapi import Depends @@ -726,7 +727,7 @@ def _apply_access_key_and_mount_function( ) function.metadata.credentials.access_key = model_monitoring_access_key - function.apply(mlrun.v3io_cred()) + function.apply(mlrun_pipelines.mounts.v3io_cred()) # Ensure that the auth env vars are set server.api.api.utils.ensure_function_has_auth_set(function, auth_info) diff --git a/server/api/crud/pipelines.py b/server/api/crud/pipelines.py index 8414ba03a95..fc4ed59e04d 100644 --- a/server/api/crud/pipelines.py +++ b/server/api/crud/pipelines.py @@ -14,20 +14,23 @@ # import ast import http -import json import tempfile import traceback import typing import kfp import kfp_server_api +import mlrun_pipelines +import mlrun_pipelines.common.helpers +import mlrun_pipelines.common.ops import sqlalchemy.orm +from mlrun_pipelines.mixins import PipelineProviderMixin +from mlrun_pipelines.models import PipelineExperiment, PipelineRun import mlrun import mlrun.common.helpers import mlrun.common.schemas import mlrun.errors -import mlrun.kfpops import mlrun.utils.helpers import mlrun.utils.singleton import server.api.api.utils @@ -36,6 +39,7 @@ class Pipelines( + PipelineProviderMixin, metaclass=mlrun.utils.singleton.Singleton, ): def list_pipelines( @@ -64,7 +68,7 @@ def list_pipelines( kfp_client = self.initialize_kfp_client(namespace) if project != "*": - run_dicts = [] + runs = [] while page_token is not None: # kfp doesn't allow us to pass both a page_token and the `filter` and `sort_by` params. 
# When we have a token from previous call, we will strip out the filter and use the token to continue @@ -75,13 +79,13 @@ def list_pipelines( sort_by=sort_by if page_token == "" else "", filter=filter_ if page_token == "" else "", ) - run_dicts.extend([run.to_dict() for run in response.runs or []]) + runs.extend([PipelineRun(run) for run in response.runs or []]) page_token = response.next_page_token project_runs = [] - for run_dict in run_dicts: - run_project = self.resolve_project_from_pipeline(run_dict) + for run in runs: + run_project = self.resolve_project_from_pipeline(run) if run_project == project: - project_runs.append(run_dict) + project_runs.append(run) runs = self._filter_runs_by_name(project_runs, name_contains) total_size = len(runs) next_page_token = None @@ -102,7 +106,7 @@ def list_pipelines( raise mlrun.errors.err_for_status_code( exc.status, err_to_str(error_message) ) from exc - runs = [run.to_dict() for run in response.runs or []] + runs = [PipelineRun(run) for run in response.runs or []] runs = self._filter_runs_by_name(runs, name_contains) next_page_token = response.next_page_token # In-memory filtering turns Kubeflow's counting inaccurate if there are multiple pages of data @@ -198,8 +202,8 @@ def get_pipeline( run = None try: api_run_detail = kfp_client.get_run(run_id) - if api_run_detail.run: - run = api_run_detail.to_dict()["run"] + run = PipelineRun(api_run_detail) + if run: if project and project != "*": run_project = self.resolve_project_from_pipeline(run) if run_project != project: @@ -214,9 +218,7 @@ def get_pipeline( project=project, format_=format_, ) - run = self._format_run( - db_session, run, format_, api_run_detail.to_dict() - ) + run = self._format_run(db_session, run, format_) except kfp_server_api.ApiException as exc: raise mlrun.errors.err_for_status_code(exc.status, err_to_str(exc)) from exc except mlrun.errors.MLRunHTTPStatusError: @@ -264,9 +266,13 @@ def create_pipeline( try: kfp_client = self.initialize_kfp_client(namespace) - experiment = kfp_client.create_experiment(name=experiment_name) - run = kfp_client.run_pipeline( - experiment.id, run_name, pipeline_file.name, params=arguments + experiment = PipelineExperiment( + kfp_client.create_experiment(name=experiment_name) + ) + run = PipelineRun( + kfp_client.run_pipeline( + experiment.id, run_name, pipeline_file.name, params=arguments + ) ) except Exception as exc: logger.warning( @@ -306,26 +312,21 @@ def _format_runs( def _format_run( self, db_session: sqlalchemy.orm.Session, - run: dict, + run: PipelineRun, format_: mlrun.common.schemas.PipelinesFormat = mlrun.common.schemas.PipelinesFormat.metadata_only, - api_run_detail: typing.Optional[dict] = None, ) -> dict: - run["project"] = self.resolve_project_from_pipeline(run) + run.project = self.resolve_project_from_pipeline(run) if format_ == mlrun.common.schemas.PipelinesFormat.full: - return run + return run.to_dict() elif format_ == mlrun.common.schemas.PipelinesFormat.metadata_only: return mlrun.utils.helpers.format_run(run, with_project=True) elif format_ == mlrun.common.schemas.PipelinesFormat.name_only: return run.get("name") elif format_ == mlrun.common.schemas.PipelinesFormat.summary: - if not api_run_detail: - raise mlrun.errors.MLRunRuntimeError( - "The full kfp api_run_detail object is needed to generate the summary format" - ) run_db = server.api.api.utils.get_run_db_instance(db_session) - return mlrun.kfpops.format_summary_from_kfp_run( - api_run_detail, run["project"], run_db=run_db + return 
mlrun_pipelines.common.ops.format_summary_from_kfp_run( + run, run["project"], run_db=run_db ) else: raise NotImplementedError( @@ -383,60 +384,8 @@ def _resolve_project_from_command( return None - def resolve_project_from_pipeline(self, pipeline): - workflow_manifest = json.loads( - pipeline.get("pipeline_spec", {}).get("workflow_manifest") or "{}" - ) - return self.resolve_project_from_workflow_manifest(workflow_manifest) - - def resolve_project_from_workflow_manifest(self, workflow_manifest): - templates = workflow_manifest.get("spec", {}).get("templates", []) - for template in templates: - project_from_annotation = ( - template.get("metadata", {}) - .get("annotations", {}) - .get(mlrun.kfpops.project_annotation) - ) - if project_from_annotation: - return project_from_annotation - command = template.get("container", {}).get("command", []) - action = None - for index, argument in enumerate(command): - if argument == "mlrun" and index + 1 < len(command): - action = command[index + 1] - break - if action: - if action == "deploy": - project = self._resolve_project_from_command( - command, - hyphen_p_is_also_project=True, - has_func_url_flags=True, - has_runtime_flags=False, - ) - if project: - return project - elif action == "run": - project = self._resolve_project_from_command( - command, - hyphen_p_is_also_project=False, - has_func_url_flags=True, - has_runtime_flags=True, - ) - if project: - return project - elif action == "build": - project = self._resolve_project_from_command( - command, - hyphen_p_is_also_project=False, - has_func_url_flags=False, - has_runtime_flags=True, - ) - if project: - return project - else: - raise NotImplementedError(f"Unknown action: {action}") - - return mlrun.mlconf.default_project + def resolve_project_from_pipeline(self, pipeline: PipelineRun): + return self.resolve_project_from_workflow_manifest(pipeline._workflow_manifest) @staticmethod def _get_experiment_id_from_run(run: dict) -> str: diff --git a/server/api/crud/projects.py b/server/api/crud/projects.py index 62e9e8430b3..8e701e6d0ca 100644 --- a/server/api/crud/projects.py +++ b/server/api/crud/projects.py @@ -19,6 +19,7 @@ import fastapi.concurrency import humanfriendly +import mlrun_pipelines.common.models import sqlalchemy.orm import mlrun.common.schemas @@ -400,7 +401,10 @@ async def _calculate_pipelines_counters( return collections.defaultdict(lambda: None) for pipeline in pipelines: - if pipeline["status"] not in mlrun.run.RunStatuses.stable_statuses(): + if ( + pipeline["status"] + not in mlrun_pipelines.common.models.RunStatuses.stable_statuses() + ): project_to_running_pipelines_count[pipeline["project"]] += 1 return project_to_running_pipelines_count diff --git a/server/api/crud/workflows.py b/server/api/crud/workflows.py index e1d1ca0e7a4..5def845ab61 100644 --- a/server/api/crud/workflows.py +++ b/server/api/crud/workflows.py @@ -16,6 +16,7 @@ import uuid from typing import Dict +import mlrun_pipelines.common.models from sqlalchemy.orm import Session import mlrun.common.schemas @@ -276,7 +277,8 @@ def get_workflow_id( if workflow_id is None: if ( engine == "local" - and state.casefold() == mlrun.run.RunStatuses.running.casefold() + and state.casefold() + == mlrun_pipelines.common.models.RunStatuses.running.casefold() ): workflow_id = "" else: diff --git a/tests/api/api/test_pipelines.py b/tests/api/api/test_pipelines.py index d70c127b90e..9bfe6e5db2d 100644 --- a/tests/api/api/test_pipelines.py +++ b/tests/api/api/test_pipelines.py @@ -24,6 +24,7 @@ import kfp_server_api.models import 
pytest import sqlalchemy.orm +from mlrun_pipelines.models import PipelineRun import mlrun.common.schemas import server.api.crud @@ -66,7 +67,7 @@ def test_list_pipelines_formats( mlrun.common.schemas.PipelinesFormat.name_only, ]: runs = _generate_list_runs_mocks() - expected_runs = [run.to_dict() for run in runs] + expected_runs = [PipelineRun(run.to_dict()) for run in runs] expected_runs = server.api.crud.Pipelines()._format_runs( db, expected_runs, format_ ) @@ -99,7 +100,9 @@ def test_get_pipeline_formats( params={"format": format_}, ) expected_run = server.api.crud.Pipelines()._format_run( - db, api_run_detail.to_dict()["run"], format_, api_run_detail.to_dict() + db, + PipelineRun(api_run_detail), + format_, ) _assert_get_pipeline_response(expected_run, response) @@ -154,7 +157,7 @@ def test_get_pipeline_specific_project( params={"format": format_}, ) expected_run = server.api.crud.Pipelines()._format_run( - db, api_run_detail.to_dict()["run"], format_, api_run_detail.to_dict() + db, PipelineRun(api_run_detail), format_ ) _assert_get_pipeline_response(expected_run, response) @@ -244,7 +247,7 @@ def test_list_pipelines_name_contains( ) expected_runs = server.api.crud.Pipelines()._format_runs( - db, [run.to_dict() for run in runs if run.id in expected_runs_ids] + db, [PipelineRun(run.to_dict()) for run in runs if run.id in expected_runs_ids] ) expected_response = mlrun.common.schemas.PipelinesOutput( runs=expected_runs, total_size=len(expected_runs), next_page_token=None @@ -310,6 +313,7 @@ def _generate_get_run_mock() -> kfp_server_api.models.api_run_detail.ApiRunDetai id="id1", name="run1", description="desc1", + created_at="0001-01-01 00:00:00+00:00", pipeline_spec=kfp_server_api.models.api_pipeline_spec.ApiPipelineSpec( pipeline_id="pipe_id1", workflow_manifest=workflow_manifest, diff --git a/tests/api/api/test_projects.py b/tests/api/api/test_projects.py index 9b24e7c1341..e03fedf58e0 100644 --- a/tests/api/api/test_projects.py +++ b/tests/api/api/test_projects.py @@ -25,6 +25,7 @@ import deepdiff import fastapi.testclient import mergedeep +import mlrun_pipelines.common.models import pytest import sqlalchemy.orm from fastapi.testclient import TestClient @@ -1640,9 +1641,9 @@ def _create_schedules(client: TestClient, project_name, schedules_count): def _mock_pipelines(project_name): mlrun.mlconf.kfp_url = "http://some-random-url:8888" status_count_map = { - mlrun.run.RunStatuses.running: 4, - mlrun.run.RunStatuses.succeeded: 3, - mlrun.run.RunStatuses.failed: 2, + mlrun_pipelines.common.models.RunStatuses.running: 4, + mlrun_pipelines.common.models.RunStatuses.succeeded: 3, + mlrun_pipelines.common.models.RunStatuses.failed: 2, } pipelines = [] for status, count in status_count_map.items(): @@ -1651,4 +1652,4 @@ def _mock_pipelines(project_name): server.api.crud.Pipelines().list_pipelines = unittest.mock.Mock( return_value=(None, None, pipelines) ) - return status_count_map[mlrun.run.RunStatuses.running] + return status_count_map[mlrun_pipelines.common.models.RunStatuses.running] diff --git a/tests/api/crud/test_pipelines.py b/tests/api/crud/test_pipelines.py index c32ec43b372..c45254a6db1 100644 --- a/tests/api/crud/test_pipelines.py +++ b/tests/api/crud/test_pipelines.py @@ -14,8 +14,10 @@ # import json +import mlrun_pipelines +import mlrun_pipelines.common.helpers + import mlrun.errors -import mlrun.kfpops import mlrun.run import mlrun.utils.helpers import server.api.crud @@ -248,7 +250,7 @@ def test_resolve_pipeline_project(): "template": { "metadata": { "annotations": { - 
mlrun.kfpops.project_annotation: "project-from-annotation" + mlrun_pipelines.common.helpers.PROJECT_ANNOTATION: "project-from-annotation" } } }, @@ -259,5 +261,7 @@ def test_resolve_pipeline_project(): pipeline = { "pipeline_spec": {"workflow_manifest": json.dumps(workflow_manifest)} } - project = server.api.crud.Pipelines().resolve_project_from_pipeline(pipeline) + project = server.api.crud.Pipelines().resolve_project_from_pipeline( + mlrun_pipelines.models.PipelineRun(pipeline) + ) assert project == case["expected_project"] diff --git a/tests/api/runtimes/test_dask.py b/tests/api/runtimes/test_dask.py index 05e346b0292..d74c02da8f8 100644 --- a/tests/api/runtimes/test_dask.py +++ b/tests/api/runtimes/test_dask.py @@ -20,13 +20,13 @@ from dask import distributed from fastapi.testclient import TestClient +from mlrun_pipelines.mounts import auto_mount from sqlalchemy.orm import Session import mlrun import mlrun.common.schemas import server.api.api.endpoints.functions from mlrun import mlconf -from mlrun.platforms import auto_mount from mlrun.runtimes.utils import generate_resources from tests.api.conftest import K8sSecretsMock from tests.api.runtimes.base import TestRuntimeBase diff --git a/tests/api/runtimes/test_kubejob.py b/tests/api/runtimes/test_kubejob.py index 5d1c4a0ae0d..54e6b96ac83 100644 --- a/tests/api/runtimes/test_kubejob.py +++ b/tests/api/runtimes/test_kubejob.py @@ -21,6 +21,7 @@ import deepdiff import pytest from fastapi.testclient import TestClient +from mlrun_pipelines.mounts import auto_mount from sqlalchemy.orm import Session import mlrun.common.schemas @@ -29,7 +30,6 @@ import server.api.utils.builder from mlrun.common.schemas import SecurityContextEnrichmentModes from mlrun.config import config as mlconf -from mlrun.platforms import auto_mount from mlrun.runtimes.utils import generate_resources from server.api.utils.singletons.db import get_db from tests.api.conftest import K8sSecretsMock diff --git a/tests/platforms/test_iguazio.py b/tests/platforms/test_iguazio.py index 4caea812089..3b70f3ec034 100644 --- a/tests/platforms/test_iguazio.py +++ b/tests/platforms/test_iguazio.py @@ -16,6 +16,7 @@ from unittest.mock import Mock import deepdiff +import mlrun_pipelines.common.mounts import pytest import requests @@ -111,15 +112,21 @@ def test_mount_v3io(): {"remote": "~/custom-remote", "expect_failure": True}, { "volume_mounts": [ - mlrun.VolumeMount("/volume-mount-path", "volume-sub-path") + mlrun_pipelines.common.mounts.VolumeMount( + "/volume-mount-path", "volume-sub-path" + ) ], "remote": "~/custom-remote", "expect_failure": True, }, { "volume_mounts": [ - mlrun.VolumeMount("/volume-mount-path", "volume-sub-path"), - mlrun.VolumeMount("/volume-mount-path-2", "volume-sub-path-2"), + mlrun_pipelines.common.mounts.VolumeMount( + "/volume-mount-path", "volume-sub-path" + ), + mlrun_pipelines.common.mounts.VolumeMount( + "/volume-mount-path-2", "volume-sub-path-2" + ), ], "remote": "~/custom-remote", "set_user": True, @@ -150,8 +157,12 @@ def test_mount_v3io(): }, { "volume_mounts": [ - mlrun.VolumeMount("/volume-mount-path", "volume-sub-path"), - mlrun.VolumeMount("/volume-mount-path-2", "volume-sub-path-2"), + mlrun_pipelines.common.mounts.VolumeMount( + "/volume-mount-path", "volume-sub-path" + ), + mlrun_pipelines.common.mounts.VolumeMount( + "/volume-mount-path-2", "volume-sub-path-2" + ), ], "set_user": True, "expected_volume": { diff --git a/tests/platforms/test_other.py b/tests/platforms/test_other.py index 7dbded04761..fbd463bed65 100644 --- 
a/tests/platforms/test_other.py +++ b/tests/platforms/test_other.py @@ -13,6 +13,7 @@ # limitations under the License. # import deepdiff +import mlrun_pipelines.mounts import mlrun import mlrun.errors @@ -26,7 +27,7 @@ def test_mount_configmap(): "function-name", "function-project", kind=mlrun.runtimes.RuntimeKinds.job ) function.apply( - mlrun.platforms.mount_configmap( + mlrun_pipelines.mounts.mount_configmap( configmap_name="my-config-map", mount_path="/myConfMapPath", volume_name="my-volume", @@ -59,7 +60,7 @@ def test_mount_hostpath(): "function-name", "function-project", kind=mlrun.runtimes.RuntimeKinds.job ) function.apply( - mlrun.platforms.mount_hostpath( + mlrun_pipelines.mounts.mount_hostpath( host_path="/tmp", mount_path="/myHostPath", volume_name="my-volume" ) ) @@ -87,7 +88,7 @@ def test_mount_s3(): "function-name", "function-project", kind=mlrun.runtimes.RuntimeKinds.job ) function.apply( - mlrun.platforms.mount_s3( + mlrun_pipelines.mounts.mount_s3( aws_access_key="xx", aws_secret_key="yy", endpoint_url="a.b" ) ) @@ -101,7 +102,7 @@ def test_mount_s3(): function = mlrun.new_function( "function-name", "function-project", kind=mlrun.runtimes.RuntimeKinds.job ) - function.apply(mlrun.platforms.mount_s3(secret_name="s", endpoint_url="a.b")) + function.apply(mlrun_pipelines.mounts.mount_s3(secret_name="s", endpoint_url="a.b")) env_dict = { var["name"]: var.get("value", var.get("valueFrom")) for var in function.spec.env } @@ -129,7 +130,7 @@ def test_set_env_variables(): assert function.spec.env == [] # Using a dictionary - function.apply(mlrun.platforms.set_env_variables(env_variables)) + function.apply(mlrun_pipelines.mounts.set_env_variables(env_variables)) env_dict = {var["name"]: var.get("value") for var in function.spec.env} assert env_dict == env_variables @@ -140,7 +141,7 @@ def test_set_env_variables(): assert function.spec.env == [] # And using key=value parameters - function.apply(mlrun.platforms.set_env_variables(**env_variables)) + function.apply(mlrun_pipelines.mounts.set_env_variables(**env_variables)) env_dict = {var["name"]: var.get("value") for var in function.spec.env} assert env_dict == env_variables diff --git a/tests/runtimes/test_base.py b/tests/runtimes/test_base.py index eedb77ef28e..9301005486f 100644 --- a/tests/runtimes/test_base.py +++ b/tests/runtimes/test_base.py @@ -18,6 +18,7 @@ import shutil import tempfile +import mlrun_pipelines.mounts import pytest import mlrun.errors @@ -243,7 +244,7 @@ def test_auto_mount_function_with_pvc_config(self, rundb_mock): mlconf.storage.auto_mount_params = pvc_params_str runtime = self._generate_runtime() - runtime.apply(mlrun.auto_mount()) + runtime.apply(mlrun_pipelines.mounts.auto_mount()) assert runtime.spec.disable_auto_mount self._execute_run(runtime) @@ -255,7 +256,7 @@ def test_auto_mount_function_with_pvc_config(self, rundb_mock): with pytest.raises( ValueError, match="failed to auto mount, need to set env vars" ): - runtime.apply(mlrun.auto_mount()) + runtime.apply(mlrun_pipelines.mounts.auto_mount()) @staticmethod def _setup_s3_mount(use_secret, non_anonymous): diff --git a/tests/system/demos/churn/assets/workflow.py b/tests/system/demos/churn/assets/workflow.py index d3b7bf2378c..7b64b112abd 100644 --- a/tests/system/demos/churn/assets/workflow.py +++ b/tests/system/demos/churn/assets/workflow.py @@ -13,8 +13,7 @@ # limitations under the License. 
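The modifier functions exercised in the tests above now live in `mlrun_pipelines.mounts` rather than `mlrun.platforms`. A minimal usage sketch mirroring those tests (names and paths are the illustrative values from the tests themselves):

```python
import mlrun
import mlrun_pipelines.mounts

fn = mlrun.new_function(
    "function-name", "function-project", kind=mlrun.runtimes.RuntimeKinds.job
)

# mount a config map as files inside the container
fn.apply(
    mlrun_pipelines.mounts.mount_configmap(
        configmap_name="my-config-map",
        mount_path="/myConfMapPath",
        volume_name="my-volume",
    )
)

# inject environment variables through the same modifier mechanism
fn.apply(
    mlrun_pipelines.mounts.set_env_variables({"ENV1": "value1", "ENV2": "value2"})
)
```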
# from kfp import dsl - -from mlrun import mount_v3io +from mlrun_pipelines.mounts import mount_v3io funcs = {} diff --git a/tests/system/demos/churn/test_churn.py b/tests/system/demos/churn/test_churn.py index f5bb5143405..8f21f66c7af 100644 --- a/tests/system/demos/churn/test_churn.py +++ b/tests/system/demos/churn/test_churn.py @@ -12,6 +12,7 @@ # See the License for the specific language governing permissions and # limitations under the License. # +import mlrun_pipelines.mounts import pytest import mlrun @@ -49,7 +50,7 @@ def create_demo_project(self) -> mlrun.projects.MlrunProject: description="clean and encode raw data", categories=["data-prep"], labels={"author": "yasha", "framework": "xgboost"}, - ).apply(mlrun.mount_v3io()) + ).apply(mlrun_pipelines.mounts.mount_v3io()) clean_data_function.spec.remote = True clean_data_function.spec.replicas = 1 diff --git a/tests/system/demos/horovod/assets/workflow.py b/tests/system/demos/horovod/assets/workflow.py index 9dbec8c33e4..6f6001244cf 100644 --- a/tests/system/demos/horovod/assets/workflow.py +++ b/tests/system/demos/horovod/assets/workflow.py @@ -13,8 +13,7 @@ # limitations under the License. # from kfp import dsl - -from mlrun import mount_v3io +from mlrun_pipelines.mounts import mount_v3io funcs = {} diff --git a/tests/system/demos/horovod/test_horovod.py b/tests/system/demos/horovod/test_horovod.py index c6da7894f53..01a632e9ac7 100644 --- a/tests/system/demos/horovod/test_horovod.py +++ b/tests/system/demos/horovod/test_horovod.py @@ -14,6 +14,7 @@ # import pathlib +import mlrun_pipelines.mounts import pytest import mlrun @@ -34,7 +35,7 @@ def create_demo_project(self) -> mlrun.projects.MlrunProject: self.project_name, str(self.assets_path), init_git=True ) - mlrun.mount_v3io() + mlrun_pipelines.mounts.mount_v3io() self._logger.debug("Uploading training file") trainer_src_path = str(self.assets_path / "horovod_training.py") diff --git a/tests/system/demos/sklearn/assets/workflow.py b/tests/system/demos/sklearn/assets/workflow.py index fe33ec07fa1..e48d0939ae5 100644 --- a/tests/system/demos/sklearn/assets/workflow.py +++ b/tests/system/demos/sklearn/assets/workflow.py @@ -13,8 +13,7 @@ # limitations under the License. 
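The demo workflows above change only where `mount_v3io` is imported from. For context, a sketch of how a workflow file of this kind typically applies the mount to its functions; the `init_functions` hook shown here is the conventional mlrun workflow entry point, abbreviated and hypothetical rather than copied from these demos:

```python
from mlrun_pipelines.mounts import mount_v3io


def init_functions(functions: dict, project=None, secrets=None):
    # apply the v3io fuse mount to every function the workflow uses
    for fn in functions.values():
        fn.apply(mount_v3io())
```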
# from kfp import dsl - -from mlrun import mount_v3io +from mlrun_pipelines.mounts import mount_v3io funcs = {} DATASET = "iris_dataset" diff --git a/tests/system/examples/dask/test_dask.py b/tests/system/examples/dask/test_dask.py index a72b29c870d..5f9b845163b 100644 --- a/tests/system/examples/dask/test_dask.py +++ b/tests/system/examples/dask/test_dask.py @@ -18,16 +18,16 @@ import kfp import kfp.compiler import pytest +from mlrun_pipelines.common.models import RunStatuses +from mlrun_pipelines.mounts import mount_v3io import mlrun.utils from mlrun import ( _run_pipeline, code_to_function, - mount_v3io, new_task, wait_for_pipeline_completion, ) -from mlrun.run import RunStatuses from tests.system.base import TestMLRunSystem diff --git a/tests/system/examples/jobs/test_jobs.py b/tests/system/examples/jobs/test_jobs.py index e3716dc1ac8..e8b2dac2502 100644 --- a/tests/system/examples/jobs/test_jobs.py +++ b/tests/system/examples/jobs/test_jobs.py @@ -17,6 +17,7 @@ import kfp.compiler import kfp.dsl import pytest +from mlrun_pipelines.mounts import mount_v3io from mlrun import ( _run_pipeline, @@ -24,7 +25,6 @@ new_task, wait_for_pipeline_completion, ) -from mlrun.platforms.other import mount_v3io from tests.system.base import TestMLRunSystem diff --git a/tests/system/feature_store/test_feature_store.py b/tests/system/feature_store/test_feature_store.py index d616962e19d..5ba614e237d 100644 --- a/tests/system/feature_store/test_feature_store.py +++ b/tests/system/feature_store/test_feature_store.py @@ -24,6 +24,7 @@ from time import sleep import fsspec +import mlrun_pipelines.mounts import numpy as np import pandas as pd import pyarrow @@ -1647,7 +1648,9 @@ def test_overwrite_single_file(self): feature_set.ingest( source, overwrite=True, - run_config=fstore.RunConfig(local=False).apply(mlrun.mount_v3io()), + run_config=fstore.RunConfig(local=False).apply( + mlrun_pipelines.mounts.mount_v3io() + ), targets=targets, ) @@ -2438,7 +2441,7 @@ def test_stream_source(self): ) function.spec.default_content_type = "application/json" run_config = fstore.RunConfig(function=function, local=False).apply( - mlrun.mount_v3io() + mlrun_pipelines.mounts.mount_v3io() ) myset.deploy_ingestion_service(source=source, run_config=run_config) # push records to stream @@ -2743,7 +2746,7 @@ def test_preview_saves_changes(self): ) function.spec.default_content_type = "application/json" run_config = fstore.RunConfig(function=function, local=False).apply( - mlrun.mount_v3io() + mlrun_pipelines.mounts.mount_v3io() ) fset.deploy_ingestion_service( source=v3io_source, @@ -2950,7 +2953,7 @@ def test_deploy_ingestion_service_with_different_targets( function.spec.default_content_type = "application/json" function.spec.image_pull_policy = "Always" run_config = fstore.RunConfig(function=function, local=False).apply( - mlrun.mount_v3io() + mlrun_pipelines.mounts.mount_v3io() ) fset.deploy_ingestion_service( source=source, run_config=run_config, targets=targets @@ -3004,7 +3007,7 @@ def test_get_offline_features_with_filter(self, engine): kind="dask", image="mlrun/ml-base", ) - dask_cluster.apply(mlrun.mount_v3io()) + dask_cluster.apply(mlrun_pipelines.mounts.mount_v3io()) dask_cluster.spec.remote = True dask_cluster.with_worker_requests(mem="2G") dask_cluster.save() @@ -3555,7 +3558,7 @@ def test_relation_join(self, engine, with_indexes): kind="dask", image="mlrun/ml-base", ) - dask_cluster.apply(mlrun.mount_v3io()) + dask_cluster.apply(mlrun_pipelines.mounts.mount_v3io()) dask_cluster.spec.remote = True 
dask_cluster.with_scheduler_requests(mem="2G") dask_cluster.save() @@ -3910,7 +3913,7 @@ def test_relation_join_multi_entities(self, engine, with_indexes): kind="dask", image="mlrun/ml-base", ) - dask_cluster.apply(mlrun.mount_v3io()) + dask_cluster.apply(mlrun_pipelines.mounts.mount_v3io()) dask_cluster.spec.remote = True dask_cluster.with_scheduler_requests(mem="2G") dask_cluster.save() @@ -4028,7 +4031,7 @@ def test_relation_asof_join(self, with_indexes, engine, with_graph): kind="dask", image="mlrun/ml-base", ) - dask_cluster.apply(mlrun.mount_v3io()) + dask_cluster.apply(mlrun_pipelines.mounts.mount_v3io()) dask_cluster.spec.remote = True dask_cluster.with_scheduler_requests(mem="2G") dask_cluster.save() @@ -4314,7 +4317,7 @@ def test_as_of_join_different_ts(self, engine): kind="dask", image="mlrun/ml-base", ) - dask_cluster.apply(mlrun.mount_v3io()) + dask_cluster.apply(mlrun_pipelines.mounts.mount_v3io()) dask_cluster.spec.remote = True dask_cluster.with_worker_requests(mem="2G") dask_cluster.save() @@ -4378,7 +4381,7 @@ def test_time_and_columns_filter(self, engine, timestamp_for_filtering): kind="dask", image="mlrun/ml-base", ) - dask_cluster.apply(mlrun.mount_v3io()) + dask_cluster.apply(mlrun_pipelines.mounts.mount_v3io()) dask_cluster.spec.remote = True dask_cluster.with_worker_requests(mem="2G") dask_cluster.save() diff --git a/tests/system/model_monitoring/test_model_monitoring.py b/tests/system/model_monitoring/test_model_monitoring.py index ee31fa641bc..0c15485f0c1 100644 --- a/tests/system/model_monitoring/test_model_monitoring.py +++ b/tests/system/model_monitoring/test_model_monitoring.py @@ -22,6 +22,7 @@ from typing import Optional, Union import fsspec +import mlrun_pipelines.mounts import numpy as np import pandas as pd import pytest @@ -267,7 +268,7 @@ def test_basic_model_monitoring(self): # Import the serving function from the function hub serving_fn = mlrun.import_function( "hub://v2-model-server", project=self.project_name - ).apply(mlrun.auto_mount()) + ).apply(mlrun_pipelines.mounts.auto_mount()) # enable model monitoring serving_fn.set_tracking() @@ -536,7 +537,7 @@ def test_model_monitoring_voting_ensemble(self): # Import the serving function from the function hub serving_fn = mlrun.import_function( "hub://v2-model-server", project=self.project_name - ).apply(mlrun.auto_mount()) + ).apply(mlrun_pipelines.mounts.auto_mount()) serving_fn.set_topology( "router", "mlrun.serving.VotingEnsemble", name="VotingEnsemble" @@ -904,7 +905,7 @@ def test_model_monitoring_with_kafka_stream(self): # Import the serving function from the function hub serving_fn = mlrun.import_function( "hub://v2_model_server", project=self.project_name - ).apply(mlrun.auto_mount()) + ).apply(mlrun_pipelines.mounts.auto_mount()) model_name = "sklearn_RandomForestClassifier" diff --git a/tests/system/projects/test_project.py b/tests/system/projects/test_project.py index bbb24cc16f7..19a33bae229 100644 --- a/tests/system/projects/test_project.py +++ b/tests/system/projects/test_project.py @@ -20,6 +20,7 @@ import sys from sys import executable +import mlrun_pipelines.common.models import pandas as pd import pytest from kfp import dsl @@ -184,7 +185,9 @@ def test_run(self): # load project from context dir and run a workflow project2 = mlrun.load_project(str(self.assets_path), name=name) run = project2.run("main", watch=True, artifact_path=f"v3io:///projects/{name}") - assert run.state == mlrun.run.RunStatuses.succeeded, "pipeline failed" + assert ( + run.state == 
mlrun_pipelines.common.models.RunStatuses.succeeded + ), "pipeline failed" # test the list_runs/artifacts/functions methods runs_list = project2.list_runs(name="test", labels=f"workflow={run.run_id}") @@ -215,7 +218,9 @@ def test_run_artifact_path(self): project = mlrun.load_project(str(self.assets_path), name=name) # Don't provide an artifact-path, to verify that the run-id is added by default workflow_run = project.run("main", watch=True) - assert workflow_run.state == mlrun.run.RunStatuses.succeeded, "pipeline failed" + assert ( + workflow_run.state == mlrun_pipelines.common.models.RunStatuses.succeeded + ), "pipeline failed" # check that the functions running in the workflow had the output_path set correctly db = mlrun.get_run_db() @@ -243,7 +248,9 @@ def test_run_git_load(self): project2.spec.load_source_on_run = True run = project2.run("main", artifact_path=f"v3io:///projects/{name}") run.wait_for_completion() - assert run.state == mlrun.run.RunStatuses.succeeded, "pipeline failed" + assert ( + run.state == mlrun_pipelines.common.models.RunStatuses.succeeded + ), "pipeline failed" def test_run_git_build(self): name = "pipe3" @@ -263,7 +270,9 @@ def test_run_git_build(self): arguments={"build": 1}, ) run.wait_for_completion() - assert run.state == mlrun.run.RunStatuses.succeeded, "pipeline failed" + assert ( + run.state == mlrun_pipelines.common.models.RunStatuses.succeeded + ), "pipeline failed" @staticmethod def _assert_cli_output(output: str, project_name: str): @@ -274,7 +283,9 @@ def _assert_cli_output(output: str, project_name: str): db = mlrun.get_run_db() pipeline = db.get_pipeline(run_id, project=project_name) state = pipeline["run"]["status"] - assert state == mlrun.run.RunStatuses.succeeded, "pipeline failed" + assert ( + state == mlrun_pipelines.common.models.RunStatuses.succeeded + ), "pipeline failed" def test_run_cli(self): # load project from git @@ -364,7 +375,9 @@ def test_inline_pipeline(self): workflow_handler=pipe_test, ) run.wait_for_completion() - assert run.state == mlrun.run.RunStatuses.succeeded, "pipeline failed" + assert ( + run.state == mlrun_pipelines.common.models.RunStatuses.succeeded + ), "pipeline failed" def test_cli_no_save_flag(self): # load project from git @@ -485,7 +498,9 @@ def _test_new_pipeline(self, name, engine): artifact_path=f"v3io:///projects/{name}", watch=True, ) - assert run.state == mlrun.run.RunStatuses.succeeded, "pipeline failed" + assert ( + run.state == mlrun_pipelines.common.models.RunStatuses.succeeded + ), "pipeline failed" fn = project.get_function("gen-iris", ignore_cache=True) assert fn.status.state == "ready" assert fn.spec.image, "image path got cleared" @@ -574,7 +589,9 @@ def _test_remote_pipeline_from_github( engine=engine, ) - assert run.state == mlrun.run.RunStatuses.succeeded, "pipeline failed" + assert ( + run.state == mlrun_pipelines.common.models.RunStatuses.succeeded + ), "pipeline failed" # run.run_id can be empty in case of a local engine: assert run.run_id is not None, "workflow's run id failed to fetch" @@ -632,7 +649,9 @@ def test_remote_from_archive(self): watch=True, engine="remote", ) - assert run.state == mlrun.run.RunStatuses.succeeded, "pipeline failed" + assert ( + run.state == mlrun_pipelines.common.models.RunStatuses.succeeded + ), "pipeline failed" assert run.run_id, "workflow's run id failed to fetch" def test_kfp_from_local_code(self): @@ -664,7 +683,9 @@ def test_kfp_from_local_code(self): "main", watch=True, ) - assert run.state == mlrun.run.RunStatuses.succeeded, "pipeline failed" + assert ( 
+ run.state == mlrun_pipelines.common.models.RunStatuses.succeeded + ), "pipeline failed" assert run.run_id, "workflow's run id failed to fetch" def test_local_cli(self): @@ -914,13 +935,13 @@ def test_failed_schedule_workflow_non_remote_source(self): # scheduling project with non-remote source (scheduling) run = project.run("main", schedule="*/10 * * * *") assert ( - run.state == mlrun.run.RunStatuses.failed + run.state == mlrun_pipelines.common.models.RunStatuses.failed ), f"pipeline should failed, state = {run.state}" # scheduling project with non-remote source (single run) run = project.run("main", engine="remote") assert ( - run.state == mlrun.run.RunStatuses.failed + run.state == mlrun_pipelines.common.models.RunStatuses.failed ), f"pipeline should failed, state = {run.state}" def test_remote_workflow_source(self): @@ -946,7 +967,7 @@ def test_remote_workflow_source(self): source=temporary_source, artifact_path=artifact_path, ) - assert run.state == mlrun.run.RunStatuses.succeeded + assert run.state == mlrun_pipelines.common.models.RunStatuses.succeeded # Ensuring that the project's source has not changed in the db: project_from_db = self._run_db.get_project(name) assert project_from_db.source == expected_source @@ -974,7 +995,7 @@ def test_remote_workflow_source(self): watch=True, ) assert ( - run.state == mlrun.run.RunStatuses.failed + run.state == mlrun_pipelines.common.models.RunStatuses.failed ), "pipeline supposed to fail since newflow is not in the temporary source" def test_workflow_image_fails(self): @@ -997,7 +1018,7 @@ def test_workflow_image_fails(self): "bad-image", engine="remote", ) - assert run.state == mlrun.run.RunStatuses.failed + assert run.state == mlrun_pipelines.common.models.RunStatuses.failed def _assert_scheduled(self, project_name, schedule_str): schedule = self._run_db.get_schedule(project_name, "main") @@ -1042,7 +1063,7 @@ def _assert_workflow_status(workflow, status): True, _assert_workflow_status, workflow, - mlrun.run.RunStatuses.running, + mlrun_pipelines.common.models.RunStatuses.running, ) # obtain the first run in the workflow when it began running @@ -1064,7 +1085,7 @@ def _assert_workflow_status(workflow, status): True, _assert_workflow_status, workflow, - mlrun.run.RunStatuses.failed, + mlrun_pipelines.common.models.RunStatuses.failed, ) def test_project_build_image(self): diff --git a/tests/system/runtimes/test_kfp.py b/tests/system/runtimes/test_kfp.py index 4ec9e07d8ca..f0fe97843b8 100644 --- a/tests/system/runtimes/test_kfp.py +++ b/tests/system/runtimes/test_kfp.py @@ -14,6 +14,7 @@ # import os +import mlrun_pipelines.mounts import pytest from kfp import dsl @@ -36,7 +37,7 @@ def test_kfp_with_mount(self): project=self.project_name, image="mlrun/mlrun", ) - kfp_with_v3io_mount.apply(mlrun.mount_v3io()) + kfp_with_v3io_mount.apply(mlrun_pipelines.mounts.mount_v3io()) @dsl.pipeline(name="job test", description="demonstrating mlrun usage") def job_pipeline(p1=9): diff --git a/tests/system/runtimes/test_nuclio.py b/tests/system/runtimes/test_nuclio.py index e93e7a973c3..4a4f44cf86b 100644 --- a/tests/system/runtimes/test_nuclio.py +++ b/tests/system/runtimes/test_nuclio.py @@ -17,6 +17,7 @@ import time import uuid +import mlrun_pipelines.mounts import pandas as pd import pytest import requests @@ -381,7 +382,7 @@ def test_kafka_source_with_avro(self, kafka_fixture): func.spec.max_replicas = 1 run_config = fstore.RunConfig(local=False, function=func).apply( - mlrun.auto_mount() + mlrun_pipelines.mounts.auto_mount() ) stocks_set_endpoint, _ = 
stocks_set.deploy_ingestion_service(
            source=kafka_source,
diff --git a/tests/test_execution.py b/tests/test_execution.py
index 94465267012..61be7a3cccb 100644
--- a/tests/test_execution.py
+++ b/tests/test_execution.py
@@ -15,6 +15,7 @@
 import unittest.mock

 import pytest
+from mlrun_pipelines.models import PipelineRun

 import mlrun
 import mlrun.artifacts
@@ -92,7 +93,7 @@ def test_context_from_run_dict(is_api):

     # create run object from dict and dict again to mock the run serialization
     run = mlrun.run.RunObject.from_dict(run_dict)
-    context = mlrun.MLClientCtx.from_dict(run.to_dict(), is_api=is_api)
+    context = mlrun.MLClientCtx.from_dict(PipelineRun(run.to_dict()), is_api=is_api)

     assert context.name == run_dict["metadata"]["name"]
     assert context._project == run_dict["metadata"]["project"]
@@ -145,7 +146,7 @@ def test_context_inputs(rundb_mock, is_api):

     # create run object from dict and dict again to mock the run serialization
     run = mlrun.run.RunObject.from_dict(run_dict)
-    context = mlrun.MLClientCtx.from_dict(run.to_dict(), is_api=is_api)
+    context = mlrun.MLClientCtx.from_dict(PipelineRun(run.to_dict()), is_api=is_api)
     assert (
         context.get_input("input-key").artifact_url
         == run_dict["spec"]["inputs"]["input-key"]
diff --git a/tests/test_kfp.py b/tests/test_kfp.py
index 5c39bd7df02..068f6e74213 100644
--- a/tests/test_kfp.py
+++ b/tests/test_kfp.py
@@ -18,13 +18,13 @@
 from pathlib import Path
 from tempfile import TemporaryDirectory

+import mlrun_pipelines.common.ops
 import numpy as np
 import pandas as pd
 import plotly.graph_objects as go
 import pytest
 import yaml

-import mlrun.kfpops
 from mlrun import new_function, new_task
 from mlrun.artifacts import PlotlyArtifact
 from mlrun.utils import logger
@@ -87,8 +87,10 @@ def kfp_dirs(monkeypatch):
         artifacts_dir=artifacts_dir,
         output_dir=output_dir,
     )
-    monkeypatch.setattr(mlrun.kfpops, "KFPMETA_DIR", str(meta_dir))
-    monkeypatch.setattr(mlrun.kfpops, "KFP_ARTIFACTS_DIR", str(artifacts_dir))
+    monkeypatch.setattr(mlrun_pipelines.common.ops, "KFPMETA_DIR", str(meta_dir))
+    monkeypatch.setattr(
+        mlrun_pipelines.common.ops, "KFP_ARTIFACTS_DIR", str(artifacts_dir)
+    )

     yield str(meta_dir), str(artifacts_dir), str(output_dir)


From aba8e7cdf6ddeea1324b0c0d8e8a037eb01b39cc Mon Sep 17 00:00:00 2001
From: davesh0812 <85231462+davesh0812@users.noreply.github.com>
Date: Wed, 17 Apr 2024 18:46:46 +0300
Subject: [PATCH 099/119] [Model Monitoring] Raise an error when using the
 mlrun default image with client_version<1.6.3 and set_tracking enabled
 (#5440)

---
 server/api/api/endpoints/functions.py | 16 ++++++++++++----
 1 file changed, 12 insertions(+), 4 deletions(-)

diff --git a/server/api/api/endpoints/functions.py b/server/api/api/endpoints/functions.py
index 964f3ce2d41..639283b1d6b 100644
--- a/server/api/api/endpoints/functions.py
+++ b/server/api/api/endpoints/functions.py
@@ -19,6 +19,7 @@
 from http import HTTPStatus
 from typing import List, Optional

+import semver
 from fastapi import (
     APIRouter,
     BackgroundTasks,
@@ -789,10 +790,7 @@ def _deploy_nuclio_runtime(
         model_monitoring_access_key = None
         if serving_to_monitor:
             _deploy_serving_monitoring(
-                auth_info,
-                db_session,
-                fn,
-                model_monitoring_access_key,
+                auth_info, db_session, fn, model_monitoring_access_key, client_version
             )
     if monitoring_application:
         fn = _deploy_monitoring_application(
@@ -813,6 +811,7 @@ def _deploy_serving_monitoring(
     db_session,
     fn,
     model_monitoring_access_key,
+    client_version,
 ):
     try:
         # Handle model monitoring
@@ -832,6 +831,15 @@ def _deploy_serving_monitoring(
         overwrite_stream = False

         if not mlrun.mlconf.is_ce_mode():
+            if (
+                fn.spec.image.startswith("mlrun/")
+                and client_version
+                and semver.Version.parse(client_version) < semver.Version.parse("1.6.3")
+            ):
+                raise mlrun.errors.MLRunBadRequestError(
+                    "Deploying a serving function that is based on an mlrun image "
+                    "('mlrun/') with tracking enabled requires client version >= 1.6.3"
+                )
             if not monitoring_deployment.is_monitoring_stream_has_the_new_stream_trigger(
                 project=fn.metadata.project,
                 db_session=db_session,

From 412dc9160744e90ea45c37e62d98f513f6b0ce17 Mon Sep 17 00:00:00 2001
From: tomer-mamia <125267619+tomerm-iguazio@users.noreply.github.com>
Date: Thu, 18 Apr 2024 17:46:40 +0300
Subject: [PATCH 100/119] [Datastore] Rely on buffer instead of mmap [1.6.x]
 (#5445)

---
 mlrun/datastore/helpers.py          | 18 -------
 mlrun/datastore/v3io.py             | 77 ++++++++++-------------------
 tests/system/datastore/test_v3io.py |  6 ++-
 3 files changed, 31 insertions(+), 70 deletions(-)
 delete mode 100644 mlrun/datastore/helpers.py

diff --git a/mlrun/datastore/helpers.py b/mlrun/datastore/helpers.py
deleted file mode 100644
index 09accf86137..00000000000
--- a/mlrun/datastore/helpers.py
+++ /dev/null
@@ -1,18 +0,0 @@
-# Copyright 2023 Iguazio
-#
-# Licensed under the Apache License, Version 2.0 (the "License");
-# you may not use this file except in compliance with the License.
-# You may obtain a copy of the License at
-#
-# http://www.apache.org/licenses/LICENSE-2.0
-#
-# Unless required by applicable law or agreed to in writing, software
-# distributed under the License is distributed on an "AS IS" BASIS,
-# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
-# See the License for the specific language governing permissions and
-# limitations under the License.
-#
-
-
-ONE_GB = 1024 * 1024 * 1024
-ONE_MB = 1024 * 1024
diff --git a/mlrun/datastore/v3io.py b/mlrun/datastore/v3io.py
index ed4e3ace6e2..d43ba48096d 100644
--- a/mlrun/datastore/v3io.py
+++ b/mlrun/datastore/v3io.py
@@ -12,8 +12,6 @@
 # See the License for the specific language governing permissions and
 # limitations under the License.
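The hunks that follow replace the mmap-based chunked upload with plain buffered reads. A condensed sketch of the resulting loop; `put_chunk` is a hypothetical stand-in for the v3io object-put request issued per chunk:

```python
def upload_in_chunks(src_path: str, put_chunk, max_chunk_size: int = 100 * 1024 * 1024):
    """Stream a local file to object storage in fixed-size chunks."""
    with open(src_path, "rb") as file_obj:
        append = False
        while True:
            # an empty memoryview is falsy, so this stops cleanly at EOF
            data = memoryview(file_obj.read(max_chunk_size))
            if not data:
                break
            # the first chunk creates the object, subsequent chunks append
            put_chunk(data, append=append)
            append = True
```

Dropping mmap avoids the ALLOCATIONGRANULARITY alignment constraints the old code had to work around, at the cost of copying each chunk into memory on read.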
-import mmap -import os import time from datetime import datetime @@ -22,7 +20,6 @@ from v3io.dataplane.response import HttpResponseError import mlrun -from mlrun.datastore.helpers import ONE_GB, ONE_MB from ..platforms.iguazio import parse_path, split_path from .base import ( @@ -32,6 +29,7 @@ ) V3IO_LOCAL_ROOT = "v3io" +V3IO_DEFAULT_UPLOAD_CHUNK_SIZE = 1024 * 1024 * 100 class V3ioStore(DataStore): @@ -94,46 +92,28 @@ def get_storage_options(self): ) return self._sanitize_storage_options(res) - def _upload(self, key: str, src_path: str, max_chunk_size: int = ONE_GB): + def _upload( + self, + key: str, + src_path: str, + max_chunk_size: int = V3IO_DEFAULT_UPLOAD_CHUNK_SIZE, + ): """helper function for upload method, allows for controlling max_chunk_size in testing""" container, path = split_path(self._join(key)) - file_size = os.path.getsize(src_path) # in bytes - if file_size <= ONE_MB: - with open(src_path, "rb") as source_file: - data = source_file.read() - self._do_object_request( - self.object.put, - container=container, - path=path, - body=data, - append=False, - ) - return - # chunk must be a multiple of the ALLOCATIONGRANULARITY - # https://docs.python.org/3/library/mmap.html - if residue := max_chunk_size % mmap.ALLOCATIONGRANULARITY: - # round down to the nearest multiple of ALLOCATIONGRANULARITY - max_chunk_size -= residue - with open(src_path, "rb") as file_obj: - file_offset = 0 - while file_offset < file_size: - chunk_size = min(file_size - file_offset, max_chunk_size) - with mmap.mmap( - file_obj.fileno(), - length=chunk_size, - access=mmap.ACCESS_READ, - offset=file_offset, - ) as mmap_obj: - append = file_offset != 0 - self._do_object_request( - self.object.put, - container=container, - path=path, - body=mmap_obj, - append=append, - ) - file_offset += chunk_size + append = False + while True: + data = memoryview(file_obj.read(max_chunk_size)) + if not data: + break + self._do_object_request( + self.object.put, + container=container, + path=path, + body=data, + append=append, + ) + append = True def upload(self, key, src_path): return self._upload(key, src_path) @@ -148,19 +128,16 @@ def get(self, key, size=None, offset=0): num_bytes=size, ).body - def _put(self, key, data, append=False, max_chunk_size: int = ONE_GB): + def _put( + self, + key, + data, + append=False, + max_chunk_size: int = V3IO_DEFAULT_UPLOAD_CHUNK_SIZE, + ): """helper function for put method, allows for controlling max_chunk_size in testing""" container, path = split_path(self._join(key)) buffer_size = len(data) # in bytes - if buffer_size <= ONE_MB: - self._do_object_request( - self.object.put, - container=container, - path=path, - body=data, - append=append, - ) - return buffer_offset = 0 try: data = memoryview(data) diff --git a/tests/system/datastore/test_v3io.py b/tests/system/datastore/test_v3io.py index bb655315e53..a64632402ef 100644 --- a/tests/system/datastore/test_v3io.py +++ b/tests/system/datastore/test_v3io.py @@ -89,13 +89,15 @@ def _setup_df_dir(self, first_file_path, second_file_path, file_extension): @pytest.mark.skip( reason="Skipping this test as it hangs when running against the CI system. 
ML-5598" ) - def test_v3io_large_object_upload(self, tmp_path): + @pytest.mark.parametrize( + "file_size", [4 * 1024 * 1024, 20 * 1024 * 1024] + ) # 4MB and 20MB + def test_v3io_large_object_upload(self, tmp_path, file_size): tempfile_1_path = os.path.join(tmp_path, "tempfile_1") tempfile_2_path = os.path.join(tmp_path, "tempfile_2") cmp_command = ["cmp", tempfile_1_path, tempfile_2_path] with open(tempfile_1_path, "wb") as f: - file_size = 20 * 1024 * 1024 # 20MB f.truncate(file_size) r = random.Random(123) for i in range(min(100, file_size)): From ead4c9fd5118b0f5359a56706a417ddcd8ba0f6a Mon Sep 17 00:00:00 2001 From: Laury Bueno Date: Sat, 27 Apr 2024 16:14:10 -0300 Subject: [PATCH 101/119] [Pipelines] Add a boilerplate pipeline adapter package for KFPv2 (#5448) --- .github/workflows/ci.yaml | 8 +- .../mlrun-pipelines-kfp-v2/setup.py | 45 +++++ .../src/mlrun_pipelines/helpers.py | 26 +++ .../src/mlrun_pipelines/mixins.py | 31 ++++ .../src/mlrun_pipelines/models.py | 100 ++++++++++ .../src/mlrun_pipelines/mounts.py | 171 ++++++++++++++++++ .../src/mlrun_pipelines/ops.py | 84 +++++++++ .../src/mlrun_pipelines/patcher.py | 14 ++ .../src/mlrun_pipelines/utils.py | 18 ++ 9 files changed, 496 insertions(+), 1 deletion(-) create mode 100644 pipeline-adapters/mlrun-pipelines-kfp-v2/setup.py create mode 100644 pipeline-adapters/mlrun-pipelines-kfp-v2/src/mlrun_pipelines/helpers.py create mode 100644 pipeline-adapters/mlrun-pipelines-kfp-v2/src/mlrun_pipelines/mixins.py create mode 100644 pipeline-adapters/mlrun-pipelines-kfp-v2/src/mlrun_pipelines/models.py create mode 100644 pipeline-adapters/mlrun-pipelines-kfp-v2/src/mlrun_pipelines/mounts.py create mode 100644 pipeline-adapters/mlrun-pipelines-kfp-v2/src/mlrun_pipelines/ops.py create mode 100644 pipeline-adapters/mlrun-pipelines-kfp-v2/src/mlrun_pipelines/patcher.py create mode 100644 pipeline-adapters/mlrun-pipelines-kfp-v2/src/mlrun_pipelines/utils.py diff --git a/.github/workflows/ci.yaml b/.github/workflows/ci.yaml index c61669480b6..dfa1c3eed7a 100644 --- a/.github/workflows/ci.yaml +++ b/.github/workflows/ci.yaml @@ -138,19 +138,25 @@ jobs: run: MLRUN_DOCKER_REGISTRY=ghcr.io/ MLRUN_DOCKER_CACHE_FROM_TAG=${{ steps.docker_cache.outputs.tag }} make test-migrations-dockerized package-tests: - name: Run package tests (Python ${{ matrix.python-version }}) + name: Run package tests (Python ${{ matrix.python-version }}; Pipeline ${{ matrix.pipeline-adapter }}) runs-on: ubuntu-latest strategy: matrix: # 3.9 is the current >= 1.3.0 python version python-version: [3.9] + default-pipeline-adapter: ["kfp-v1-8"] + pipeline-adapter: ["kfp-v1-8", "kfp-v2"] steps: - uses: actions/checkout@v3 - name: Set up python ${{ matrix.python-version }} uses: actions/setup-python@v4 with: python-version: ${{ matrix.python-version }} + pipeline-adapter: ${{ matrix.pipeline-adapter }} cache: 'pip' + - name: Change default pipeline adapter for MLRun + if: matrix.default-pipeline-adapter != matrix.pipeline-adapter + run: sed -i -e 's/${{ matrix.default-pipeline-adapter }}/${{ matrix.pipeline-adapter }}/g' requirements.txt - name: Install automation scripts dependencies and add mlrun to dev packages run: pip install -r automation/requirements.txt && pip install -e . 
- name: Test package diff --git a/pipeline-adapters/mlrun-pipelines-kfp-v2/setup.py b/pipeline-adapters/mlrun-pipelines-kfp-v2/setup.py new file mode 100644 index 00000000000..37f81728ce6 --- /dev/null +++ b/pipeline-adapters/mlrun-pipelines-kfp-v2/setup.py @@ -0,0 +1,45 @@ +# Copyright 2024 Iguazio +# +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. +# You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. + +import logging + +from setuptools import find_namespace_packages, setup + +logging.basicConfig(level=logging.INFO) +logger = logging.getLogger("mlrun-kfp-setup") + +setup( + name="mlrun-pipelines-kfp-v2-experiment", + version="0.1.2", + description="MLRun Pipelines package for providing KFP 2.* compatibility", + author="Yaron Haviv", + author_email="yaronh@iguazio.com", + license="Apache License 2.0", + url="https://github.com/mlrun/mlrun", + packages=find_namespace_packages( + where="src/", + include=[ + "mlrun_pipelines", + ], + ), + package_dir={"": "src"}, + keywords=[ + "mlrun", + "kfp", + ], + python_requires=">=3.9, <3.12", + install_requires=[ + "kfp[kubernetes]~=2.5.0", + ], +) diff --git a/pipeline-adapters/mlrun-pipelines-kfp-v2/src/mlrun_pipelines/helpers.py b/pipeline-adapters/mlrun-pipelines-kfp-v2/src/mlrun_pipelines/helpers.py new file mode 100644 index 00000000000..3bbbe42ad87 --- /dev/null +++ b/pipeline-adapters/mlrun-pipelines-kfp-v2/src/mlrun_pipelines/helpers.py @@ -0,0 +1,26 @@ +# Copyright 2024 Iguazio +# +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. +# You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. +# + +import typing + + +def new_pipe_metadata( + artifact_path: str = None, + cleanup_ttl: int = None, + op_transformers: list[typing.Callable] = None, +): + # This function is not required on a KFP 2.0 setup + # The definition is here for import compatibilty reasons + raise NotImplementedError diff --git a/pipeline-adapters/mlrun-pipelines-kfp-v2/src/mlrun_pipelines/mixins.py b/pipeline-adapters/mlrun-pipelines-kfp-v2/src/mlrun_pipelines/mixins.py new file mode 100644 index 00000000000..75052342ccd --- /dev/null +++ b/pipeline-adapters/mlrun-pipelines-kfp-v2/src/mlrun_pipelines/mixins.py @@ -0,0 +1,31 @@ +# Copyright 2024 Iguazio +# +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. +# You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. 
+# See the License for the specific language governing permissions and +# limitations under the License. +# + + +class KfpAdapterMixin: + def apply(self, modify): + """ + Apply a modifier to the runtime which is used to change the runtimes k8s object's spec. + Modifiers can be either KFP modifiers or MLRun modifiers (which are compatible with KFP) + + :param modify: a modifier runnable object + :return: the runtime (self) after the modifications + """ + raise NotImplementedError + + +class PipelineProviderMixin: + def resolve_project_from_workflow_manifest(self, workflow_manifest): + raise NotImplementedError diff --git a/pipeline-adapters/mlrun-pipelines-kfp-v2/src/mlrun_pipelines/models.py b/pipeline-adapters/mlrun-pipelines-kfp-v2/src/mlrun_pipelines/models.py new file mode 100644 index 00000000000..9d1d6ebd69f --- /dev/null +++ b/pipeline-adapters/mlrun-pipelines-kfp-v2/src/mlrun_pipelines/models.py @@ -0,0 +1,100 @@ +# Copyright 2024 Iguazio +# +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. +# You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. +# + +from kfp.dsl import PipelineTask +from mlrun_pipelines.common.helpers import FlexibleMapper + +# class pointer for type checking on the main MLRun codebase +PipelineNodeWrapper = PipelineTask + + +class PipelineManifest(FlexibleMapper): + """ + A Pipeline Manifest might have been created by an 1.8 SDK regardless of coming from a 2.0 API, + so this class tries to account for that + """ + + def get_schema_version(self) -> str: + raise NotImplementedError + + def is_argo_compatible(self) -> bool: + raise NotImplementedError + + def get_executors(self): + raise NotImplementedError + + +class PipelineRun(FlexibleMapper): + @property + def id(self): + raise NotImplementedError + + @property + def name(self): + raise NotImplementedError + + @name.setter + def name(self, name): + raise NotImplementedError + + @property + def status(self): + raise NotImplementedError + + @status.setter + def status(self, status): + raise NotImplementedError + + @property + def description(self): + raise NotImplementedError + + @description.setter + def description(self, description): + raise NotImplementedError + + @property + def created_at(self): + raise NotImplementedError + + @created_at.setter + def created_at(self, created_at): + raise NotImplementedError + + @property + def scheduled_at(self): + raise NotImplementedError + + @scheduled_at.setter + def scheduled_at(self, scheduled_at): + raise NotImplementedError + + @property + def finished_at(self): + raise NotImplementedError + + @finished_at.setter + def finished_at(self, finished_at): + raise NotImplementedError + + @property + def workflow_manifest(self) -> PipelineManifest: + raise NotImplementedError + + +class PipelineExperiment(FlexibleMapper): + @property + def id(self): + raise NotImplementedError diff --git a/pipeline-adapters/mlrun-pipelines-kfp-v2/src/mlrun_pipelines/mounts.py b/pipeline-adapters/mlrun-pipelines-kfp-v2/src/mlrun_pipelines/mounts.py new file mode 100644 index 00000000000..35172e6cca8 --- /dev/null +++ 
b/pipeline-adapters/mlrun-pipelines-kfp-v2/src/mlrun_pipelines/mounts.py
@@ -0,0 +1,171 @@
+# Copyright 2024 Iguazio
+#
+# Licensed under the Apache License, Version 2.0 (the "License");
+# you may not use this file except in compliance with the License.
+# You may obtain a copy of the License at
+#
+#   http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS,
+# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+# See the License for the specific language governing permissions and
+# limitations under the License.
+#
+
+
+def v3io_cred(api="", user="", access_key=""):
+    """
+    Modifier function to copy local v3io env vars to container
+
+    Usage::
+
+        train = train_op(...)
+        train.apply(v3io_cred())
+    """
+    raise NotImplementedError
+
+
+def mount_v3io(
+    name="v3io",
+    remote="",
+    access_key="",
+    user="",
+    secret=None,
+    volume_mounts=None,
+):
+    """Modifier function to apply to a Container Op to volume mount a v3io path
+
+    :param name: the volume name
+    :param remote: the v3io path to use for the volume. ~/ prefix will be replaced with /users/<username>/
+    :param access_key: the access key used to auth against v3io. if not given V3IO_ACCESS_KEY env var will be used
+    :param user: the username used to auth against v3io. if not given V3IO_USERNAME env var will be used
+    :param secret: k8s secret name which would be used to get the username and access key to auth against v3io.
+    :param volume_mounts: list of VolumeMount. empty volume mounts & remote will default to mount /v3io & /User.
+    """
+    raise NotImplementedError
+
+
+def mount_v3iod(namespace, v3io_config_configmap):
+    raise NotImplementedError
+
+
+def mount_pvc(pvc_name=None, volume_name="pipeline", volume_mount_path="/mnt/pipeline"):
+    """
+    Modifier function to apply to a Container Op to simplify volume, volume mount addition and
+    enable better reuse of volumes, volume claims across container ops.
+
+    Usage::
+
+        train = train_op(...)
+        train.apply(mount_pvc('claim-name', 'pipeline', '/mnt/pipeline'))
+    """
+    raise NotImplementedError
+
+
+def set_env_variables(env_vars_dict: dict[str, str] = None, **kwargs):
+    """
+    Modifier function to apply a set of environment variables to a runtime. Variables may be passed
+    as either a dictionary of name-value pairs, or as arguments to the function.
+    See `KubeResource.apply` for more information on modifiers.
+
+    Usage::
+
+        function.apply(set_env_variables({"ENV1": "value1", "ENV2": "value2"}))
+        or
+        function.apply(set_env_variables(ENV1=value1, ENV2=value2))
+
+    :param env_vars_dict: dictionary of env. variables
+    :param kwargs: environment variables passed as args
+    """
+    raise NotImplementedError
+
+
+def mount_s3(
+    secret_name=None,
+    aws_access_key="",
+    aws_secret_key="",
+    endpoint_url=None,
+    prefix="",
+    aws_region=None,
+    non_anonymous=False,
+):
+    """Modifier function to add s3 env vars or secrets to container
+
+    **Warning:**
+    Using this function to configure AWS credentials will expose these credentials in the pod spec of the runtime
+    created. It is recommended to use the `secret_name` parameter, or set the credentials as project-secrets and avoid
+    using this function.
+
+    :param secret_name: kubernetes secret name (storing the access/secret keys)
+    :param aws_access_key: AWS_ACCESS_KEY_ID value. If this parameter is not specified and AWS_ACCESS_KEY_ID env.
+                           variable is defined, the value will be taken from the env.
variable
+    :param aws_secret_key: AWS_SECRET_ACCESS_KEY value. If this parameter is not specified and AWS_SECRET_ACCESS_KEY
+                           env. variable is defined, the value will be taken from the env. variable
+    :param endpoint_url: s3 endpoint address (for non AWS s3)
+    :param prefix: string prefix to add before the env var name (for working with multiple s3 data stores)
+    :param aws_region: amazon region
+    :param non_anonymous: force the S3 API to use non-anonymous connection, even if no credentials are provided
+                          (for authenticating externally, such as through IAM instance-roles)
+    """
+    raise NotImplementedError
+
+
+def mount_spark_conf():
+    raise NotImplementedError
+
+
+def mount_secret(secret_name, mount_path, volume_name="secret", items=None):
+    """Modifier function to mount kubernetes secret as file(s)
+
+    :param secret_name: k8s secret name
+    :param mount_path: path to mount inside the container
+    :param volume_name: unique volume name
+    :param items: If unspecified, each key-value pair in the Data field
+                  of the referenced Secret will be projected into the
+                  volume as a file whose name is the key and content is
+                  the value.
+                  If specified, the listed keys will be projected into
+                  the specified paths, and unlisted keys will not be
+                  present.
+    """
+    raise NotImplementedError
+
+
+def mount_configmap(configmap_name, mount_path, volume_name="configmap", items=None):
+    """Modifier function to mount kubernetes configmap as file(s)
+
+    :param configmap_name: k8s configmap name
+    :param mount_path: path to mount inside the container
+    :param volume_name: unique volume name
+    :param items: If unspecified, each key-value pair in the Data field
+                  of the referenced Configmap will be projected into the
+                  volume as a file whose name is the key and content is
+                  the value.
+                  If specified, the listed keys will be projected into
+                  the specified paths, and unlisted keys will not be
+                  present.
+    """
+    raise NotImplementedError
+
+
+def mount_hostpath(host_path, mount_path, volume_name="hostpath"):
+    """Modifier function to mount a kubernetes host path as file(s)
+
+    :param host_path: host path
+    :param mount_path: path to mount inside the container
+    :param volume_name: unique volume name
+    """
+    raise NotImplementedError
+
+
+def auto_mount(pvc_name="", volume_mount_path="", volume_name=None):
+    """choose the mount based on env variables and params
+
+    volume will be selected by the following order:
+    - k8s PVC volume when both pvc_name and volume_mount_path are set
+    - k8s PVC volume when env var is set: MLRUN_PVC_MOUNT=<pvc-name>:<mount-path>
+    - k8s PVC volume if it's configured as the auto mount type
+    - iguazio v3io volume when V3IO_ACCESS_KEY and V3IO_USERNAME env vars are set
+    """
+    raise NotImplementedError
diff --git a/pipeline-adapters/mlrun-pipelines-kfp-v2/src/mlrun_pipelines/ops.py b/pipeline-adapters/mlrun-pipelines-kfp-v2/src/mlrun_pipelines/ops.py
new file mode 100644
index 00000000000..e2694579ef7
--- /dev/null
+++ b/pipeline-adapters/mlrun-pipelines-kfp-v2/src/mlrun_pipelines/ops.py
@@ -0,0 +1,84 @@
+# Copyright 2024 Iguazio
+#
+# Licensed under the Apache License, Version 2.0 (the "License");
+# you may not use this file except in compliance with the License.
+# You may obtain a copy of the License at
+#
+#   http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS,
+# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+# See the License for the specific language governing permissions and +# limitations under the License. +# + + +from kfp import dsl + + +def generate_kfp_dag_and_resolve_project(run, project=None): + raise NotImplementedError + + +def add_default_function_resources( + task: dsl.PipelineTask, +) -> dsl.PipelineTask: + raise NotImplementedError + + +def add_function_node_selection_attributes( + function, task: dsl.PipelineTask +) -> dsl.PipelineTask: + raise NotImplementedError + + +def add_annotations( + task: dsl.PipelineTask, + kind: str, + function, + func_url: str = None, + project: str = None, +): + raise NotImplementedError + + +def add_labels(task, function, scrape_metrics=False): + raise NotImplementedError + + +def add_default_env(task): + raise NotImplementedError + + +def generate_pipeline_node( + project_name: str, + name: str, + image: str, + command: list, + file_outputs: dict, + function, + func_url: str, + scrape_metrics: bool, + code_env: str, + registry: str, +): + raise NotImplementedError + + +def generate_image_builder_pipeline_node( + name, + function=None, + func_url=None, + cmd=None, +): + raise NotImplementedError + + +def generate_deployer_pipeline_node( + name, + function, + func_url=None, + cmd=None, +): + raise NotImplementedError diff --git a/pipeline-adapters/mlrun-pipelines-kfp-v2/src/mlrun_pipelines/patcher.py b/pipeline-adapters/mlrun-pipelines-kfp-v2/src/mlrun_pipelines/patcher.py new file mode 100644 index 00000000000..99be6280fc3 --- /dev/null +++ b/pipeline-adapters/mlrun-pipelines-kfp-v2/src/mlrun_pipelines/patcher.py @@ -0,0 +1,14 @@ +# Copyright 2024 Iguazio +# +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. +# You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. +# diff --git a/pipeline-adapters/mlrun-pipelines-kfp-v2/src/mlrun_pipelines/utils.py b/pipeline-adapters/mlrun-pipelines-kfp-v2/src/mlrun_pipelines/utils.py new file mode 100644 index 00000000000..92285b86741 --- /dev/null +++ b/pipeline-adapters/mlrun-pipelines-kfp-v2/src/mlrun_pipelines/utils.py @@ -0,0 +1,18 @@ +# Copyright 2024 Iguazio +# +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. +# You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. 
+#
+
+
+def compile_pipeline(pipeline, **kwargs):
+    raise NotImplementedError

From c634bd8d775574d296ca4218cccb74dfecc3f350 Mon Sep 17 00:00:00 2001
From: Liran BG
Date: Tue, 30 Apr 2024 11:05:43 +0300
Subject: [PATCH 102/119] [RemoteWorkflows] Fail fast when remote runner is failing (#5469)

---
 mlrun/model.py                  | 18 ++++++++++++++++++
 mlrun/projects/pipelines.py     | 27 ++++++++++++++++++++++-----
 server/api/crud/workflows.py    | 13 +++++++++++++
 tests/api/api/test_workflows.py | 31 +++++++++++++++++++++++++++++++
 4 files changed, 84 insertions(+), 5 deletions(-)

diff --git a/mlrun/model.py b/mlrun/model.py
index fd3b04511b8..d032c55adb4 100644
--- a/mlrun/model.py
+++ b/mlrun/model.py
@@ -624,6 +624,11 @@ def iteration(self):
     def iteration(self, iteration):
         self._iteration = iteration
 
+    def is_workflow_runner(self):
+        if not self.labels:
+            return False
+        return self.labels.get("job-type", "") == "workflow-runner"
+
 
 class HyperParamStrategies:
     grid = "grid"
@@ -1068,6 +1073,19 @@ def __init__(
         self.reason = reason
         self.notifications = notifications or {}
 
+    def is_failed(self) -> Optional[bool]:
+        """
+        This method returns whether a run has failed.
+        Returns None if the state has yet to be defined; the caller is
+        responsible for handling None (e.g. waiting for the state to be defined).
+        """
+        if not self.state:
+            return None
+        return self.state.casefold() in [
+            mlrun.run.RunStatuses.failed.casefold(),
+            mlrun.run.RunStatuses.error.casefold(),
+        ]
+
 
 class RunTemplate(ModelObj):
     """Run template"""
diff --git a/mlrun/projects/pipelines.py b/mlrun/projects/pipelines.py
index 37d52ad0c58..50ca27d26b2 100644
--- a/mlrun/projects/pipelines.py
+++ b/mlrun/projects/pipelines.py
@@ -13,6 +13,7 @@
 # limitations under the License.
 import abc
 import builtins
+import http
 import importlib.util as imputil
 import os
 import tempfile
@@ -877,17 +878,33 @@ def run(
             get_workflow_id_timeout=get_workflow_id_timeout,
         )
 
+        def _get_workflow_id_or_bail():
+            try:
+                return run_db.get_workflow_id(
+                    project=project.name,
+                    name=workflow_response.name,
+                    run_id=workflow_response.run_id,
+                    engine=workflow_spec.engine,
+                )
+            except mlrun.errors.MLRunHTTPStatusError as get_wf_exc:
+                # fail fast on specific errors
+                if get_wf_exc.error_status_code in [
+                    http.HTTPStatus.PRECONDITION_FAILED
+                ]:
+                    raise mlrun.errors.MLRunFatalFailureError(
+                        original_exception=get_wf_exc
+                    )
+
+                # raise for a retry (on other errors)
+                raise
+
         # Getting workflow id from run:
         response = retry_until_successful(
             1,
             get_workflow_id_timeout,
             logger,
             False,
-            run_db.get_workflow_id,
-            project=project.name,
-            name=workflow_response.name,
-            run_id=workflow_response.run_id,
-            engine=workflow_spec.engine,
+            _get_workflow_id_or_bail,
         )
         workflow_id = response.workflow_id
         # After fetching the workflow_id the workflow executed successfully
diff --git a/server/api/crud/workflows.py b/server/api/crud/workflows.py
index e1d1ca0e7a4..551dc682358 100644
--- a/server/api/crud/workflows.py
+++ b/server/api/crud/workflows.py
@@ -275,6 +275,19 @@ def get_workflow_id(
 
         if workflow_id is None:
             if (
+                run_object.metadata.is_workflow_runner()
+                and run_object.status.is_failed()
+            ):
+                state = run_object.status.state
+                state_text = run_object.status.error
+                workflow_name = run_object.spec.parameters.get(
+                    "workflow_name", ""
+                )
+                raise mlrun.errors.MLRunPreconditionFailedError(
+                    f"Failed to run workflow {workflow_name}, state: {state}, state_text: {state_text}"
+                )
+
+            elif (
                 engine == "local"
                 and state.casefold() == mlrun.run.RunStatuses.running.casefold()
             ):
diff --git 
a/tests/api/api/test_workflows.py b/tests/api/api/test_workflows.py index f0ccc90979e..365e4268a6f 100644 --- a/tests/api/api/test_workflows.py +++ b/tests/api/api/test_workflows.py @@ -52,6 +52,37 @@ def test_bad_schedule_format(db: Session, client: TestClient): assert resp.status_code == HTTPStatus.BAD_REQUEST +def test_get_workflow_fail_fast(db: Session, client: TestClient): + _create_proj_with_workflow(client) + + right_id = "".join(random.choices("0123456789abcdef", k=40)) + data = { + "metadata": { + "name": "run-name", + "labels": { + "job-type": "workflow-runner", + }, + }, + "spec": { + "parameters": {"workflow_name": "main"}, + }, + "status": { + "state": "failed", + "error": "some dummy error", + # workflow id is empty to simulate a failed remote runner + "results": {"workflow_id": None}, + }, + } + server.api.crud.Runs().store_run(db, data, right_id, project=PROJECT_NAME) + resp = client.get( + f"projects/{PROJECT_NAME}/workflows/{WORKFLOW_NAME}/runs/{right_id}" + ) + + # remote runner has failed, so the run should be failed + assert resp.status_code == HTTPStatus.PRECONDITION_FAILED + assert "some dummy error" in resp.json()["detail"] + + def test_get_workflow_bad_id(db: Session, client: TestClient): _create_proj_with_workflow(client) From e9bb9e2e9eb0d88e5659fabbfd907e97543a161b Mon Sep 17 00:00:00 2001 From: TomerShor <90552140+TomerShor@users.noreply.github.com> Date: Tue, 30 Apr 2024 20:47:55 +0300 Subject: [PATCH 103/119] [Project] Don't sync functions if running workflows in remote [1.6.x] (#5475) --- mlrun/projects/project.py | 14 ++++++++------ 1 file changed, 8 insertions(+), 6 deletions(-) diff --git a/mlrun/projects/project.py b/mlrun/projects/project.py index b3b7e0ebe5d..f1aeada685a 100644 --- a/mlrun/projects/project.py +++ b/mlrun/projects/project.py @@ -2650,12 +2650,14 @@ def run( "Remote repo is not defined, use .create_remote() + push()" ) - self.sync_functions(always=sync) - if not self.spec._function_objects: - raise ValueError( - "There are no functions in the project." - " Make sure you've set your functions with project.set_function()." - ) + if engine not in ["remote"]: + # for remote runs we don't require the functions to be synced as they can be loaded dynamically during run + self.sync_functions(always=sync) + if not self.spec._function_objects: + raise ValueError( + "There are no functions in the project." + " Make sure you've set your functions with project.set_function()." 
+ ) if not name and not workflow_path and not workflow_handler: raise ValueError("Workflow name, path, or handler must be specified") From 344e016adc6edcd335cd9b1d56391e7c51f545fe Mon Sep 17 00:00:00 2001 From: Saar Cohen <66667568+theSaarco@users.noreply.github.com> Date: Wed, 1 May 2024 20:35:56 +0300 Subject: [PATCH 104/119] [Auth] Authenticate with OAuth client-credentials grant [1.6.x] (#5483) --- mlrun/config.py | 4 + mlrun/db/auth_utils.py | 152 +++++++++++++++++++++++++++++++++++++ mlrun/db/httpdb.py | 49 +++++++----- tests/rundb/test_httpdb.py | 83 +++++++++++++++++++- 4 files changed, 269 insertions(+), 19 deletions(-) create mode 100644 mlrun/db/auth_utils.py diff --git a/mlrun/config.py b/mlrun/config.py index 5f9beeb4dfb..a2487cc5c0b 100644 --- a/mlrun/config.py +++ b/mlrun/config.py @@ -672,6 +672,10 @@ "access_key": "", }, "grafana_url": "", + "auth_with_client_id": { + "enabled": False, + "request_timeout": 5, + }, } _is_running_as_api = None diff --git a/mlrun/db/auth_utils.py b/mlrun/db/auth_utils.py new file mode 100644 index 00000000000..8fbc5c31e10 --- /dev/null +++ b/mlrun/db/auth_utils.py @@ -0,0 +1,152 @@ +# Copyright 2024 Iguazio +# +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. +# You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. + +from abc import ABC, abstractmethod +from datetime import datetime, timedelta + +import requests + +import mlrun.errors +from mlrun.utils import logger + + +class TokenProvider(ABC): + @abstractmethod + def get_token(self): + pass + + @abstractmethod + def is_iguazio_session(self): + pass + + +class StaticTokenProvider(TokenProvider): + def __init__(self, token: str): + self.token = token + + def get_token(self): + return self.token + + def is_iguazio_session(self): + return mlrun.platforms.iguazio.is_iguazio_session(self.token) + + +class OAuthClientIDTokenProvider(TokenProvider): + def __init__( + self, token_endpoint: str, client_id: str, client_secret: str, timeout=5 + ): + if not token_endpoint or not client_id or not client_secret: + raise mlrun.errors.MLRunValueError( + "Invalid client_id configuration for authentication. Must provide token endpoint, client-id and secret" + ) + self.token_endpoint = token_endpoint + self.client_id = client_id + self.client_secret = client_secret + self.timeout = timeout + + # Since we're only issuing POST requests, which are actually a disguised GET, then it's ok to allow retries + # on them. 
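+        # (Retrying a client_credentials token request is also harmless in
+        # itself: at worst the authorization server issues a fresh token twice.)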
+ self._session = mlrun.utils.HTTPSessionWithRetry( + retry_on_post=True, + verbose=True, + ) + + self._cleanup() + self._refresh_token_if_needed() + + def get_token(self): + self._refresh_token_if_needed() + return self.token + + def is_iguazio_session(self): + return False + + def _cleanup(self): + self.token = self.token_expiry_time = self.token_refresh_time = None + + def _refresh_token_if_needed(self): + now = datetime.now() + if self.token: + if self.token_refresh_time and now <= self.token_refresh_time: + return self.token + + # We only cleanup if token was really expired - even if we fail in refreshing the token, we can still + # use the existing one given that it's not expired. + if now >= self.token_expiry_time: + self._cleanup() + + self._issue_token_request() + return self.token + + def _issue_token_request(self, raise_on_error=False): + try: + headers = {"Content-Type": "application/x-www-form-urlencoded"} + request_body = { + "grant_type": "client_credentials", + "client_id": self.client_id, + "client_secret": self.client_secret, + } + response = self._session.request( + "POST", + self.token_endpoint, + timeout=self.timeout, + headers=headers, + data=request_body, + ) + except requests.RequestException as exc: + error = f"Retrieving token failed: {mlrun.errors.err_to_str(exc)}" + if raise_on_error: + raise mlrun.errors.MLRunRuntimeError(error) from exc + else: + logger.warning(error) + return + + if not response.ok: + error = "No error available" + if response.content: + try: + data = response.json() + error = data.get("error") + except Exception: + pass + logger.warning( + "Retrieving token failed", status=response.status_code, error=error + ) + if raise_on_error: + mlrun.errors.raise_for_status(response) + return + + self._parse_response(response.json()) + + def _parse_response(self, data: dict): + # Response is described in https://datatracker.ietf.org/doc/html/rfc6749#section-4.4.3 + # According to spec, there isn't a refresh token - just the access token and its expiry time (in seconds). 
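+        # For illustration, a successful response body typically looks like:
+        #   {"access_token": "eyJhbGciOi...", "token_type": "Bearer", "expires_in": 300}
+        # (field names per RFC 6749 section 5.1; the values above are made up)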
+ self.token = data.get("access_token") + expires_in = data.get("expires_in") + if not self.token or not expires_in: + token_str = "****" if self.token else "missing" + logger.warning( + "Failed to parse token response", token=token_str, expires_in=expires_in + ) + return + + now = datetime.now() + self.token_expiry_time = now + timedelta(seconds=expires_in) + self.token_refresh_time = now + timedelta(seconds=expires_in / 2) + logger.info( + "Successfully retrieved client-id token", + expires_in=expires_in, + expiry=str(self.token_expiry_time), + refresh=str(self.token_refresh_time), + ) diff --git a/mlrun/db/httpdb.py b/mlrun/db/httpdb.py index 2236c0b3533..48732e642c1 100644 --- a/mlrun/db/httpdb.py +++ b/mlrun/db/httpdb.py @@ -33,6 +33,7 @@ import mlrun.model_monitoring.model_endpoint import mlrun.platforms import mlrun.projects +from mlrun.db.auth_utils import OAuthClientIDTokenProvider, StaticTokenProvider from mlrun.errors import MLRunInvalidArgumentError, err_to_str from ..artifacts import Artifact @@ -133,17 +134,27 @@ def _enrich_and_validate(self, url): endpoint += f":{parsed_url.port}" base_url = f"{parsed_url.scheme}://{endpoint}{parsed_url.path}" + self.base_url = base_url username = parsed_url.username or config.httpdb.user password = parsed_url.password or config.httpdb.password - - username, password, token = mlrun.platforms.add_or_refresh_credentials( - parsed_url.hostname, username, password, config.httpdb.token - ) - - self.base_url = base_url self.user = username self.password = password - self.token = token + self.token_provider = None + + if config.auth_with_client_id.enabled: + self.token_provider = OAuthClientIDTokenProvider( + token_endpoint=mlrun.get_secret_or_env("MLRUN_AUTH_TOKEN_ENDPOINT"), + client_id=mlrun.get_secret_or_env("MLRUN_AUTH_CLIENT_ID"), + client_secret=mlrun.get_secret_or_env("MLRUN_AUTH_CLIENT_SECRET"), + timeout=config.auth_with_client_id.request_timeout, + ) + else: + username, password, token = mlrun.platforms.add_or_refresh_credentials( + parsed_url.hostname, username, password, config.httpdb.token + ) + + if token: + self.token_provider = StaticTokenProvider(token) def __repr__(self): cls = self.__class__.__name__ @@ -213,17 +224,19 @@ def api_call( if self.user: kw["auth"] = (self.user, self.password) - elif self.token: - # Iguazio auth doesn't support passing token through bearer, so use cookie instead - if mlrun.platforms.iguazio.is_iguazio_session(self.token): - session_cookie = f'j:{{"sid": "{self.token}"}}' - cookies = { - "session": session_cookie, - } - kw["cookies"] = cookies - else: - if "Authorization" not in kw.setdefault("headers", {}): - kw["headers"].update({"Authorization": "Bearer " + self.token}) + elif self.token_provider: + token = self.token_provider.get_token() + if token: + # Iguazio auth doesn't support passing token through bearer, so use cookie instead + if self.token_provider.is_iguazio_session(): + session_cookie = f'j:{{"sid": "{token}"}}' + cookies = { + "session": session_cookie, + } + kw["cookies"] = cookies + else: + if "Authorization" not in kw.setdefault("headers", {}): + kw["headers"].update({"Authorization": "Bearer " + token}) if mlrun.common.schemas.HeaderNames.client_version not in kw.setdefault( "headers", {} diff --git a/tests/rundb/test_httpdb.py b/tests/rundb/test_httpdb.py index d5f24cef678..64def6ce45c 100644 --- a/tests/rundb/test_httpdb.py +++ b/tests/rundb/test_httpdb.py @@ -28,12 +28,14 @@ import deepdiff import pytest +import requests_mock as requests_mock_package import mlrun.artifacts.base 
import mlrun.common.schemas import mlrun.errors import mlrun.projects.project from mlrun import RunObject +from mlrun.db.auth_utils import StaticTokenProvider from mlrun.db.httpdb import HTTPRunDB from tests.conftest import tests_root_directory, wait_for_server @@ -323,10 +325,89 @@ def test_bearer_auth(create_server): with pytest.raises(mlrun.errors.MLRunUnauthorizedError): db.list_runs() - db.token = token + db.token_provider = StaticTokenProvider(token) db.list_runs() +def test_client_id_auth(requests_mock: requests_mock_package.Mocker, monkeypatch): + """ + Test the httpdb behavior when using a client-id OAuth token. Test verifies that: + - Token is retrieved successfully, and kept in the httpdb class. + - Token is added as Bearer token when issuing API calls to BE. + - Token is refreshed when its expiry time is nearing. + - Some error flows when token cannot be retrieved - such as that token is still used while it hasn't expired. + """ + + token_url = "https://mock/token_endpoint/protocol/openid-connect/token" + test_env = { + "MLRUN_AUTH_TOKEN_ENDPOINT": token_url, + "MLRUN_AUTH_CLIENT_ID": "some-client-id", + "MLRUN_AUTH_CLIENT_SECRET": "some-client-secret", + } + + mlrun.mlconf.auth_with_client_id.enabled = True + for key, value in test_env.items(): + monkeypatch.setenv(key, value) + + expected_token = "my-cool-token" + # Set a 4-second expiry, so a refresh will happen in 2 seconds + requests_mock.post( + token_url, json={"access_token": expected_token, "expires_in": 4} + ) + + db_url = "http://mock-server:1919" + db = HTTPRunDB(db_url) + db.connect() + token = db.token_provider.get_token() + assert token == expected_token + assert len(requests_mock.request_history) == 1 + + time.sleep(1) + token = db.token_provider.get_token() + assert token == expected_token + # verify no additional calls were made (too early) + assert len(requests_mock.request_history) == 1 + + time.sleep(1.5) + expected_token = "my-other-cool-token" + requests_mock.post( + token_url, json={"access_token": expected_token, "expires_in": 3} + ) + token = db.token_provider.get_token() + assert token == expected_token + + # Check that httpdb attaches the token to API calls as Authorization header. + # Using trigger-migrations since it needs no payload and returns nothing, so easy to simulate. + requests_mock.post(f"{db_url}/api/v1/operations/migrations", status_code=200) + db.trigger_migrations() + + expected_auth = f"Bearer {expected_token}" + last_request = requests_mock.last_request + assert last_request.headers["Authorization"] == expected_auth + + # Check flow where we fail token retrieval while token is still active (not expired). + requests_mock.reset_mock() + requests_mock.post(token_url, status_code=401) + + time.sleep(2) + db.trigger_migrations() + + request_history = requests_mock.request_history + # We expect 2 calls - one for the token (which failed but didn't fail the flow) and one for the actual api call. + assert len(request_history) == 2 + # The token should still be the previous token, since it was not refreshed but it's not expired yet. 
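+    # (the failed refresh is swallowed inside the provider: _issue_token_request
+    # only logs a warning when raise_on_error is False, so nothing is raised here)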
+ assert request_history[-1].headers["Authorization"] == expected_auth + + # Now let the token expire, and verify commands still go out, only without auth + time.sleep(2) + requests_mock.reset_mock() + + db.trigger_migrations() + assert len(requests_mock.request_history) == 2 + assert "Authorization" not in requests_mock.last_request.headers + assert db.token_provider.token is None + + def _generate_runtime(name) -> mlrun.runtimes.KubejobRuntime: runtime = mlrun.runtimes.KubejobRuntime() runtime.metadata.name = name From 4c7780f6e2e1fd64e0b80c528c9e40151ecbf1ba Mon Sep 17 00:00:00 2001 From: Liran BG Date: Thu, 2 May 2024 09:44:35 +0300 Subject: [PATCH 105/119] [API] Create logger handler once [1.6.x] (#5485) --- mlrun/config.py | 6 +++++- 1 file changed, 5 insertions(+), 1 deletion(-) diff --git a/mlrun/config.py b/mlrun/config.py index a2487cc5c0b..f567270160b 100644 --- a/mlrun/config.py +++ b/mlrun/config.py @@ -1379,7 +1379,11 @@ def read_env(env=None, prefix=env_prefix): log_formatter = mlrun.utils.create_formatter_instance( mlrun.utils.FormatterKinds(log_formatter_name) ) - mlrun.utils.logger.get_handler("default").setFormatter(log_formatter) + current_handler = mlrun.utils.logger.get_handler("default") + current_formatter_name = current_handler.formatter.__class__.__name__ + desired_formatter_name = log_formatter.__class__.__name__ + if current_formatter_name != desired_formatter_name: + current_handler.setFormatter(log_formatter) # The default function pod resource values are of type str; however, when reading from environment variable numbers, # it converts them to type int if contains only number, so we want to convert them to str. From dbb53dc1e528cfa04c02960e9de29fcad530c9ed Mon Sep 17 00:00:00 2001 From: Liran BG Date: Thu, 2 May 2024 11:55:02 +0300 Subject: [PATCH 106/119] [Dask] Enhance extending env vars to avoid memory leak [1.6.x] (#5486) --- mlrun/runtimes/pod.py | 10 ++-- server/api/runtime_handlers/daskjob.py | 28 ++++++++- tests/api/runtimes/test_dask.py | 80 ++++++++++++++++++++++++++ 3 files changed, 111 insertions(+), 7 deletions(-) diff --git a/mlrun/runtimes/pod.py b/mlrun/runtimes/pod.py index d162ebbef4c..ea5718414ed 100644 --- a/mlrun/runtimes/pod.py +++ b/mlrun/runtimes/pod.py @@ -1012,12 +1012,12 @@ def is_env_exists(self, name): def _set_env(self, name, value=None, value_from=None): new_var = k8s_client.V1EnvVar(name=name, value=value, value_from=value_from) - i = 0 - for v in self.spec.env: - if get_item_name(v) == name: - self.spec.env[i] = new_var + + # ensure we don't have duplicate env vars with the same name + for env_index, value_item in enumerate(self.spec.env): + if get_item_name(value_item) == name: + self.spec.env[env_index] = new_var return self - i += 1 self.spec.env.append(new_var) return self diff --git a/server/api/runtime_handlers/daskjob.py b/server/api/runtime_handlers/daskjob.py index 1a3055ddd91..db79f994781 100644 --- a/server/api/runtime_handlers/daskjob.py +++ b/server/api/runtime_handlers/daskjob.py @@ -320,8 +320,32 @@ def enrich_dask_cluster( # TODO: we might never enter here, since running a function requires defining an image or "daskdev/dask:latest" ) - env = spec.env - env.extend(function.generate_runtime_k8s_env()) + env = function.generate_runtime_k8s_env() + + # filter any spec.env that already exists in env + # in other words, dont let spec.env override env (or not even duplicate it) + # we dont want to override env to ensure k8s runtime envs are enforced and correct + # leaving no room for human mistakes + def 
get_env_name(env_: Union[client.V1EnvVar, Dict]) -> str: + if isinstance(env_, client.V1EnvVar): + return env_.name + return env_.get("name", "") + + env.extend( + filter( + lambda spec_env: not any( + [ + True + for _env in env + # spec_env might be V1EnvVar or a dict + # _env is just a dict + if get_env_name(spec_env) == get_env_name(_env) + ] + ), + spec.env, + ) + ) + namespace = meta.namespace or config.namespace if spec.extra_pip: env.append(spec.extra_pip) diff --git a/tests/api/runtimes/test_dask.py b/tests/api/runtimes/test_dask.py index 05e346b0292..cf3b3782b85 100644 --- a/tests/api/runtimes/test_dask.py +++ b/tests/api/runtimes/test_dask.py @@ -20,11 +20,13 @@ from dask import distributed from fastapi.testclient import TestClient +from kubernetes import client as k8s_client from sqlalchemy.orm import Session import mlrun import mlrun.common.schemas import server.api.api.endpoints.functions +import server.api.runtime_handlers.daskjob from mlrun import mlconf from mlrun.platforms import auto_mount from mlrun.runtimes.utils import generate_resources @@ -432,6 +434,84 @@ def test_dask_with_security_context(self, db: Session, client: TestClient): _ = runtime.client self.assert_security_context(other_security_context) + def test_enrich_dask_cluster(self): + function = mlrun.runtimes.DaskCluster( + metadata=dict( + name="test", + project="project", + labels={"label1": "val1"}, + annotations={"annotation1": "val1"}, + ), + spec=dict( + nthreads=1, + worker_resources={"limits": {"memory": "1Gi"}}, + scheduler_resources={"limits": {"memory": "1Gi"}}, + env=[ + {"name": "MLRUN_NAMESPACE", "value": "other-namespace"}, + k8s_client.V1EnvVar(name="MLRUN_TAG", value="latest"), + ], + ), + ) + + function.generate_runtime_k8s_env = unittest.mock.Mock( + return_value=[ + {"name": "MLRUN_DEFAULT_PROJECT", "value": "project"}, + {"name": "MLRUN_NAMESPACE", "value": "test-namespace"}, + ] + ) + + # add default envvars that expected to be on enriched pods + # do it to verify later on it is not duplicated and appears only once + function.spec.env.extend(function.generate_runtime_k8s_env()) + + expected_resources = { + "limits": {"memory": "1Gi"}, + "requests": {}, + } + expected_env = [ + {"name": "MLRUN_DEFAULT_PROJECT", "value": "project"}, + {"name": "MLRUN_NAMESPACE", "value": "test-namespace"}, + k8s_client.V1EnvVar(name="MLRUN_TAG", value="latest"), + ] + expected_labels = { + "mlrun/project": "project", + "mlrun/class": "dask", + "mlrun/function": "test", + "label1": "val1", + "mlrun/scrape-metrics": "True", + "mlrun/tag": "latest", + } + + secrets = [] + client_version = "1.6.0" + client_python_version = "3.9" + scheduler_pod, worker_pod, function, namespace = ( + server.api.runtime_handlers.daskjob.enrich_dask_cluster( + function, secrets, client_version, client_python_version + ) + ) + + assert scheduler_pod.metadata.namespace == namespace + assert worker_pod.metadata.namespace == namespace + assert scheduler_pod.metadata.labels == expected_labels + assert worker_pod.metadata.labels == expected_labels + assert scheduler_pod.spec.containers[0].args == ["dask", "scheduler"] + assert worker_pod.spec.containers[0].args == [ + "dask", + "worker", + "--nthreads", + "1", + "--memory-limit", + "1Gi", + ] + assert worker_pod.spec.containers[0].resources == expected_resources + assert scheduler_pod.spec.containers[0].resources == expected_resources + assert worker_pod.spec.containers[0].env == expected_env + assert scheduler_pod.spec.containers[0].env == expected_env + + # used once by test, once by 
enrich_dask_cluster + assert function.generate_runtime_k8s_env.call_count == 2 + def test_deploy_dask_function_with_enriched_security_context( self, db: Session, client: TestClient, k8s_secrets_mock: K8sSecretsMock ): From d777f65dfb47ece15ec922e95183651168c23d45 Mon Sep 17 00:00:00 2001 From: Saar Cohen <66667568+theSaarco@users.noreply.github.com> Date: Thu, 2 May 2024 15:56:30 +0300 Subject: [PATCH 107/119] [Auth] Fix httpdb init from env [1.6.x] (#5497) --- mlrun/db/httpdb.py | 5 +++-- 1 file changed, 3 insertions(+), 2 deletions(-) diff --git a/mlrun/db/httpdb.py b/mlrun/db/httpdb.py index 48732e642c1..4e99f2d42e2 100644 --- a/mlrun/db/httpdb.py +++ b/mlrun/db/httpdb.py @@ -137,8 +137,6 @@ def _enrich_and_validate(self, url): self.base_url = base_url username = parsed_url.username or config.httpdb.user password = parsed_url.password or config.httpdb.password - self.user = username - self.password = password self.token_provider = None if config.auth_with_client_id.enabled: @@ -156,6 +154,9 @@ def _enrich_and_validate(self, url): if token: self.token_provider = StaticTokenProvider(token) + self.user = username + self.password = password + def __repr__(self): cls = self.__class__.__name__ return f"{cls}({self.base_url!r})" From 3327a2e410e43048e7f2206c1c8918bea80f1d6f Mon Sep 17 00:00:00 2001 From: Laury Bueno Date: Fri, 3 May 2024 10:10:53 -0300 Subject: [PATCH 108/119] [Pipelines] Add KFPv2 functionalities to MLRun (#5466) --- mlrun/platforms/__init__.py | 13 + .../src/mlrun_pipelines/models.py | 3 + .../src/mlrun_pipelines/ops.py | 2 +- .../src/mlrun_pipelines/mixins.py | 53 ++- .../src/mlrun_pipelines/models.py | 49 +-- .../src/mlrun_pipelines/mounts.py | 153 +++++++-- .../src/mlrun_pipelines/ops.py | 312 +++++++++++++++++- .../src/mlrun_pipelines/utils.py | 8 +- server/api/crud/pipelines.py | 2 +- 9 files changed, 539 insertions(+), 56 deletions(-) diff --git a/mlrun/platforms/__init__.py b/mlrun/platforms/__init__.py index 9ff4b22c07a..f1e65f30c69 100644 --- a/mlrun/platforms/__init__.py +++ b/mlrun/platforms/__init__.py @@ -17,6 +17,19 @@ from pprint import pprint from time import sleep +from mlrun_pipelines.common.mounts import VolumeMount +from mlrun_pipelines.mounts import ( + auto_mount, + mount_configmap, + mount_hostpath, + mount_pvc, + mount_s3, + mount_secret, + mount_v3io, + set_env_variables, + v3io_cred, +) + from .iguazio import ( V3ioStreamClient, add_or_refresh_credentials, diff --git a/pipeline-adapters/mlrun-pipelines-kfp-v1-8/src/mlrun_pipelines/models.py b/pipeline-adapters/mlrun-pipelines-kfp-v1-8/src/mlrun_pipelines/models.py index 0cc61bc715b..649c5808de9 100644 --- a/pipeline-adapters/mlrun-pipelines-kfp-v1-8/src/mlrun_pipelines/models.py +++ b/pipeline-adapters/mlrun-pipelines-kfp-v1-8/src/mlrun_pipelines/models.py @@ -106,6 +106,9 @@ def finished_at(self): def finished_at(self, finished_at): self._external_data["finished_at"] = finished_at + def workflow_manifest(self) -> PipelineManifest: + return self._workflow_manifest + class PipelineExperiment(FlexibleMapper): @property diff --git a/pipeline-adapters/mlrun-pipelines-kfp-v1-8/src/mlrun_pipelines/ops.py b/pipeline-adapters/mlrun-pipelines-kfp-v1-8/src/mlrun_pipelines/ops.py index 964b4bd3c46..f2312876ad7 100644 --- a/pipeline-adapters/mlrun-pipelines-kfp-v1-8/src/mlrun_pipelines/ops.py +++ b/pipeline-adapters/mlrun-pipelines-kfp-v1-8/src/mlrun_pipelines/ops.py @@ -245,7 +245,7 @@ def add_function_node_selection_attributes( def generate_kfp_dag_and_resolve_project(run, project=None): - workflow = 
run._workflow_manifest + workflow = run.workflow_manifest() if not workflow: return None, project, None diff --git a/pipeline-adapters/mlrun-pipelines-kfp-v2/src/mlrun_pipelines/mixins.py b/pipeline-adapters/mlrun-pipelines-kfp-v2/src/mlrun_pipelines/mixins.py index 75052342ccd..b62d0f0ae07 100644 --- a/pipeline-adapters/mlrun-pipelines-kfp-v2/src/mlrun_pipelines/mixins.py +++ b/pipeline-adapters/mlrun-pipelines-kfp-v2/src/mlrun_pipelines/mixins.py @@ -13,6 +13,10 @@ # limitations under the License. # +from mlrun_pipelines.common.helpers import PROJECT_ANNOTATION + +import mlrun + class KfpAdapterMixin: def apply(self, modify): @@ -23,9 +27,54 @@ def apply(self, modify): :param modify: a modifier runnable object :return: the runtime (self) after the modifications """ - raise NotImplementedError + return modify(self) class PipelineProviderMixin: def resolve_project_from_workflow_manifest(self, workflow_manifest): - raise NotImplementedError + for _, executor in workflow_manifest.get_executors(): + project_from_annotation = ( + executor.get("metadata", {}) + .get("annotations", {}) + .get(PROJECT_ANNOTATION) + ) + if project_from_annotation: + return project_from_annotation + command = executor.get("container", {}).get("command", []) + action = None + for index, argument in enumerate(command): + if argument == "mlrun" and index + 1 < len(command): + action = command[index + 1] + break + if action: + if action == "deploy": + project = self._resolve_project_from_command( + command, + hyphen_p_is_also_project=True, + has_func_url_flags=True, + has_runtime_flags=False, + ) + if project: + return project + elif action == "run": + project = self._resolve_project_from_command( + command, + hyphen_p_is_also_project=False, + has_func_url_flags=True, + has_runtime_flags=True, + ) + if project: + return project + elif action == "build": + project = self._resolve_project_from_command( + command, + hyphen_p_is_also_project=False, + has_func_url_flags=False, + has_runtime_flags=True, + ) + if project: + return project + else: + raise NotImplementedError(f"Unknown action: {action}") + + return mlrun.mlconf.default_project diff --git a/pipeline-adapters/mlrun-pipelines-kfp-v2/src/mlrun_pipelines/models.py b/pipeline-adapters/mlrun-pipelines-kfp-v2/src/mlrun_pipelines/models.py index 9d1d6ebd69f..7783cef97bb 100644 --- a/pipeline-adapters/mlrun-pipelines-kfp-v2/src/mlrun_pipelines/models.py +++ b/pipeline-adapters/mlrun-pipelines-kfp-v2/src/mlrun_pipelines/models.py @@ -27,74 +27,85 @@ class PipelineManifest(FlexibleMapper): """ def get_schema_version(self) -> str: - raise NotImplementedError + try: + return self._external_data["schemaVersion"] + except KeyError: + return self._external_data["apiVersion"] def is_argo_compatible(self) -> bool: - raise NotImplementedError + if self.get_schema_version().startswith("argoproj.io"): + return True + return False def get_executors(self): - raise NotImplementedError + if self.is_argo_compatible(): + yield from [ + (t.get("name"), t) for t in self._external_data["spec"]["templates"] + ] + else: + yield from self._external_data["deploymentSpec"]["executors"].items() class PipelineRun(FlexibleMapper): @property def id(self): - raise NotImplementedError + return self._external_data["run_id"] @property def name(self): - raise NotImplementedError + return self._external_data["display_name"] @name.setter def name(self, name): - raise NotImplementedError + self._external_data["display_name"] = name @property def status(self): - raise NotImplementedError + return 
self._external_data["state"] @status.setter def status(self, status): - raise NotImplementedError + self._external_data["state"] = status @property def description(self): - raise NotImplementedError + return self._external_data["description"] @description.setter def description(self, description): - raise NotImplementedError + self._external_data["description"] = description @property def created_at(self): - raise NotImplementedError + return self._external_data["created_at"] @created_at.setter def created_at(self, created_at): - raise NotImplementedError + self._external_data["created_at"] = created_at @property def scheduled_at(self): - raise NotImplementedError + return self._external_data["scheduled_at"] @scheduled_at.setter def scheduled_at(self, scheduled_at): - raise NotImplementedError + self._external_data["scheduled_at"] = scheduled_at @property def finished_at(self): - raise NotImplementedError + return self._external_data["finished_at"] @finished_at.setter def finished_at(self, finished_at): - raise NotImplementedError + self._external_data["finished_at"] = finished_at - @property def workflow_manifest(self) -> PipelineManifest: - raise NotImplementedError + return PipelineManifest( + self._external_data["pipeline_spec"], + ) class PipelineExperiment(FlexibleMapper): @property def id(self): - raise NotImplementedError + return self._external_data["experiment_id"] diff --git a/pipeline-adapters/mlrun-pipelines-kfp-v2/src/mlrun_pipelines/mounts.py b/pipeline-adapters/mlrun-pipelines-kfp-v2/src/mlrun_pipelines/mounts.py index 35172e6cca8..d17e4a8f7b7 100644 --- a/pipeline-adapters/mlrun-pipelines-kfp-v2/src/mlrun_pipelines/mounts.py +++ b/pipeline-adapters/mlrun-pipelines-kfp-v2/src/mlrun_pipelines/mounts.py @@ -12,6 +12,12 @@ # See the License for the specific language governing permissions and # limitations under the License. # +import os + +import mlrun +from mlrun.config import config +from mlrun.errors import MLRunInvalidArgumentError +from mlrun.utils import logger def v3io_cred(api="", user="", access_key=""): @@ -52,15 +58,36 @@ def mount_v3iod(namespace, v3io_config_configmap): def mount_pvc(pvc_name=None, volume_name="pipeline", volume_mount_path="/mnt/pipeline"): """ - Modifier function to apply to a Container Op to simplify volume, volume mount addition and - enable better reuse of volumes, volume claims across container ops. + Modifier function to apply to a PipelineTask to simplify volume, volume mount addition and + enable better reuse of volumes, volume claims across pipelinetasks. Usage:: train = train_op(...) train.apply(mount_pvc('claim-name', 'pipeline', '/mnt/pipeline')) """ - raise NotImplementedError + if "MLRUN_PVC_MOUNT" in os.environ: + mount = os.environ.get("MLRUN_PVC_MOUNT") + items = mount.split(":") + if len(items) != 2: + raise MLRunInvalidArgumentError( + "MLRUN_PVC_MOUNT should include :" + ) + pvc_name = items[0] + volume_mount_path = items[1] + + if not pvc_name: + raise MLRunInvalidArgumentError( + "No PVC name: use the pvc_name parameter or configure the MLRUN_PVC_MOUNT environment variable" + ) + + def _mount_pvc(runtime): + runtime.spec.update_vols_and_mounts( + volumes=[{"PVC": {"name": pvc_name}, "name": volume_name}], + volume_mounts=[{"mountPath": volume_mount_path, "name": volume_name}], + ) + + return _mount_pvc def set_env_variables(env_vars_dict: dict[str, str] = None, **kwargs): @@ -78,7 +105,16 @@ def set_env_variables(env_vars_dict: dict[str, str] = None, **kwargs): :param env_vars_dict: dictionary of env. 
variables :param kwargs: environment variables passed as args """ - raise NotImplementedError + env_data = env_vars_dict.copy() if env_vars_dict else {} + for key, value in kwargs.items(): + env_data[key] = value + + def _set_env_variables(runtime): + for _key, _value in env_data.items(): + runtime.set_env(_key, _value) + return runtime + + return _set_env_variables def mount_s3( @@ -108,7 +144,53 @@ def mount_s3( :param non_anonymous: force the S3 API to use non-anonymous connection, even if no credentials are provided (for authenticating externally, such as through IAM instance-roles) """ - raise NotImplementedError + if secret_name and (aws_access_key or aws_secret_key): + raise mlrun.errors.MLRunInvalidArgumentError( + "can use k8s_secret for credentials or specify them (aws_access_key, aws_secret_key) not both" + ) + + if not secret_name and ( + aws_access_key + or os.environ.get(prefix + "AWS_ACCESS_KEY_ID") + or aws_secret_key + or os.environ.get(prefix + "AWS_SECRET_ACCESS_KEY") + ): + logger.warning( + "it is recommended to use k8s secret (specify secret_name), " + "specifying the aws_access_key/aws_secret_key directly is unsafe" + ) + + def _use_s3_cred(runtime): + from os import environ + + _access_key = aws_access_key or environ.get(prefix + "AWS_ACCESS_KEY_ID") + _secret_key = aws_secret_key or environ.get(prefix + "AWS_SECRET_ACCESS_KEY") + _endpoint_url = endpoint_url or environ.get(prefix + "S3_ENDPOINT_URL") + + if _endpoint_url: + runtime.set_env(prefix + "S3_ENDPOINT_URL", endpoint_url) + if aws_region: + runtime.set_env(prefix + "AWS_REGION", aws_region) + if non_anonymous: + runtime.set_env(prefix + "S3_NON_ANONYMOUS", "true") + + if secret_name: + runtime.set_env_from_secret( + prefix + "AWS_ACCESS_KEY_ID", + secret=secret_name, + secret_key="AWS_ACCESS_KEY_ID", + ) + runtime.set_env_from_secret( + prefix + "AWS_SECRET_ACCESS_KEY", + secret=secret_name, + secret_key="AWS_SECRET_ACCESS_KEY", + ) + else: + runtime.set_env(prefix + "AWS_ACCESS_KEY_ID", _access_key) + runtime.set_env(prefix + "AWS_SECRET_ACCESS_KEY", _secret_key) + return runtime + + return _use_s3_cred def mount_spark_conf(): @@ -121,15 +203,18 @@ def mount_secret(secret_name, mount_path, volume_name="secret", items=None): :param secret_name: k8s secret name :param mount_path: path to mount inside the container :param volume_name: unique volume name - :param items: If unspecified, each key-value pair in the Data field - of the referenced Secret will be projected into the - volume as a file whose name is the key and content is - the value. - If specified, the listed keys will be projected into - the specified paths, and unlisted keys will not be - present. 
+ :param items: unused due to lack of support on KFPv2 SDK + kept for backwards compatibility """ - raise NotImplementedError + + def _mount_secret(runtime): + runtime.spec.update_vols_and_mounts( + volumes=[{"secret": {"name": secret_name}, "name": volume_name}], + volume_mounts=[{"mountPath": mount_path, "name": volume_name}], + ) + return runtime + + return _mount_secret def mount_configmap(configmap_name, mount_path, volume_name="configmap", items=None): @@ -138,15 +223,18 @@ def mount_configmap(configmap_name, mount_path, volume_name="configmap", items=N :param configmap_name: k8s configmap name :param mount_path: path to mount inside the container :param volume_name: unique volume name - :param items: If unspecified, each key-value pair in the Data field - of the referenced Configmap will be projected into the - volume as a file whose name is the key and content is - the value. - If specified, the listed keys will be projected into - the specified paths, and unlisted keys will not be - present. + :param items: unused due to lack of support on KFPv2 SDK + kept for backwards compatibility """ - raise NotImplementedError + + def _mount_configmap(runtime): + runtime.spec.update_vols_and_mounts( + volumes=[{"configMap": {"name": configmap_name}, "name": volume_name}], + volume_mounts=[{"mountPath": mount_path, "name": volume_name}], + ) + return runtime + + return _mount_configmap def mount_hostpath(host_path, mount_path, volume_name="hostpath"): @@ -156,7 +244,9 @@ def mount_hostpath(host_path, mount_path, volume_name="hostpath"): :param mount_path: path to mount inside the container :param volume_name: unique volume name """ - raise NotImplementedError + raise NotImplementedError( + "Support for hostPath mounting is not yet available on the KFP 2 engine" + ) def auto_mount(pvc_name="", volume_mount_path="", volume_name=None): @@ -168,4 +258,21 @@ def auto_mount(pvc_name="", volume_mount_path="", volume_name=None): - k8s PVC volume if it's configured as the auto mount type - iguazio v3io volume when V3IO_ACCESS_KEY and V3IO_USERNAME env vars are set """ - raise NotImplementedError + if pvc_name and volume_mount_path: + return mount_pvc( + pvc_name=pvc_name, + volume_mount_path=volume_mount_path, + volume_name=volume_name or "shared-persistency", + ) + if "MLRUN_PVC_MOUNT" in os.environ: + return mount_pvc( + volume_name=volume_name or "shared-persistency", + ) + # In the case of MLRun-kit when working remotely, no env variables will be defined but auto-mount + # parameters may still be declared - use them in that case. + if config.storage.auto_mount_type == "pvc": + return mount_pvc(**config.get_storage_auto_mount_params()) + if "V3IO_ACCESS_KEY" in os.environ: + return mount_v3io(name=volume_name or "v3io") + + raise ValueError("failed to auto mount, need to set env vars") diff --git a/pipeline-adapters/mlrun-pipelines-kfp-v2/src/mlrun_pipelines/ops.py b/pipeline-adapters/mlrun-pipelines-kfp-v2/src/mlrun_pipelines/ops.py index e2694579ef7..a13a488803a 100644 --- a/pipeline-adapters/mlrun-pipelines-kfp-v2/src/mlrun_pipelines/ops.py +++ b/pipeline-adapters/mlrun-pipelines-kfp-v2/src/mlrun_pipelines/ops.py @@ -13,24 +13,132 @@ # limitations under the License. 
# +import os from kfp import dsl +from kfp import kubernetes as kfp_k8s +from mlrun_pipelines.common.helpers import ( + FUNCTION_ANNOTATION, + PROJECT_ANNOTATION, + RUN_ANNOTATION, +) +from mlrun_pipelines.common.ops import PipelineRunType + +import mlrun +from mlrun.config import config +from mlrun.utils import get_in, logger def generate_kfp_dag_and_resolve_project(run, project=None): - raise NotImplementedError + workflow = run.workflow_manifest() + if not workflow: + return None, project, None + + templates = {} + for name, template in workflow.get_executors(): + project = project or get_in( + template, ["metadata", "annotations", PROJECT_ANNOTATION], "" + ) + templates[name] = { + "run_type": get_in( + template, ["metadata", "annotations", RUN_ANNOTATION], "" + ), + "function": get_in( + template, ["metadata", "annotations", FUNCTION_ANNOTATION], "" + ), + } + + dag = {} + nodes = [] + if run["run_details"]: + nodes = run["run_details"].get("task_details", []) + for node in nodes: + name = ( + node["display_name"] + if not node["child_tasks"] + else node["child_tasks"][0]["pod_name"] + ) + if not name: + continue + record = { + "phase": node["state"], + "started_at": node["create_time"], + "finished_at": node["end_time"], + "id": node["task_id"], + "parent": node.get("parent_task_id", ""), + "name": node["display_name"], + "type": "DAG" if node["child_tasks"] else "Pod", + "children": [c["pod_name"] for c in node["child_tasks"] or []], + } + + if name in templates: + record["function"] = templates[name].get("function") + record["run_type"] = templates[name].get("run_type") + dag[name] = record + + return dag, project, run["state"] def add_default_function_resources( task: dsl.PipelineTask, ) -> dsl.PipelineTask: - raise NotImplementedError + __set_task_requests = { + "cpu": task.set_cpu_request, + "memory": task.set_memory_request, + } + __set_task_limits = { + "cpu": task.set_cpu_limit, + "memory": task.set_memory_limit, + } + + default_resources = config.get_default_function_pod_resources() + for resource_name, resource_value in default_resources["requests"].items(): + if resource_value: + __set_task_requests[resource_name](resource_value) + + for resource_name, resource_value in default_resources["limits"].items(): + if resource_value: + __set_task_limits[resource_name](resource_value) + + return task def add_function_node_selection_attributes( function, task: dsl.PipelineTask ) -> dsl.PipelineTask: - raise NotImplementedError + if not mlrun.runtimes.RuntimeKinds.is_local_runtime(function.kind): + if getattr(function.spec, "node_selector"): + for k, v in function.spec.node_selector.items(): + task = kfp_k8s.add_node_selector(task, k, v) + + if getattr(function.spec, "tolerations"): + if hasattr(kfp_k8s, "add_toleration"): + for t in function.spec.tolerations: + task = kfp_k8s.add_toleration( + task, + t.key, + t.operator, + t.value, + t.effect, + t.toleration_seconds, + ) + else: + # TODO: remove this warning as soon as KFP SDK >=2.7.0 is available for MLRun SDK + logger.warning( + "Support for Pod tolerations is not yet available on the KFP 2 engine", + project=function.metadata.project, + function_name=function.metadata.name, + ) + + # TODO: remove this warning as soon as KFP SDK provides support for affinity management + if getattr(function.spec, "affinity"): + logger.warning( + "Support for Pod affinity is not yet available on the KFP 2 engine", + project=function.metadata.project, + function_name=function.metadata.name, + ) + + return task def add_annotations( @@ -40,15 
+148,118 @@ def add_annotations( func_url: str = None, project: str = None, ): - raise NotImplementedError + # TODO: remove this warning as soon as KFP SDK >=2.7.0 is available for MLRun SDK + if not hasattr(kfp_k8s, "add_pod_annotation"): + logger.warning( + "Support for Pod annotations is not yet available on the KFP 2 engine", + project=project, + function_name=function.metadata.name, + ) + return + + if func_url and func_url.startswith("db://"): + func_url = func_url[len("db://") :] + kfp_k8s.add_pod_annotation(task, RUN_ANNOTATION, kind) + kfp_k8s.add_pod_annotation( + task, PROJECT_ANNOTATION, project or function.metadata.project + ) + kfp_k8s.add_pod_annotation(task, FUNCTION_ANNOTATION, func_url or function.uri) + return task def add_labels(task, function, scrape_metrics=False): - raise NotImplementedError + # TODO: remove this warning as soon as KFP SDK >=2.7.0 is available for MLRun SDK + if not hasattr(kfp_k8s, "add_pod_label"): + logger.warning( + "Support for Pod labels is not yet available on the KFP 2 engine", + project=function.metadata.project, + function_name=function.metadata.name, + ) + return + + prefix = mlrun.runtimes.utils.mlrun_key + kfp_k8s.add_pod_label(task, prefix + "class", function.kind) + kfp_k8s.add_pod_label(task, prefix + "function", function.metadata.name) + kfp_k8s.add_pod_label(task, prefix + "name", task.name) + kfp_k8s.add_pod_label(task, prefix + "project", function.metadata.project) + kfp_k8s.add_pod_label(task, prefix + "tag", function.metadata.tag or "latest") + kfp_k8s.add_pod_label( + task, prefix + "scrape-metrics", "True" if scrape_metrics else "False" + ) def add_default_env(task): - raise NotImplementedError + if hasattr(kfp_k8s, "use_field_path_as_env"): + kfp_k8s.use_field_path_as_env(task, "MLRUN_NAMESPACE", "metadata.namespace") + else: + # TODO: remove this warning as soon as "use_field_path_as_env" is available for MLRun SDK + logger.warning( + "Support for field paths as Pod environment variables is not yet available for the KFP 2 engine." 
+ 'Functions tentatively default to "MLRUN_NAMESPACE: mlrun"', + ) + task.set_env_variable(name="MLRUN_NAMESPACE", value="mlrun") + + if config.httpdb.api_url: + task.set_env_variable(name="MLRUN_DBPATH", value=config.httpdb.api_url) + + if config.mpijob_crd_version: + task.set_env_variable( + name="MLRUN_MPIJOB_CRD_VERSION", value=config.mpijob_crd_version + ) + + auth_env_var = mlrun.runtimes.constants.FunctionEnvironmentVariables.auth_session + if auth_env_var in os.environ or "V3IO_ACCESS_KEY" in os.environ: + task.set_env_variable( + name=auth_env_var, + value=os.environ.get(auth_env_var) or os.environ.get("V3IO_ACCESS_KEY"), + ) + return task + + +def sync_environment_variables(function, task): + function_env = {var.name: var.value for var in function.spec.env} + for k in function_env: + task.set_env_variable(name=k, value=function_env[k]) + return task + + +def sync_mounts(function, task): + supported_mounts = { + "configMap": __sync_mount_config_map, + "secret": __sync_mount_secret, + "PVC": __sync_pvc, + } + for volume in function.spec.volumes: + for key in volume: + if isinstance(volume[key], dict): + mount_path = "" + for m in function.spec.volume_mounts: + if m["name"] == volume["name"]: + mount_path = m["mountPath"] + break + supported_mounts[key](task, volume, mount_path) + return task + + +def __sync_mount_config_map(task, volume, mount_path): + # TODO: remove this warning as soon as KFP SDK >=2.7.0 is available for MLRun SDK + if not hasattr(kfp_k8s, "use_config_map_as_volume"): + logger.warning( + "Support for using a ConfigMap as a volume is not yet available on the KFP 2 engine", + ) + return + kfp_k8s.use_config_map_as_volume(task, volume["configMap"]["name"], mount_path) + return task + + +def __sync_mount_secret(task, volume, mount_path): + kfp_k8s.use_secret_as_volume(task, volume["secret"]["name"], mount_path) + return task + + +def __sync_pvc(task, volume, mount_path): + kfp_k8s.mount_pvc(task, volume["PVC"]["name"], mount_path) + return task def generate_pipeline_node( @@ -63,7 +274,37 @@ def generate_pipeline_node( code_env: str, registry: str, ): - raise NotImplementedError + def mlrun_function(): + return dsl.ContainerSpec( + image=image, + command=command, + ) + + container_component = dsl.component_factory.create_container_component_from_func( + mlrun_function + ) + + task = container_component() + task.set_display_name(name) + + add_default_function_resources(task) + add_function_node_selection_attributes(function, task) + add_annotations(task, PipelineRunType.run, function, func_url, project_name) + add_labels(task, function, scrape_metrics) + task.set_env_variable( + name="MLRUN_ARTIFACT_PATH", + value=mlrun.pipeline_context.project._enrich_artifact_path_with_workflow_uid(), + ) + if code_env: + task.set_env_variable(name="MLRUN_EXEC_CODE", value=code_env) + if registry: + task.set_env_variable( + name="MLRUN_HTTPDB__BUILDER__DOCKER_REGISTRY", value=registry + ) + add_default_env(task) + sync_mounts(function, task) + sync_environment_variables(function, task) + return task def generate_image_builder_pipeline_node( @@ -72,7 +313,43 @@ def generate_image_builder_pipeline_node( func_url=None, cmd=None, ): - raise NotImplementedError + def build_mlrun_function(state: dsl.OutputPath(str), image: dsl.OutputPath(str)): + runtime_args = ["--state-file-path", state, "--image-file-path", image] + return dsl.ContainerSpec( + image=config.kfp_image, + command=cmd + runtime_args, + ) + + container_component = dsl.component_factory.create_container_component_from_func( 
+ build_mlrun_function + ) + task = container_component() + task.set_display_name(name) + + add_default_function_resources(task) + add_function_node_selection_attributes(function, task) + add_annotations(task, PipelineRunType.build, function, func_url) + + if config.httpdb.builder.docker_registry: + task.set_env_variable( + name="MLRUN_HTTPDB__BUILDER__DOCKER_REGISTRY", + value=config.httpdb.builder.docker_registry, + ) + if "IGZ_NAMESPACE_DOMAIN" in os.environ: + task.set_env_variable( + name="IGZ_NAMESPACE_DOMAIN", + value=os.environ.get("IGZ_NAMESPACE_DOMAIN"), + ) + + is_v3io = function.spec.build.source and function.spec.build.source.startswith( + "v3io" + ) + if "V3IO_ACCESS_KEY" in os.environ and is_v3io: + task.set_env_variable( + name="V3IO_ACCESS_KEY", value=os.environ.get("V3IO_ACCESS_KEY") + ) + add_default_env(task) + return task def generate_deployer_pipeline_node( @@ -81,4 +358,21 @@ def generate_deployer_pipeline_node( func_url=None, cmd=None, ): - raise NotImplementedError + def deploy_function(): + return dsl.ContainerSpec( + image=config.kfp_image, + command=cmd, + ) + + container_component = dsl.component_factory.create_container_component_from_func( + deploy_function + ) + task = container_component() + task.set_display_name(name) + + add_default_function_resources(task) + add_function_node_selection_attributes(function, task) + add_annotations(task, PipelineRunType.deploy, function, func_url) + + add_default_env(task) + return task diff --git a/pipeline-adapters/mlrun-pipelines-kfp-v2/src/mlrun_pipelines/utils.py b/pipeline-adapters/mlrun-pipelines-kfp-v2/src/mlrun_pipelines/utils.py index 92285b86741..2795bd79ca2 100644 --- a/pipeline-adapters/mlrun-pipelines-kfp-v2/src/mlrun_pipelines/utils.py +++ b/pipeline-adapters/mlrun-pipelines-kfp-v2/src/mlrun_pipelines/utils.py @@ -13,6 +13,12 @@ # limitations under the License. 
# +import tempfile + +from kfp.compiler import Compiler + def compile_pipeline(pipeline, **kwargs): - raise NotImplementedError + pipe_file = tempfile.NamedTemporaryFile(suffix=".yaml", delete=False).name + Compiler().compile(pipeline, pipe_file, type_check=False) + return pipe_file diff --git a/server/api/crud/pipelines.py b/server/api/crud/pipelines.py index fc4ed59e04d..2edc4180ab8 100644 --- a/server/api/crud/pipelines.py +++ b/server/api/crud/pipelines.py @@ -385,7 +385,7 @@ def _resolve_project_from_command( return None def resolve_project_from_pipeline(self, pipeline: PipelineRun): - return self.resolve_project_from_workflow_manifest(pipeline._workflow_manifest) + return self.resolve_project_from_workflow_manifest(pipeline.workflow_manifest()) @staticmethod def _get_experiment_id_from_run(run: dict) -> str: From d57468b0ae8caa2e49a0f40611e9e2a322c31647 Mon Sep 17 00:00:00 2001 From: quaark Date: Wed, 8 May 2024 14:16:59 +0300 Subject: [PATCH 109/119] fix httpdb --- .../03-deploy-serving-model.ipynb | 6 +- docs/feature-store/feature-store.md | 1 - mlrun/db/httpdb.py | 351 +++++++++++++----- mlrun/utils/helpers.py | 8 +- .../src/mlrun_pipelines/mounts.py | 2 +- server/api/utils/projects/leader.py | 6 +- tests/system/projects/test_project.py | 2 +- 7 files changed, 269 insertions(+), 107 deletions(-) diff --git a/docs/feature-store/end-to-end-demo/03-deploy-serving-model.ipynb b/docs/feature-store/end-to-end-demo/03-deploy-serving-model.ipynb index 0ab0d601028..6fa34ea2521 100644 --- a/docs/feature-store/end-to-end-demo/03-deploy-serving-model.ipynb +++ b/docs/feature-store/end-to-end-demo/03-deploy-serving-model.ipynb @@ -370,11 +370,7 @@ "source": [ "### Accessing the real-time feature vector directly\n", "\n", - "You can also directly query the feature store values using the `get_online_feature_service` method. This method is used internally in the EnrichmentVotingEnsemble router class.\n", - "\n", - "```{admonition} Note\n", - "The timestamp of the last event is not returned with `get_online_feature_service` / `svc.get`.\n", - "```" + "You can also directly query the feature store values using the `get_online_feature_service` method. This method is used internally in the EnrichmentVotingEnsemble router class." ] }, { diff --git a/docs/feature-store/feature-store.md b/docs/feature-store/feature-store.md index 995a520d90c..a749ad8c046 100644 --- a/docs/feature-store/feature-store.md +++ b/docs/feature-store/feature-store.md @@ -27,6 +27,5 @@ feature-sets sources-targets transformations feature-vectors -sources-targets ./end-to-end-demo/index ``` \ No newline at end of file diff --git a/mlrun/db/httpdb.py b/mlrun/db/httpdb.py index abdb19f53f8..ab9613ad6b9 100644 --- a/mlrun/db/httpdb.py +++ b/mlrun/db/httpdb.py @@ -1490,10 +1490,99 @@ def remote_builder( if not resp.ok: logger.error(f"bad resp!!\n{resp.text}") - raise ValueError("bad function run response") + raise ValueError("bad submit build response") return resp.json() + def deploy_nuclio_function( + self, + func: mlrun.runtimes.RemoteRuntime, + builder_env: Optional[dict] = None, + ): + """ + Deploy a Nuclio function. + :param func: Function to build. 
+ :param builder_env: Kaniko builder pod env vars dict (for config/credentials) + """ + func.metadata.project = func.metadata.project or config.default_project + self.warn_on_s3_and_ecr_permissions_conflict(func) + try: + req = { + "function": func.to_dict(), + } + if builder_env: + req["builder_env"] = builder_env + _path = ( + f"projects/{func.metadata.project}/nuclio/{func.metadata.name}/deploy" + ) + resp = self.api_call("POST", _path, json=req) + except OSError as err: + logger.error(f"error submitting nuclio deploy task: {err_to_str(err)}") + raise OSError(f"error: cannot submit deploy, {err_to_str(err)}") + + if not resp.ok: + logger.error(f"deploy nuclio - bad response:\n{resp.text}") + raise ValueError("bad nuclio deploy response") + + return resp.json() + + def get_nuclio_deploy_status( + self, + func: mlrun.runtimes.RemoteRuntime, + last_log_timestamp: float = 0.0, + verbose: bool = False, + ): + """Retrieve the status of a deploy operation currently in progress. + :param func: Function object that is being built. + :param last_log_timestamp: Last timestamp of logs that were already retrieved. Function will return only logs + later than this parameter. + :param verbose: Add verbose logs into the output. + :returns: The following parameters: + - Text of builder logs. + - Timestamp of last log retrieved, to be used in subsequent calls to this function. + """ + + try: + normalized_name = normalize_name(func.metadata.name) + params = { + "name": normalized_name, + "project": func.metadata.project, + "tag": func.metadata.tag, + "last_log_timestamp": str(last_log_timestamp), + "verbose": bool2str(verbose), + } + _path = f"projects/{func.metadata.project}/nuclio/{normalized_name}/deploy" + resp = self.api_call("GET", _path, params=params) + except OSError as err: + logger.error(f"error getting deploy status: {err_to_str(err)}") + raise OSError(f"error: cannot get deploy status, {err_to_str(err)}") + + if not resp.ok: + logger.warning(f"failed resp, {resp.text}") + raise RunDBError("bad function build response") + + if resp.headers: + func.status.state = resp.headers.get("x-mlrun-function-status", "") + last_log_timestamp = float( + resp.headers.get("x-mlrun-last-timestamp", "0.0") + ) + func.status.address = resp.headers.get("x-mlrun-address", "") + func.status.nuclio_name = resp.headers.get("x-mlrun-name", "") + func.status.internal_invocation_urls = resp.headers.get( + "x-mlrun-internal-invocation-urls", "" + ).split(",") + func.status.external_invocation_urls = resp.headers.get( + "x-mlrun-external-invocation-urls", "" + ).split(",") + func.status.container_image = resp.headers.get( + "x-mlrun-container-image", "" + ) + + text = "" + if resp.content: + text = resp.content.decode() + return text, last_log_timestamp + def get_builder_status( self, func: BaseRuntime, @@ -1555,9 +1644,14 @@ def get_builder_status( func.status.container_image = resp.headers.get( "x-mlrun-container-image", "" ) - else: - func.status.build_pod = resp.headers.get("builder_pod", "") - func.spec.image = resp.headers.get("function_image", "") + + builder_pod = resp.headers.get("builder_pod", "") + if builder_pod: + func.status.build_pod = builder_pod + + function_image = resp.headers.get("function_image", "") + if function_image: + func.spec.image = function_image text = "" if resp.content: @@ -1620,7 +1714,7 @@ def list_project_background_tasks( Retrieve updated information on project background tasks being executed. If no filter is provided, will return background tasks from the last week. 
- :param project: Project name (defaults to mlrun.config.config.default_project). + :param project: Project name (defaults to mlrun.mlconf.default_project). :param state: List only background tasks whose state is specified. :param created_from: Filter by background task created time in ``[created_from, created_to]``. :param created_to: Filter by background task created time in ``[created_from, created_to]``. @@ -1952,9 +2046,9 @@ def list_features( project: str, name: str = None, tag: str = None, - entities: List[str] = None, - labels: List[str] = None, - ) -> List[dict]: + entities: list[str] = None, + labels: list[str] = None, + ) -> list[dict]: """List feature-sets which contain specific features. This function may return multiple versions of the same feature-set if a specific tag is not requested. Note that the various filters of this function actually refer to the feature-set object containing the features, not to the features themselves. @@ -1989,8 +2083,8 @@ def list_entities( project: str, name: str = None, tag: str = None, - labels: List[str] = None, - ) -> List[dict]: + labels: list[str] = None, + ) -> list[dict]: """Retrieve a list of entities and their mapping to the containing feature-sets. This function is similar to the :py:func:`~list_features` function, and uses the same logic. However, the entities are matched against the name rather than the features. @@ -2034,9 +2128,9 @@ def list_feature_sets( name: str = None, tag: str = None, state: str = None, - entities: List[str] = None, - features: List[str] = None, - labels: List[str] = None, + entities: list[str] = None, + features: list[str] = None, + labels: list[str] = None, partition_by: Union[ mlrun.common.schemas.FeatureStorePartitionByField, str ] = None, @@ -2045,7 +2139,7 @@ def list_feature_sets( partition_order: Union[ mlrun.common.schemas.OrderType, str ] = mlrun.common.schemas.OrderType.desc, - ) -> List[FeatureSet]: + ) -> list[FeatureSet]: """Retrieve a list of feature-sets matching the criteria provided. :param project: Project name. @@ -2160,7 +2254,7 @@ def patch_feature_set( not a full object. Example:: - feature_set_update = {"status": {"processed" : True}} + feature_set_update = {"status": {"processed": True}} Will apply the field ``status.processed`` to the existing object. :param project: Project which contains the modified object. @@ -2255,7 +2349,7 @@ def list_feature_vectors( name: str = None, tag: str = None, state: str = None, - labels: List[str] = None, + labels: list[str] = None, partition_by: Union[ mlrun.common.schemas.FeatureStorePartitionByField, str ] = None, @@ -2264,7 +2358,7 @@ def list_feature_vectors( partition_order: Union[ mlrun.common.schemas.OrderType, str ] = mlrun.common.schemas.OrderType.desc, - ) -> List[FeatureVector]: + ) -> list[FeatureVector]: """Retrieve a list of feature-vectors matching the criteria provided. :param project: Project name. 
@@ -2466,7 +2560,7 @@ def delete_objects_tag( def tag_artifacts( self, - artifacts: Union[List[Artifact], List[dict], Artifact, dict], + artifacts: Union[list[Artifact], list[dict], Artifact, dict], project: str, tag_name: str, replace: bool = False, @@ -2504,9 +2598,9 @@ def list_projects( format_: Union[ str, mlrun.common.schemas.ProjectsFormat ] = mlrun.common.schemas.ProjectsFormat.name_only, - labels: List[str] = None, + labels: list[str] = None, state: Union[str, mlrun.common.schemas.ProjectState] = None, - ) -> List[Union[mlrun.projects.MlrunProject, str]]: + ) -> list[Union[mlrun.projects.MlrunProject, str]]: """Return a list of the existing projects, potentially filtered by specific criteria. :param owner: List only projects belonging to this specific owner. @@ -2734,11 +2828,11 @@ def create_project_secrets( :param secrets: A set of secret values to store. Example:: - secrets = {'password': 'myPassw0rd', 'aws_key': '111222333'} + secrets = {"password": "myPassw0rd", "aws_key": "111222333"} db.create_project_secrets( "project1", provider=mlrun.common.schemas.SecretProviderName.kubernetes, - secrets=secrets + secrets=secrets, ) """ path = f"projects/{project}/secrets" @@ -2761,7 +2855,7 @@ def list_project_secrets( provider: Union[ str, mlrun.common.schemas.SecretProviderName ] = mlrun.common.schemas.SecretProviderName.kubernetes, - secrets: List[str] = None, + secrets: list[str] = None, ) -> mlrun.common.schemas.SecretsData: """Retrieve project-context secrets from Vault. @@ -2850,7 +2944,7 @@ def delete_project_secrets( provider: Union[ str, mlrun.common.schemas.SecretProviderName ] = mlrun.common.schemas.SecretProviderName.kubernetes, - secrets: List[str] = None, + secrets: list[str] = None, ): """Delete project-context secrets from Kubernetes. @@ -3007,13 +3101,13 @@ def list_model_endpoints( project: str, model: Optional[str] = None, function: Optional[str] = None, - labels: List[str] = None, + labels: list[str] = None, start: str = "now-1h", end: str = "now", - metrics: Optional[List[str]] = None, + metrics: Optional[list[str]] = None, top_level: bool = False, - uids: Optional[List[str]] = None, - ) -> List[mlrun.model_monitoring.model_endpoint.ModelEndpoint]: + uids: Optional[list[str]] = None, + ) -> list[mlrun.model_monitoring.model_endpoint.ModelEndpoint]: """ Returns a list of `ModelEndpoint` objects. Each `ModelEndpoint` object represents the current state of a model endpoint. This functions supports filtering by the following parameters: @@ -3079,7 +3173,7 @@ def get_model_endpoint( endpoint_id: str, start: Optional[str] = None, end: Optional[str] = None, - metrics: Optional[List[str]] = None, + metrics: Optional[list[str]] = None, feature_analysis: bool = False, ) -> mlrun.model_monitoring.model_endpoint.ModelEndpoint: """ @@ -3164,65 +3258,76 @@ def patch_model_endpoint( params=attributes, ) - def deploy_monitoring_batch_job( + def update_model_monitoring_controller( self, - project: str = "", - default_batch_image: str = "mlrun/mlrun", - with_schedule: bool = False, - ): + project: str, + base_period: int = 10, + image: str = "mlrun/mlrun", + ) -> None: """ - Submit model monitoring batch job. By default, submit only the batch job as ML function without scheduling. - To submit a scheduled job as well, please set with_schedule = True. - - :param project: Project name. - :param default_batch_image: The default image of the model monitoring batch job. By default, the image - is mlrun/mlrun. 
- :param with_schedule: If true, submit the model monitoring scheduled job as well. + Redeploy model monitoring application controller function. - - :returns: model monitoring batch job as a dictionary. You can easily convert the returned function into a - runtime object by calling ~mlrun.new_function. + :param project: Project name. + :param base_period: The time period in minutes in which the model monitoring controller function + triggers. By default, the base period is 10 minutes. + :param image: The image of the model monitoring controller function. + By default, the image is mlrun/mlrun. """ + self.api_call( + method=mlrun.common.types.HTTPMethod.POST, + path=f"projects/{project}/model-monitoring/model-monitoring-controller", + params={ + "base_period": base_period, + "image": image, + }, + ) - params = { - "default_batch_image": default_batch_image, - "with_schedule": with_schedule, - } - path = f"projects/{project}/jobs/batch-monitoring" - - resp = self.api_call(method="POST", path=path, params=params) - return resp.json()["func"] - - def create_model_monitoring_controller( + def enable_model_monitoring( self, - project: str = "", - default_controller_image: str = "mlrun/mlrun", + project: str, base_period: int = 10, - ): + image: str = "mlrun/mlrun", + deploy_histogram_data_drift_app: bool = True, + ) -> None: + """ + Deploy model monitoring application controller, writer and stream functions. + While the main goal of the controller function is to handle the monitoring processing and triggering + applications, the goal of the model monitoring writer function is to write all the monitoring + application results to the databases. + The stream function goal is to monitor the log of the data stream. It is triggered when a new log entry + is detected. It processes the new events into statistics that are then written to statistics databases. + :param project: Project name. + :param base_period: The time period in minutes in which the model monitoring controller function + triggers. By default, the base period is 10 minutes. + :param image: The image of the model monitoring controller, writer & monitoring + stream functions, which are real time nuclio functions. + By default, the image is mlrun/mlrun. + :param deploy_histogram_data_drift_app: If true, deploy the default histogram-based data drift application. """ - Submit model monitoring application controller job along with deploying the model monitoring writer function. - While the main goal of the controller job is to handle the monitoring processing and triggering applications, - the goal of the model monitoring writer function is to write all the monitoring application results to the - databases. Note that the default scheduling policy of the controller job is to run every 10 min. + self.api_call( + method=mlrun.common.types.HTTPMethod.POST, + path=f"projects/{project}/model-monitoring/enable-model-monitoring", + params={ + "base_period": base_period, + "image": image, + "deploy_histogram_data_drift_app": deploy_histogram_data_drift_app, + }, + ) - :param project: Project name. - :param default_controller_image: The default image of the model monitoring controller job. Note that the writer - function, which is a real time nuclio functino, will be deployed with the same - image. By default, the image is mlrun/mlrun. - :param base_period: Minutes to determine the frequency in which the model monitoring controller job - is running. By default, the base period is 5 minutes. - :returns: model monitoring controller job as a dictionary. 
You can easily convert the returned function into a - runtime object by calling ~mlrun.new_function. + def deploy_histogram_data_drift_app( + self, project: str, image: str = "mlrun/mlrun" + ) -> None: """ + Deploy the histogram data drift application. - params = { - "default_controller_image": default_controller_image, - "base_period": base_period, - } - path = f"projects/{project}/jobs/model-monitoring-controller" - - resp = self.api_call(method="POST", path=path, params=params) - return resp.json()["func"] + :param project: Project name. + :param image: The image on which the application will run. + """ + self.api_call( + method=mlrun.common.types.HTTPMethod.POST, + path=f"projects/{project}/model-monitoring/deploy-histogram-data-drift-app", + params={"image": image}, + ) def create_hub_source( self, source: Union[dict, mlrun.common.schemas.IndexedHubSource] @@ -3253,7 +3358,10 @@ def create_hub_source( metadata=mlrun.common.schemas.HubObjectMetadata( name="priv", description="a private source" ), - spec=mlrun.common.schemas.HubSourceSpec(path="/local/path/to/source", channel="development") + spec=mlrun.common.schemas.HubSourceSpec( + path="/local/path/to/source", channel="development" + ), + ), ) ) db.create_hub_source(private_source) @@ -3268,9 +3376,9 @@ def create_hub_source( spec=mlrun.common.schemas.HubSourceSpec( path="/local/path/to/source/2", channel="development", - credentials={...} - ) - ) + credentials={...}, + ), + ), ) db.create_hub_source(another_source) @@ -3312,7 +3420,7 @@ def list_hub_sources( item_name: Optional[str] = None, tag: Optional[str] = None, version: Optional[str] = None, - ) -> List[mlrun.common.schemas.hub.IndexedHubSource]: + ) -> list[mlrun.common.schemas.hub.IndexedHubSource]: """ List hub sources in the MLRun DB. @@ -3462,17 +3570,70 @@ def verify_authorization( body=dict_to_json(authorization_verification_input.dict()), ) - def list_api_gateways(self, project=None): + def list_api_gateways(self, project=None) -> mlrun.common.schemas.APIGatewaysOutput: """ Returns a list of Nuclio api gateways - :param project: optional str parameter to filter by project, if not passed, default Nuclio's value is taken + :param project: optional str parameter to filter by project, if not passed, default project value is taken - :return: json with the list of Nuclio Api Gateways - (json example is here - https://github.com/nuclio/nuclio/blob/development/docs/reference/api/README.md#listing-all-api-gateways) + :return: :py:class:`~mlrun.common.schemas.APIGateways`. """ project = project or config.default_project error = "list api gateways" + endpoint_path = f"projects/{project}/api-gateways" + response = self.api_call("GET", endpoint_path, error) + return mlrun.common.schemas.APIGatewaysOutput(**response.json()) + + def get_api_gateway(self, name, project=None) -> mlrun.common.schemas.APIGateway: + """ + Returns an API gateway + :param name: API gateway name + :param project: optional str parameter to filter by project, if not passed, default project value is taken + :return: :py:class:`~mlrun.common.schemas.APIGateway`. 
+ """ + project = project or config.default_project + error = "get api gateway" + endpoint_path = f"projects/{project}/api-gateways/{name}" + response = self.api_call("GET", endpoint_path, error) + return mlrun.common.schemas.APIGateway(**response.json()) + + def delete_api_gateway(self, name, project=None): + """ + Deletes an API gateway + :param name: API gateway name + :param project: Project name + """ + project = project or config.default_project + error = "delete api gateway" + endpoint_path = f"projects/{project}/api-gateways/{name}" + self.api_call("DELETE", endpoint_path, error) + + def store_api_gateway( + self, + api_gateway: Union[ + mlrun.common.schemas.APIGateway, + mlrun.runtimes.nuclio.api_gateway.APIGateway, + ], + project: Optional[str] = None, + ) -> mlrun.common.schemas.APIGateway: + """ + Stores an API Gateway. + :param api_gateway :py:class:`~mlrun.runtimes.nuclio.APIGateway` + or :py:class:`~mlrun.common.schemas.APIGateway`: API Gateway entity. + :param project: project name. Mandatory if api_gateway is mlrun.common.schemas.APIGateway. + :return: :py:class:`~mlrun.common.schemas.APIGateway`. + """ + + if isinstance(api_gateway, mlrun.runtimes.nuclio.api_gateway.APIGateway): + api_gateway = api_gateway.to_scheme() + endpoint_path = f"projects/{project}/api-gateways/{api_gateway.metadata.name}" + error = "store api gateways" + response = self.api_call( + "PUT", + endpoint_path, + error, + json=api_gateway.dict(exclude_none=True), + ) + return mlrun.common.schemas.APIGateway(**response.json()) endpoint_path = f"projects/{project}/nuclio/api-gateways" resp = self.api_call("GET", endpoint_path, error) return resp.json() @@ -3498,7 +3659,7 @@ def set_run_notifications( self, project: str, run_uid: str, - notifications: typing.List[mlrun.model.Notification] = None, + notifications: typing.list[mlrun.model.Notification] = None, ): """ Set notifications on a run. This will override any existing notifications on the run. @@ -3523,7 +3684,7 @@ def set_schedule_notifications( self, project: str, schedule_name: str, - notifications: typing.List[mlrun.model.Notification] = None, + notifications: typing.list[mlrun.model.Notification] = None, ): """ Set notifications on a schedule. This will override any existing notifications on the schedule. 
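For orientation, the API-gateway methods added in this hunk are plain CRUD wrappers over the `projects/{project}/api-gateways` endpoints. A minimal caller-side sketch (not part of the patch), assuming a reachable MLRun API; the project and gateway names are purely illustrative:

    import mlrun

    db = mlrun.get_run_db()

    # list all gateways in a project (returns mlrun.common.schemas.APIGatewaysOutput)
    gateways = db.list_api_gateways(project="my-project")

    # fetch one gateway by name, then remove it
    gateway = db.get_api_gateway("my-gateway", project="my-project")
    db.delete_api_gateway("my-gateway", project="my-project")

Note that `store_api_gateway` accepts either representation and normalizes the runtime object via `to_scheme()` before issuing the PUT, so callers can pass whichever flavor they already hold.
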
@@ -3546,7 +3707,7 @@ def set_schedule_notifications( def store_run_notifications( self, - notification_objects: typing.List[mlrun.model.Notification], + notification_objects: typing.list[mlrun.model.Notification], run_uid: str, project: str = None, mask_params: bool = True, @@ -3558,6 +3719,16 @@ def store_run_notifications( """ pass + def store_alert_notifications( + self, + session, + notification_objects: list[mlrun.model.Notification], + alert_id: str, + project: str, + mask_params: bool = True, + ): + pass + def submit_workflow( self, project: str, diff --git a/mlrun/utils/helpers.py b/mlrun/utils/helpers.py index 539d9310e97..3b4e7bcff2e 100644 --- a/mlrun/utils/helpers.py +++ b/mlrun/utils/helpers.py @@ -1052,9 +1052,7 @@ def retry_until_successful( def get_ui_url(project, uid=None): url = "" if mlrun.mlconf.resolve_ui_url(): - url = "{}/{}/{}/jobs".format( - mlrun.mlconf.resolve_ui_url(), mlrun.mlconf.ui.projects_prefix, project - ) + url = f"{mlrun.mlconf.resolve_ui_url()}/{mlrun.mlconf.ui.projects_prefix}/{project}/jobs" if uid: url += f"/monitor/{uid}/overview" return url @@ -1063,9 +1061,7 @@ def get_ui_url(project, uid=None): def get_workflow_url(project, id=None): url = "" if mlrun.mlconf.resolve_ui_url(): - url = "{}/{}/{}/jobs/monitor-workflows/workflow/{}".format( - mlrun.mlconf.resolve_ui_url(), mlrun.mlconf.ui.projects_prefix, project, id - ) + url = f"{mlrun.mlconf.resolve_ui_url()}/{mlrun.mlconf.ui.projects_prefix}/{project}/jobs/monitor-workflows/workflow/{id}" return url diff --git a/pipeline-adapters/mlrun-pipelines-kfp-v2/src/mlrun_pipelines/mounts.py b/pipeline-adapters/mlrun-pipelines-kfp-v2/src/mlrun_pipelines/mounts.py index d17e4a8f7b7..ceff8b0ca13 100644 --- a/pipeline-adapters/mlrun-pipelines-kfp-v2/src/mlrun_pipelines/mounts.py +++ b/pipeline-adapters/mlrun-pipelines-kfp-v2/src/mlrun_pipelines/mounts.py @@ -64,7 +64,7 @@ def mount_pvc(pvc_name=None, volume_name="pipeline", volume_mount_path="/mnt/pip Usage:: train = train_op(...) 
- train.apply(mount_pvc('claim-name', 'pipeline', '/mnt/pipeline')) + train.apply(mount_pvc("claim-name", "pipeline", "/mnt/pipeline")) """ if "MLRUN_PVC_MOUNT" in os.environ: mount = os.environ.get("MLRUN_PVC_MOUNT") diff --git a/server/api/utils/projects/leader.py b/server/api/utils/projects/leader.py index dc3dc0ca342..71be8ce337f 100644 --- a/server/api/utils/projects/leader.py +++ b/server/api/utils/projects/leader.py @@ -216,9 +216,9 @@ def _sync_projects(self): followers_projects_map = collections.defaultdict(dict) for _follower_name, follower_projects in follower_projects_map.items(): for project in follower_projects.projects: - followers_projects_map[_follower_name][ - project.metadata.name - ] = project + followers_projects_map[_follower_name][project.metadata.name] = ( + project + ) # create map - leader project name -> leader project for easier searches leader_projects_map = {} diff --git a/tests/system/projects/test_project.py b/tests/system/projects/test_project.py index 6a47029603d..2c1dfdb34b1 100644 --- a/tests/system/projects/test_project.py +++ b/tests/system/projects/test_project.py @@ -21,8 +21,8 @@ import time from sys import executable -import mlrun_pipelines.common.models import igz_mgmt +import mlrun_pipelines.common.models import pandas as pd import pytest from kfp import dsl From bfab122e8e151ac994a18c9f1d99816d4a6356c8 Mon Sep 17 00:00:00 2001 From: quaark Date: Wed, 8 May 2024 14:19:05 +0300 Subject: [PATCH 110/119] fix httpdb --- mlrun/db/httpdb.py | 16 +++++++++------- 1 file changed, 9 insertions(+), 7 deletions(-) diff --git a/mlrun/db/httpdb.py b/mlrun/db/httpdb.py index ab9613ad6b9..c2d6e544bd7 100644 --- a/mlrun/db/httpdb.py +++ b/mlrun/db/httpdb.py @@ -1533,11 +1533,14 @@ def get_nuclio_deploy_status( verbose: bool = False, ): """Retrieve the status of a deploy operation currently in progress. + :param func: Function object that is being built. :param last_log_timestamp: Last timestamp of logs that were already retrieved. Function will return only logs later than this parameter. :param verbose: Add verbose logs into the output. + :returns: The following parameters: + - Text of builder logs. - Timestamp of last log retrieved, to be used in subsequent calls to this function. """ @@ -3296,6 +3299,7 @@ def enable_model_monitoring( application results to the databases. The stream function goal is to monitor the log of the data stream. It is triggered when a new log entry is detected. It processes the new events into statistics that are then written to statistics databases. + :param project: Project name. :param base_period: The time period in minutes in which the model monitoring controller function triggers. By default, the base period is 10 minutes. @@ -3362,7 +3366,6 @@ def create_hub_source( path="/local/path/to/source", channel="development" ), ), - ) ) db.create_hub_source(private_source) @@ -3588,6 +3591,7 @@ def get_api_gateway(self, name, project=None) -> mlrun.common.schemas.APIGateway Returns an API gateway :param name: API gateway name :param project: optional str parameter to filter by project, if not passed, default project value is taken + :return: :py:class:`~mlrun.common.schemas.APIGateway`. """ project = project or config.default_project @@ -3620,6 +3624,7 @@ def store_api_gateway( :param api_gateway :py:class:`~mlrun.runtimes.nuclio.APIGateway` or :py:class:`~mlrun.common.schemas.APIGateway`: API Gateway entity. :param project: project name. Mandatory if api_gateway is mlrun.common.schemas.APIGateway. 
+ :return: :py:class:`~mlrun.common.schemas.APIGateway`. """ @@ -3634,9 +3639,6 @@ def store_api_gateway( json=api_gateway.dict(exclude_none=True), ) return mlrun.common.schemas.APIGateway(**response.json()) - endpoint_path = f"projects/{project}/nuclio/api-gateways" - resp = self.api_call("GET", endpoint_path, error) - return resp.json() def trigger_migrations(self) -> Optional[mlrun.common.schemas.BackgroundTask]: """Trigger migrations (will do nothing if no migrations are needed) and wait for them to finish if actually @@ -3659,7 +3661,7 @@ def set_run_notifications( self, project: str, run_uid: str, - notifications: typing.list[mlrun.model.Notification] = None, + notifications: list[mlrun.model.Notification] = None, ): """ Set notifications on a run. This will override any existing notifications on the run. @@ -3684,7 +3686,7 @@ def set_schedule_notifications( self, project: str, schedule_name: str, - notifications: typing.list[mlrun.model.Notification] = None, + notifications: list[mlrun.model.Notification] = None, ): """ Set notifications on a schedule. This will override any existing notifications on the schedule. @@ -3707,7 +3709,7 @@ def set_schedule_notifications( def store_run_notifications( self, - notification_objects: typing.list[mlrun.model.Notification], + notification_objects: list[mlrun.model.Notification], run_uid: str, project: str = None, mask_params: bool = True, From 9e7cb6f8ca79df9c298adf1e5c04796ef9a1dd4c Mon Sep 17 00:00:00 2001 From: quaark Date: Wed, 8 May 2024 14:34:00 +0300 Subject: [PATCH 111/119] more fixes --- mlrun/runtimes/pod.py | 22 --- mlrun/utils/helpers.py | 138 ++++-------------- server/api/utils/clients/iguazio.py | 2 +- server/api/utils/projects/leader.py | 6 +- tests/integration/sdk_api/alerts/__init__.py | 2 +- .../datastore/assets/testdata_short.json | 3 - 6 files changed, 32 insertions(+), 141 deletions(-) delete mode 100644 tests/system/datastore/assets/testdata_short.json diff --git a/mlrun/runtimes/pod.py b/mlrun/runtimes/pod.py index 780a982e5c2..96a8e8ed69b 100644 --- a/mlrun/runtimes/pod.py +++ b/mlrun/runtimes/pod.py @@ -997,28 +997,6 @@ def spec(self) -> KubeResourceSpec: def spec(self, spec): self._spec = self._verify_dict(spec, "spec", KubeResourceSpec) - def to_dict(self, fields=None, exclude=None, strip=False): - struct = super().to_dict(fields, exclude, strip=strip) - api = k8s_client.ApiClient() - struct = api.sanitize_for_serialization(struct) - if strip: - spec = struct["spec"] - for attr in [ - "volumes", - "volume_mounts", - "driver_volume_mounts", - "executor_volume_mounts", - ]: - if attr in spec: - del spec[attr] - if "env" in spec and spec["env"]: - for ev in spec["env"]: - if ev["name"].startswith("V3IO_"): - ev["value"] = "" - # Reset this, since mounts and env variables were cleared. 
- spec["disable_auto_mount"] = False - return struct - def set_env_from_secret(self, name, secret=None, secret_key=None): """set pod environment var from secret""" secret_key = secret_key or name diff --git a/mlrun/utils/helpers.py b/mlrun/utils/helpers.py index 3b4e7bcff2e..2fe051b278e 100644 --- a/mlrun/utils/helpers.py +++ b/mlrun/utils/helpers.py @@ -868,7 +868,7 @@ def get_docker_repository_or_default(repository: str) -> str: return repository -def get_parsed_docker_registry() -> Tuple[Optional[str], Optional[str]]: +def get_parsed_docker_registry() -> tuple[Optional[str], Optional[str]]: # according to https://stackoverflow.com/questions/37861791/how-are-docker-image-names-parsed docker_registry = config.httpdb.builder.docker_registry or "" first_slash_index = docker_registry.find("/") @@ -922,65 +922,27 @@ def fill_function_hash(function_dict, tag=""): return fill_object_hash(function_dict, "hash", tag) -def create_linear_backoff(base=2, coefficient=2, stop_value=120): - """ - Create a generator of linear backoff. Check out usage example in test_helpers.py - """ - x = 0 - comparison = min if coefficient >= 0 else max - - while True: - next_value = comparison(base + x * coefficient, stop_value) - yield next_value - x += 1 - - -def create_step_backoff(steps=None): - """ - Create a generator of steps backoff. - Example: steps = [[2, 5], [20, 10], [120, None]] will produce a generator in which the first 5 - values will be 2, the next 10 values will be 20 and the rest will be 120. - :param steps: a list of lists [step_value, number_of_iteration_in_this_step] - """ - steps = steps if steps is not None else [[2, 10], [10, 10], [120, None]] - steps = iter(steps) - - # Get first step - step = next(steps) - while True: - current_step_value, current_step_remain = step - if current_step_remain == 0: - # No more in this step, moving on - step = next(steps) - elif current_step_remain is None: - # We are in the last step, staying here forever - yield current_step_value - elif current_step_remain > 0: - # Still more remains in this step, just reduce the remaining number - step[1] -= 1 - yield current_step_value - - -def create_exponential_backoff(base=2, max_value=120, scale_factor=1): +def retry_until_successful( + backoff: int, timeout: int, logger, verbose: bool, _function, *args, **kwargs +): """ - Create a generator of exponential backoff. Check out usage example in test_helpers.py - :param base: exponent base - :param max_value: max limit on the result - :param scale_factor: factor to be used as linear scaling coefficient + Runs function with given *args and **kwargs. + Tries to run it until success or timeout reached (timeout is optional) + :param backoff: can either be a: + - number (int / float) that will be used as interval. + - generator of waiting intervals. 
(support next()) + :param timeout: pass None if timeout is not wanted, number of seconds if it is + :param logger: a logger so we can log the failures + :param verbose: whether to log the failure on each retry + :param _function: function to run + :param args: functions args + :param kwargs: functions kwargs + :return: function result """ - exponent = 1 - while True: - # This "complex" implementation (unlike the one in linear backoff) is to avoid exponent growing too fast and - # risking going behind max_int - next_value = scale_factor * (base**exponent) - if next_value < max_value: - exponent += 1 - yield next_value - else: - yield max_value + return Retryer(backoff, timeout, logger, verbose, _function, *args, **kwargs).run() -def retry_until_successful( +async def retry_until_successful_async( backoff: int, timeout: int, logger, verbose: bool, _function, *args, **kwargs ): """ @@ -997,56 +959,9 @@ def retry_until_successful( :param kwargs: functions kwargs :return: function result """ - start_time = time.time() - last_exception = None - - # Check if backoff is just a simple interval - if isinstance(backoff, int) or isinstance(backoff, float): - backoff = create_linear_backoff(base=backoff, coefficient=0) - - first_interval = next(backoff) - if timeout and timeout <= first_interval: - logger.warning( - f"Timeout ({timeout}) must be higher than backoff ({first_interval})." - f" Set timeout to be higher than backoff." - ) - - # If deadline was not provided or deadline not reached - while timeout is None or time.time() < start_time + timeout: - next_interval = first_interval or next(backoff) - first_interval = None - try: - result = _function(*args, **kwargs) - return result - - except mlrun.errors.MLRunFatalFailureError as exc: - raise exc.original_exception - except Exception as exc: - last_exception = exc - - # If next interval is within allowed time period - wait on interval, abort otherwise - if timeout is None or time.time() + next_interval < start_time + timeout: - if logger is not None and verbose: - logger.debug( - f"Operation not yet successful, Retrying in {next_interval} seconds." - f" exc: {err_to_str(exc)}" - ) - - time.sleep(next_interval) - else: - break - - if logger is not None: - logger.warning( - f"Operation did not complete on time. last exception: {last_exception}" - ) - - raise mlrun.errors.MLRunRetryExhaustedError( - f"Failed to execute command by the given deadline." 
- f" last_exception: {last_exception}," - f" function_name: {_function.__name__}," - f" timeout: {timeout}" - ) from last_exception + return await AsyncRetryer( + backoff, timeout, logger, verbose, _function, *args, **kwargs + ).run() def get_ui_url(project, uid=None): @@ -1061,12 +976,15 @@ def get_ui_url(project, uid=None): def get_workflow_url(project, id=None): url = "" if mlrun.mlconf.resolve_ui_url(): - url = f"{mlrun.mlconf.resolve_ui_url()}/{mlrun.mlconf.ui.projects_prefix}/{project}/jobs/monitor-workflows/workflow/{id}" + url = ( + f"{mlrun.mlconf.resolve_ui_url()}/{mlrun.mlconf.ui.projects_prefix}" + f"/{project}/jobs/monitor-workflows/workflow/{id}" + ) return url def are_strings_in_exception_chain_messages( - exception: Exception, strings_list=typing.List[str] + exception: Exception, strings_list: list[str] ) -> bool: while exception is not None: if any([string in str(exception) for string in strings_list]): @@ -1246,7 +1164,7 @@ def has_timezone(timestamp): return False -def as_list(element: Any) -> List[Any]: +def as_list(element: Any) -> list[Any]: return element if isinstance(element, list) else [element] @@ -1522,8 +1440,6 @@ def normalize_project_username(username: str): return username -# run_in threadpool is taken from fastapi to allow us to run sync functions in a threadpool -# without importing fastapi in the client async def run_in_threadpool(func, *args, **kwargs): """ Run a sync-function in the loop default thread pool executor pool and await its result. diff --git a/server/api/utils/clients/iguazio.py b/server/api/utils/clients/iguazio.py index 5a3bf9ffdab..9bf2ae67e72 100644 --- a/server/api/utils/clients/iguazio.py +++ b/server/api/utils/clients/iguazio.py @@ -697,7 +697,7 @@ def _resolve_params_from_response_headers( @staticmethod def _resolve_params_from_response_body( response_body: typing.Mapping[typing.Any, typing.Any], - ) -> typing.Tuple[typing.Optional[str], typing.Optional[typing.List[str]]]: + ) -> typing.Tuple[typing.Optional[str], typing.Optional[list[str]]]: context_auth = get_in( response_body, "data.attributes.context.authentication", {} ) diff --git a/server/api/utils/projects/leader.py b/server/api/utils/projects/leader.py index 71be8ce337f..f40dadf00d7 100644 --- a/server/api/utils/projects/leader.py +++ b/server/api/utils/projects/leader.py @@ -149,11 +149,11 @@ async def list_project_summaries( self, db_session: sqlalchemy.orm.Session, owner: str = None, - labels: typing.List[str] = None, + labels: list[str] = None, state: mlrun.common.schemas.ProjectState = None, projects_role: typing.Optional[mlrun.common.schemas.ProjectsRole] = None, leader_session: typing.Optional[str] = None, - names: typing.Optional[typing.List[str]] = None, + names: typing.Optional[list[str]] = None, ) -> mlrun.common.schemas.ProjectSummariesOutput: return await self._leader_follower.list_project_summaries( db_session, owner, labels, state, names @@ -197,7 +197,7 @@ def _sync_projects(self): try: # re-generating all of the maps every time since _ensure_follower_projects_synced might cause changes leader_projects: mlrun.common.schemas.ProjectsOutput - follower_projects_map: typing.Dict[str, mlrun.common.schemas.ProjectsOutput] + follower_projects_map: dict[str, mlrun.common.schemas.ProjectsOutput] leader_projects, follower_projects_map = self._run_on_all_followers( True, "list_projects", db_session ) diff --git a/tests/integration/sdk_api/alerts/__init__.py b/tests/integration/sdk_api/alerts/__init__.py index 99be6280fc3..33c5b3d3bd7 100644 --- 
a/tests/integration/sdk_api/alerts/__init__.py +++ b/tests/integration/sdk_api/alerts/__init__.py @@ -1,4 +1,4 @@ -# Copyright 2024 Iguazio +# Copyright 2023 Iguazio # # Licensed under the Apache License, Version 2.0 (the "License"); # you may not use this file except in compliance with the License. diff --git a/tests/system/datastore/assets/testdata_short.json b/tests/system/datastore/assets/testdata_short.json deleted file mode 100644 index 9bcef908486..00000000000 --- a/tests/system/datastore/assets/testdata_short.json +++ /dev/null @@ -1,3 +0,0 @@ -{"id":1,"name":"John","number":10,"float_number":1.5,"date_of_birth":"1990-01-01T00:00:00.000"} -{"id":2,"name":"Jane","number":20,"float_number":2.5,"date_of_birth":"1995-05-10T00:00:00.000"} -{"id":3,"name":"Bob","number":30,"float_number":3.5,"date_of_birth":"1985-12-15T00:00:00.000"} From ad429055793b75487e1262a7f8f349a78823a760 Mon Sep 17 00:00:00 2001 From: quaark Date: Wed, 8 May 2024 14:34:55 +0300 Subject: [PATCH 112/119] and another --- server/api/utils/clients/iguazio.py | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/server/api/utils/clients/iguazio.py b/server/api/utils/clients/iguazio.py index 9bf2ae67e72..954167cb258 100644 --- a/server/api/utils/clients/iguazio.py +++ b/server/api/utils/clients/iguazio.py @@ -697,7 +697,7 @@ def _resolve_params_from_response_headers( @staticmethod def _resolve_params_from_response_body( response_body: typing.Mapping[typing.Any, typing.Any], - ) -> typing.Tuple[typing.Optional[str], typing.Optional[list[str]]]: + ) -> tuple[typing.Optional[str], typing.Optional[list[str]]]: context_auth = get_in( response_body, "data.attributes.context.authentication", {} ) From f95526b13f8743609f7716927b31c3e5e5a127ba Mon Sep 17 00:00:00 2001 From: quaark Date: Wed, 8 May 2024 14:36:42 +0300 Subject: [PATCH 113/119] more fixes --- mlrun/run.py | 35 ------------------- .../src/mlrun_pipelines/common/__init__.py | 14 ++++++++ 2 files changed, 14 insertions(+), 35 deletions(-) diff --git a/mlrun/run.py b/mlrun/run.py index d3d71eb4600..e8a982eb5c9 100644 --- a/mlrun/run.py +++ b/mlrun/run.py @@ -70,41 +70,6 @@ ) -class RunStatuses: - succeeded = "Succeeded" - failed = "Failed" - skipped = "Skipped" - error = "Error" - running = "Running" - - @staticmethod - def all(): - return [ - RunStatuses.succeeded, - RunStatuses.failed, - RunStatuses.skipped, - RunStatuses.error, - RunStatuses.running, - ] - - @staticmethod - def stable_statuses(): - return [ - RunStatuses.succeeded, - RunStatuses.failed, - RunStatuses.skipped, - RunStatuses.error, - ] - - @staticmethod - def transient_statuses(): - return [ - status - for status in RunStatuses.all() - if status not in RunStatuses.stable_statuses() - ] - - def function_to_module(code="", workdir=None, secrets=None, silent=False): """Load code, notebook or mlrun function as .py module this function can import a local/remote py file or notebook diff --git a/pipeline-adapters/mlrun-pipelines-kfp-common/src/mlrun_pipelines/common/__init__.py b/pipeline-adapters/mlrun-pipelines-kfp-common/src/mlrun_pipelines/common/__init__.py index e69de29bb2d..99be6280fc3 100644 --- a/pipeline-adapters/mlrun-pipelines-kfp-common/src/mlrun_pipelines/common/__init__.py +++ b/pipeline-adapters/mlrun-pipelines-kfp-common/src/mlrun_pipelines/common/__init__.py @@ -0,0 +1,14 @@ +# Copyright 2024 Iguazio +# +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. 
+# You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. +# From 741f7e4d6c5bc9ef7ec6a4c9b61e51c82e7c9ade Mon Sep 17 00:00:00 2001 From: quaark Date: Wed, 8 May 2024 14:44:08 +0300 Subject: [PATCH 114/119] fix kfpops --- mlrun/runtimes/nuclio/function.py | 2 +- mlrun/utils/notifications/notification_pusher.py | 8 ++++---- 2 files changed, 5 insertions(+), 5 deletions(-) diff --git a/mlrun/runtimes/nuclio/function.py b/mlrun/runtimes/nuclio/function.py index f10aa86da21..c4a526cad4c 100644 --- a/mlrun/runtimes/nuclio/function.py +++ b/mlrun/runtimes/nuclio/function.py @@ -24,6 +24,7 @@ import requests from aiohttp.client import ClientSession from kubernetes import client +from mlrun_pipelines.common.ops import deploy_op from nuclio.deploy import find_dashboard_url, get_deploy_status from nuclio.triggers import V3IOStreamTrigger @@ -35,7 +36,6 @@ from mlrun.common.schemas import AuthInfo from mlrun.config import config as mlconf from mlrun.errors import err_to_str -from mlrun.kfpops import deploy_op from mlrun.lists import RunList from mlrun.model import RunObject from mlrun.platforms.iguazio import ( diff --git a/mlrun/utils/notifications/notification_pusher.py b/mlrun/utils/notifications/notification_pusher.py index 4f10c97e82e..68554efd0f9 100644 --- a/mlrun/utils/notifications/notification_pusher.py +++ b/mlrun/utils/notifications/notification_pusher.py @@ -22,13 +22,13 @@ from concurrent.futures import ThreadPoolExecutor import kfp +import mlrun_pipelines.common.ops import mlrun.common.runtimes.constants import mlrun.common.schemas import mlrun.config import mlrun.db.base import mlrun.errors -import mlrun.kfpops import mlrun.lists import mlrun.model import mlrun.utils.helpers @@ -431,9 +431,9 @@ def _add_deploy_function_step(_, _node_template): steps.append(function) step_methods = { - mlrun.kfpops.PipelineRunType.run: _add_run_step, - mlrun.kfpops.PipelineRunType.build: _add_deploy_function_step, - mlrun.kfpops.PipelineRunType.deploy: _add_deploy_function_step, + mlrun_pipelines.common.ops.PipelineRunType.run: _add_run_step, + mlrun_pipelines.common.ops.PipelineRunType.build: _add_deploy_function_step, + mlrun_pipelines.common.ops.PipelineRunType.deploy: _add_deploy_function_step, } workflow_id = run.status.results.get("workflow_id", None) From 8206408440ce1079e896b0bbbcc25e9e15893256 Mon Sep 17 00:00:00 2001 From: quaark Date: Wed, 8 May 2024 15:00:51 +0300 Subject: [PATCH 115/119] oops --- dockerfiles/mlrun-api/Dockerfile | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/dockerfiles/mlrun-api/Dockerfile b/dockerfiles/mlrun-api/Dockerfile index 6b1668466c3..d1d9b85058b 100644 --- a/dockerfiles/mlrun-api/Dockerfile +++ b/dockerfiles/mlrun-api/Dockerfile @@ -76,7 +76,7 @@ COPY . . 
RUN python -m pip install .[complete-api] &&\ pip install ./pipeline-adapters/mlrun-pipelines-kfp-common &&\ - pip install ./pipeline-adapters/mlrun-pipelines-kfp-v1-8 \ + pip install ./pipeline-adapters/mlrun-pipelines-kfp-v1-8 VOLUME /mlrun/db From 21330b63dd4ccf8450892ad27c59f9195dca2f4d Mon Sep 17 00:00:00 2001 From: quaark Date: Wed, 8 May 2024 15:05:13 +0300 Subject: [PATCH 116/119] removed --- mlrun/artifacts/model.py | 4 ++-- 1 file changed, 2 insertions(+), 2 deletions(-) diff --git a/mlrun/artifacts/model.py b/mlrun/artifacts/model.py index 80d28cb9f52..48fe6fd052d 100644 --- a/mlrun/artifacts/model.py +++ b/mlrun/artifacts/model.py @@ -93,8 +93,8 @@ def __init__( self.model_file = model_file self.metrics = metrics or {} self.parameters = paraemeters or {} - self.inputs: list[Feature] = inputs or [] - self.outputs: list[Feature] = outputs or [] + self.inputs = inputs or [] + self.outputs = outputs or [] self.framework = framework self.algorithm = algorithm self.feature_vector = feature_vector From 3844ca6ed7d880a1080cf5a27afc58e812b03fa5 Mon Sep 17 00:00:00 2001 From: quaark Date: Wed, 8 May 2024 15:09:17 +0300 Subject: [PATCH 117/119] more fixes --- mlrun/runtimes/nuclio/function.py | 5 ++--- 1 file changed, 2 insertions(+), 3 deletions(-) diff --git a/mlrun/runtimes/nuclio/function.py b/mlrun/runtimes/nuclio/function.py index c4a526cad4c..bec4ebd0ab8 100644 --- a/mlrun/runtimes/nuclio/function.py +++ b/mlrun/runtimes/nuclio/function.py @@ -24,7 +24,9 @@ import requests from aiohttp.client import ClientSession from kubernetes import client +from mlrun_pipelines.common.mounts import VolumeMount from mlrun_pipelines.common.ops import deploy_op +from mlrun_pipelines.mounts import mount_v3io, v3io_cred from nuclio.deploy import find_dashboard_url, get_deploy_status from nuclio.triggers import V3IOStreamTrigger @@ -39,11 +41,8 @@ from mlrun.lists import RunList from mlrun.model import RunObject from mlrun.platforms.iguazio import ( - VolumeMount, - mount_v3io, parse_path, split_path, - v3io_cred, ) from mlrun.runtimes.base import FunctionStatus, RunError from mlrun.runtimes.pod import KubeResource, KubeResourceSpec From a54b7ab034aa6f11f09509824a4787475f0be37a Mon Sep 17 00:00:00 2001 From: quaark Date: Wed, 8 May 2024 15:41:15 +0300 Subject: [PATCH 118/119] fixes for UTs --- .../src/mlrun_pipelines/common/models.py | 2 +- .../mlrun-pipelines-kfp-v1-8/src/mlrun_pipelines/ops.py | 3 ++- .../mlrun-pipelines-kfp-v2/src/mlrun_pipelines/ops.py | 3 ++- 3 files changed, 5 insertions(+), 3 deletions(-) diff --git a/pipeline-adapters/mlrun-pipelines-kfp-common/src/mlrun_pipelines/common/models.py b/pipeline-adapters/mlrun-pipelines-kfp-common/src/mlrun_pipelines/common/models.py index 07492c5943c..ede5a6a601a 100644 --- a/pipeline-adapters/mlrun-pipelines-kfp-common/src/mlrun_pipelines/common/models.py +++ b/pipeline-adapters/mlrun-pipelines-kfp-common/src/mlrun_pipelines/common/models.py @@ -16,7 +16,7 @@ from enum import Enum -class RunStatuses(Enum): +class RunStatuses(str, Enum): """ Class for different types of statuses a 'PipelineRun' can have using an enum type. Beyond enumerating all possible statuses, this class ensures comparisons are case-insensitive. 
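The `(str, Enum)` mixin introduced just above is what lets status values coming back from KFP be compared without `.value` plumbing: each member is itself a `str`. A small sketch, assuming the class keeps the `succeeded = "Succeeded"` member that the removed `mlrun.run.RunStatuses` defined (the case-insensitive comparison promised by the docstring is implemented elsewhere in the class and is not shown in this hunk):

    from mlrun_pipelines.common.models import RunStatuses

    # the str mixin makes members compare equal to their plain-string values
    assert RunStatuses.succeeded == "Succeeded"

    # and lets them flow into any API that expects a str
    assert isinstance(RunStatuses.succeeded, str)
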
diff --git a/pipeline-adapters/mlrun-pipelines-kfp-v1-8/src/mlrun_pipelines/ops.py b/pipeline-adapters/mlrun-pipelines-kfp-v1-8/src/mlrun_pipelines/ops.py index f2312876ad7..c0e16dcf742 100644 --- a/pipeline-adapters/mlrun-pipelines-kfp-v1-8/src/mlrun_pipelines/ops.py +++ b/pipeline-adapters/mlrun-pipelines-kfp-v1-8/src/mlrun_pipelines/ops.py @@ -27,6 +27,7 @@ from mlrun_pipelines.common.ops import KFPMETA_DIR, PipelineRunType import mlrun +import mlrun.common.runtimes.constants from mlrun.config import config from mlrun.utils import get_in @@ -186,7 +187,7 @@ def add_default_env(k8s_client, cop): ) ) - auth_env_var = mlrun.runtimes.constants.FunctionEnvironmentVariables.auth_session + auth_env_var = mlrun.common.runtimes.constants.FunctionEnvironmentVariables.auth_session if auth_env_var in os.environ or "V3IO_ACCESS_KEY" in os.environ: cop.container.add_env_variable( k8s_client.V1EnvVar( diff --git a/pipeline-adapters/mlrun-pipelines-kfp-v2/src/mlrun_pipelines/ops.py b/pipeline-adapters/mlrun-pipelines-kfp-v2/src/mlrun_pipelines/ops.py index a13a488803a..e33de45b7a1 100644 --- a/pipeline-adapters/mlrun-pipelines-kfp-v2/src/mlrun_pipelines/ops.py +++ b/pipeline-adapters/mlrun-pipelines-kfp-v2/src/mlrun_pipelines/ops.py @@ -25,6 +25,7 @@ from mlrun_pipelines.common.ops import PipelineRunType import mlrun +import mlrun.common.runtimes.constants from mlrun.config import config from mlrun.utils import get_in, logger @@ -207,7 +208,7 @@ def add_default_env(task): name="MLRUN_MPIJOB_CRD_VERSION", value=config.mpijob_crd_version ) - auth_env_var = mlrun.runtimes.constants.FunctionEnvironmentVariables.auth_session + auth_env_var = mlrun.common.runtimes.constants.FunctionEnvironmentVariables.auth_session if auth_env_var in os.environ or "V3IO_ACCESS_KEY" in os.environ: task.set_env_variable( name=auth_env_var, From 82a79777818e1a0b745261d204979171fd24cab0 Mon Sep 17 00:00:00 2001 From: quaark Date: Wed, 8 May 2024 16:14:07 +0300 Subject: [PATCH 119/119] lint :( --- .../mlrun-pipelines-kfp-v1-8/src/mlrun_pipelines/ops.py | 4 +++- .../mlrun-pipelines-kfp-v2/src/mlrun_pipelines/ops.py | 4 +++- 2 files changed, 6 insertions(+), 2 deletions(-) diff --git a/pipeline-adapters/mlrun-pipelines-kfp-v1-8/src/mlrun_pipelines/ops.py b/pipeline-adapters/mlrun-pipelines-kfp-v1-8/src/mlrun_pipelines/ops.py index c0e16dcf742..3e82529bfe2 100644 --- a/pipeline-adapters/mlrun-pipelines-kfp-v1-8/src/mlrun_pipelines/ops.py +++ b/pipeline-adapters/mlrun-pipelines-kfp-v1-8/src/mlrun_pipelines/ops.py @@ -187,7 +187,9 @@ def add_default_env(k8s_client, cop): ) ) - auth_env_var = mlrun.common.runtimes.constants.FunctionEnvironmentVariables.auth_session + auth_env_var = ( + mlrun.common.runtimes.constants.FunctionEnvironmentVariables.auth_session + ) if auth_env_var in os.environ or "V3IO_ACCESS_KEY" in os.environ: cop.container.add_env_variable( k8s_client.V1EnvVar( diff --git a/pipeline-adapters/mlrun-pipelines-kfp-v2/src/mlrun_pipelines/ops.py b/pipeline-adapters/mlrun-pipelines-kfp-v2/src/mlrun_pipelines/ops.py index e33de45b7a1..7c5e864fb5d 100644 --- a/pipeline-adapters/mlrun-pipelines-kfp-v2/src/mlrun_pipelines/ops.py +++ b/pipeline-adapters/mlrun-pipelines-kfp-v2/src/mlrun_pipelines/ops.py @@ -208,7 +208,9 @@ def add_default_env(task): name="MLRUN_MPIJOB_CRD_VERSION", value=config.mpijob_crd_version ) - auth_env_var = mlrun.common.runtimes.constants.FunctionEnvironmentVariables.auth_session + auth_env_var = ( + mlrun.common.runtimes.constants.FunctionEnvironmentVariables.auth_session + ) if auth_env_var in 
os.environ or "V3IO_ACCESS_KEY" in os.environ: task.set_env_variable( name=auth_env_var,

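The two `ops.py` hunks above only re-wrap a long line for the linter, but the line they touch sits inside a conditional env-var propagation pattern worth spelling out. Below is a minimal self-contained sketch of that pattern; the constant value "MLRUN_AUTH_SESSION", the `Task` stub, and the value-resolution logic are assumptions for illustration (the real constant lives in `mlrun.common.runtimes.constants.FunctionEnvironmentVariables`, and the value assignment is not shown in this patch):

# Sketch only, under the stated assumptions.
import os


class Task:
    """Stand-in for a KFP pipeline task that accepts env variables."""

    def __init__(self):
        self.env = {}

    def set_env_variable(self, name, value):
        self.env[name] = value


def add_default_env(task):
    auth_env_var = "MLRUN_AUTH_SESSION"  # assumed placeholder value
    if auth_env_var in os.environ or "V3IO_ACCESS_KEY" in os.environ:
        # Forward whichever credential the launching process holds
        # (the exact resolution in the real code is not visible in this patch).
        task.set_env_variable(
            name=auth_env_var,
            value=os.environ.get(auth_env_var) or os.environ.get("V3IO_ACCESS_KEY"),
        )


task = Task()
add_default_env(task)  # no-op unless one of the env vars is set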
[GIT binary patch data omitted: an unreadable base85-encoded blob, most likely an image asset for the docs, was garbled beyond recovery here, together with the mail header of the commit that carries the documentation changes below.]
diff --git a/docs/deployment/batch_inference.ipynb b/docs/deployment/batch_inference.ipynb
index 031f34870dd..852dfc5564a 100644
--- a/docs/deployment/batch_inference.ipynb
+++ b/docs/deployment/batch_inference.ipynb
@@ -11,7 +11,7 @@
    "\n",
    "With batch inference, the batch runs are typically generated during some recurring schedule (e.g., hourly, or daily). These inferences are then stored in a database or a file and can be made available to developers or end users. With batch inference, the goal is usually tied to time constraints and the service-level agreement (SLA) of the job. Conversely, in real time serving, the goal is usually to optimize the number of transactions per second that the model can process. An online application displays a result to the user.\n",
    "\n",
-    "Batch inference can sometimes take advantage of big data technologies such as Spark to generate predictions. Big data technologies allows data scientists and machine learning engineers to take advantage of scalable compute resources to generate many predictions at once."
+    "Batch inference can sometimes take advantage of big data technologies, such as Spark, to generate predictions. Big data technologies allow data scientists and machine learning engineers to take advantage of scalable compute resources to generate many predictions simultaneously. To gain a better understanding about the batch inference usage and the function parameters, see the [Batch Inference page on the Function Hub](https://www.mlrun.org/hub/functions/master/batch_inference_v2/)."
   ]
  },
  {
@@ -54,7 +54,7 @@
    "\n",
    "Batch inference is implemented in MLRun by running the function with an input dataset. With MLRun you can easily create any custom logic in a function, including loading a model and calling it.\n",
    "\n",
-    "The Function Hub [batch inference function](https://github.com/mlrun/functions/tree/development/batch_inference) is used for running the models in batch as well as performing drift analysis. The function supports the following frameworks:\n",
+    "The Function Hub [batch inference function](https://github.com/mlrun/functions/tree/master/batch_inference_v2) is used for running the models in batch as well as performing drift analysis. 
The function supports the following frameworks:\n", "\n", "- Scikit-learn\n", "- XGBoost\n", @@ -94,7 +94,7 @@ "project = mlrun.get_or_create_project(\n", " \"batch-inference\", context=\"./\", user_project=True\n", ")\n", - "batch_inference = mlrun.import_function(\"hub://batch_inference\")" + "batch_inference = mlrun.import_function(\"hub://batch_inference_v2\")" ] }, { @@ -112,7 +112,15 @@ "metadata": {}, "outputs": [], "source": [ - "model_path = mlrun.get_sample_path(\"models/batch-predict/model.pkl\")\n", + "import sys\n", + "\n", + "suffix = (\n", + " mlrun.__version__.split(\"-\")[0].replace(\".\", \"_\")\n", + " if sys.version_info[1] > 7\n", + " else \"3.7\"\n", + ")\n", + "\n", + "model_path = mlrun.get_sample_path(f\"models/batch-predict/model-{suffix}.pkl\")\n", "\n", "model_artifact = project.log_model(\n", " key=\"model\", model_file=model_path, framework=\"sklearn\"\n", @@ -154,8 +162,7 @@ "source": [ "batch_run = project.run_function(\n", " batch_inference,\n", - " inputs={\"dataset\": prediction_set_path},\n", - " params={\"model\": model_artifact.uri},\n", + " inputs={\"dataset\": prediction_set_path, \"model_path\": model_artifact.uri},\n", ")" ] }, @@ -414,8 +421,7 @@ "source": [ "batch_run = project.run_function(\n", " batch_inference,\n", - " inputs={\"dataset\": prediction_set_path},\n", - " params={\"model\": model_artifact.uri},\n", + " inputs={\"dataset\": prediction_set_path, \"model_path\": model_artifact.uri},\n", " schedule=\"*/30 * * * *\",\n", ")" ] @@ -426,13 +432,15 @@ "source": [ "### Drift analysis\n", "\n", - "By default, if a model has a sample set statistics, `batch_inference` performs drift analysis and will produce a data drift table artifact, as well as numerical drift metrics.\n", + "By default, if a model has a sample set statistics, `batch_inference` performs drift analysis and produces a data drift table artifact, as well as numerical drift metrics. In addition, this function either creates or updates an existing [model endpoint](../monitoring/model-monitoring-deployment.html#:~:text=Model%20Endpoint%20%E2%80%94%20A%20combination%20of%20a%20deployed%20Nuclio%20function%20and%20the%20models%20themselves.%20One%20function%20can%20run%20multiple%20endpoints%3B%20however%2C%20statistics%20are%20saved%20per%20endpoint.) record (depends on the provided `endpoint_id`). \n", + "\n", + "In addition, you can define `\"trigger_monitoring_job\": True` to trigger the drift job analysis immediately. \n", "\n", "To provide sample set statistics for the model you can either:\n", "\n", "1. Train the model using MLRun. This allows you to create the sample set during training.\n", "2. Log an external model using `project.log_model` method and provide the training set in the `training_set` parameter.\n", - "3. Provide the set explicitly when calling the `batch_inference` function via the `sample_set` input.\n", + "3. 
Provide the set explicitly when calling the `batch_inference` function via the `model_endpoint_sample_set` input.\n", "\n", "In the example below, we will provide the training set as the sample set\n" ] @@ -447,11 +455,15 @@ "\n", "batch_run = project.run_function(\n", " batch_inference,\n", - " inputs={\"dataset\": prediction_set_path, \"sample_set\": training_set_path},\n", + " inputs={\n", + " \"dataset\": prediction_set_path,\n", + " \"model_endpoint_sample_set\": training_set_path,\n", + " \"model_path\": model_artifact.uri,\n", + " },\n", " params={\n", - " \"model\": model_artifact.uri,\n", " \"label_columns\": \"label\",\n", " \"perform_drift_analysis\": True,\n", + " \"trigger_monitoring_job\": True,\n", " },\n", ")" ] @@ -460,7 +472,9 @@ "cell_type": "markdown", "metadata": {}, "source": [ - "In this case, instead of just prediction, you will get drift analysis. The drift table plot that compares the drift between the training data and prediction data per feature:" + "In this case, instead of just prediction, you get drift analysis. If no label column was provided, the job function tries to retrieve the label columns from the logged model artifact. If also not defined in the model, the label columns are generated with the following format `predicted_label_{i}` where `i` is an incremental number. \n", + "\n", + "The drift table plot that compares the drift between the training data and prediction data per feature:" ] }, { @@ -551,52 +565,7 @@ { "cell_type": "markdown", "metadata": {}, - "source": [ - "## `batch_inference` Parameters\n", - "\n", - "**Model Parameters**\n", - "\n", - "* `model`: `str` — The model store path.\n", - "\n", - "**Inference parameters**\n", - "\n", - "*Parameters to specify the dataset for inference.*\n", - "\n", - "* `dataset`: `DatasetType` — The dataset to infer through the model.\n", - " Can be passed in `inputs` as either a Dataset artifact / Feature vector URI or\n", - " in `parameters` as a list, dictionary or numpy array.\n", - "* `drop_columns`: `Union[str, int, List[str], List[int]]` — A string / integer or a list of strings / integers that\n", - " represent the column names / indices to drop. When the dataset is a list or a numpy array this parameter must\n", - " be represented by integers.\n", - "* `label_columns`: `Union[str, List[str]]` — The target label(s) of the column(s) in the dataset for Regression\n", - " or classification tasks. The label column can be accessed from the model object, or the feature vector provided\n", - " if available.\n", - "* `predict_kwargs`: `Dict[str, Any]` — Additional parameters to pass to the prediction of the model.\n", - "\n", - "**Drift parameters**\n", - "\n", - "*Parameters that affect the drift calculation.*\n", - "\n", - "* `perform_drift_analysis`: `bool` = `None` — Whether to perform drift analysis between the sample set of the\n", - " model object to the dataset given. By default, None, which means it will perform drift analysis if the model has\n", - " a sample set statistics. Perform drift analysis will produce a data drift table artifact.\n", - "* `sample_set`: `DatasetType` — A sample dataset to give to compare the inputs in the drift analysis. The\n", - " default chosen sample set will always be the one who is set in the model artifact itself.\n", - "* `drift_threshold`: `float` = `0.7` — The threshold of which to mark drifts. Default is 0.7.\n", - "* `possible_drift_threshold`: `float` = `0.5` — The threshold of which to mark possible drifts. 
Default is 0.5.\n", - "* `inf_capping`: `float` = `10.0` — The value to set for when it reached infinity. Default is 10.0.\n", - "\n", - "**Logging parameters**\n", - "\n", - "*Parameters to control the automatic logging feature of MLRun. You can adjust the logging outputs as relevant and if\n", - "not passed, a default list of artifacts and metrics is produced and calculated.*\n", - "\n", - "* `log_result_set`: `bool` = `True` — Whether to log the result set - a DataFrame of the given inputs concatenated\n", - " with the predictions. Default is True.\n", - "* `result_set_name`: `str` = `\"prediction\"` — The db key to set name of the prediction result and the filename\n", - " Default is 'prediction'.\n", - "* `artifacts_tag`: `str` — Tag to use for all the artifacts resulted from the function.\n" - ] + "source": [] } ], "metadata": { diff --git a/docs/monitoring/model-monitoring-deployment.ipynb b/docs/monitoring/model-monitoring-deployment.ipynb index 97d3cf9ec57..f8c8bf0ace3 100644 --- a/docs/monitoring/model-monitoring-deployment.ipynb +++ b/docs/monitoring/model-monitoring-deployment.ipynb @@ -53,7 +53,7 @@ "* **Total Variation Distance** (TVD) — The statistical difference between the actual predictions and the model's trained predictions.\n", "* **Hellinger Distance** — A type of f-divergence that quantifies the similarity between the actual predictions, and the model's trained predictions.\n", "* **Kullback–Leibler Divergence** (KLD) — The measure of how the probability distribution of actual predictions is different from the second model's trained reference probability distribution.\n", - "* **Model Endpoint** — A combination of a deployed Nuclio function and the models themselves. One function can run multiple endpoints; however, statistics are saved per endpoint.\n", + "* **Model Endpoint** — A combination of a model and a runtime function that can be a deployed Nuclio function or a job runtime. 
One function can run multiple endpoints; however, statistics are saved per endpoint.\n", "\n", "**See also**\n", "- [Model monitoring and drift detection tutorial](../tutorials/05-model-monitoring.html)\n", @@ -78,20 +78,16 @@ "\n", "The summary page contains the following fields:\n", "* **Name** — the name of the model endpoint\n", + "* **Function** — the name of the related function \n", "* **Version** — user configured version taken from model deployment\n", "* **Class** — the implementation class that is used by the endpoint\n", - "* **Model** — user defined name for the model\n", "* **Labels** — user configurable tags that are searchable\n", "* **Uptime** — first request for production data\n", "* **Last Prediction** — most recent request for production data\n", + "* **Average Latency** — Average latency time of serving a single event in the last hour\n", "* **Error Count** — includes prediction process errors such as operational issues (For example, a function in a failed state), as well as data processing errors\n", "(For example, invalid timestamps, request ids, type mismatches etc.)\n", "* **Drift** — indication of drift status (no drift (green), possible drift (yellow), drift detected (red))\n", - "* **Accuracy** — a numeric value representing the accuracy of model predictions (N/A)\n", - "\n", - "```{note}\n", - "Model Accuracy is currently under development.\n", - "```\n", "\n", "### Model endpoint overview\n", "The Model Endpoints overview pane displays general information about the selected model.\n", @@ -103,28 +99,26 @@ "* **Model Class** — the implementation class that is used by the endpoint\n", "* **Model Artifact** — reference to the model's file location\n", "* **Function URI** — the MLRun function to access the model\n", + "* **Function Tag** — the MLRun function tag\n", + "* **Feature set** — the monitoring feature set that points to the monitoring parquet directory\n", "* **Last Prediction** — most recent request for production data\n", "* **Error Count** — includes prediction process errors such as operational issues (For example, a function in a failed state), as well as data processing errors\n", "(For example, invalid timestamps, request ids, type mismatches etc.)\n", "* **Accuracy** — a numeric value representing the accuracy of model predictions (N/A)\n", "* **Stream path** — the input and output stream of the selected model\n", + "* **Mean TVD** — the mean value of the [Total Variance Distance](../monitoring/model-monitoring-deployment.html#:~:text=Total%20Variation%20Distance%20(TVD)%20%E2%80%94%20The%20statistical%20difference%20between%20the%20actual%20predictions%20and%20the%20model%E2%80%99s%20trained%20predictions) of the model features and labels\n", + "* **Mean Hellinger** — the mean value of the [Hellinger Distance](../monitoring/model-monitoring-deployment.html#:~:text=Hellinger%20Distance%20%E2%80%94%20A%20type%20of%20f%2Ddivergence%20that%20quantifies%20the%20similarity%20between%20the%20actual%20predictions%2C%20and%20the%20model%E2%80%99s%20trained%20predictions) of the model features and labels\n", + "* **Mean KLD** — the mean value of the [KL Divergence](../monitoring/model-monitoring-deployment.html#:~:text=Kullback%E2%80%93Leibler%20Divergence%20(KLD)%20%E2%80%94%20The%20measure%20of%20how%20the%20probability%20distribution%20of%20actual%20predictions%20is%20different%20from%20the%20second%20model%E2%80%99s%20trained%20reference%20probability%20distribution) of the model features and labels\n", + "* **Drift Actual Value** — the resulted 
drift value of the latest drift analysis calculation. \n", + "* **Drift Detected Threshold** — pre-defined value to determine a drift \n", + "* **Possible Drift Threshold** — pre-defined value to determine a possible drift\n", "\n", - "Use the ellipsis to view the YAML resource file for details about the monitored resource.\n", - "\n", - "### Model drift analysis\n", - "The Drift Analysis pane provides performance statistics for the currently selected model.\n", - "\n", - "![Model Endpoints Drift Analysis](../_static/images/IG_model_endpoints_drift_analysis.png)\n", - "\n", - "Each of the following fields has both sum and mean numbers displayed. For definitions of the terms see [Common Terminology](#common-terminology).\n", - "* **TVD**\n", - "* **Hellinger**\n", - "* **KLD**\n", - "\n", - "Use the ellipsis to view the YAML resource file for details about the monitored resource.\n", + "```{note}\n", + "Click on **Resource monitoring** to get the relevant [Grafana Details Dashboard](#model-monitoring-details-dashboard) that displays real-time performance data of the selected model in detail. In addition, use the ellipsis to view the YAML resource file for details about the monitored resource.\n", + "```\n", "\n", "### Model features analysis\n", - "The Features Analysis pane provides details of the drift analysis in a table format with each feature in the selected model on its own line.\n", + "The Features Analysis pane provides details of the drift analysis in a table format with each feature and label in the selected model on its own line.\n", "\n", "![Model Endpoints Features Analysis](../_static/images/IG_model_endpoints_features_analysis.png)\n", "\n", @@ -139,8 +133,6 @@ "* **KLD**\n", "* **Histograms**—the approximate representation of the distribution of the data. 
Hover over the bars in the graph for the details.\n", "\n", - "Use the ellipsis to view the YAML resource file for details about the monitored resource.\n", - "\n", "## Model monitoring using Grafana dashboards\n", "You can deploy a Grafana service in your Iguazio instance and use Grafana Dashboards to view model monitoring details.\n", "There are three dashboards available:\n", From c4d62bf45168ab7c290b6dc2ddd3f9ea3adfb676 Mon Sep 17 00:00:00 2001 From: Gal Topper Date: Mon, 12 Feb 2024 16:51:56 +0800 Subject: [PATCH 023/119] [Kafka] Default `max_workers` to 1 instead of 4 [1.6.x] (#5114) --- mlrun/datastore/sources.py | 6 +++++- mlrun/runtimes/serving.py | 8 -------- 2 files changed, 5 insertions(+), 9 deletions(-) diff --git a/mlrun/datastore/sources.py b/mlrun/datastore/sources.py index df9e817f13e..a310fb1b5c1 100644 --- a/mlrun/datastore/sources.py +++ b/mlrun/datastore/sources.py @@ -992,6 +992,10 @@ def add_nuclio_trigger(self, function): "workerAllocationMode", "static" ) + trigger_kwargs = {} + if "max_workers" in extra_attributes: + trigger_kwargs = {"max_workers": extra_attributes.pop("max_workers")} + trigger = KafkaTrigger( brokers=extra_attributes.pop("brokers"), topics=extra_attributes.pop("topics"), @@ -1000,7 +1004,7 @@ def add_nuclio_trigger(self, function): initial_offset=extra_attributes.pop("initial_offset"), explicit_ack_mode=explicit_ack_mode, extra_attributes=extra_attributes, - max_workers=extra_attributes.pop("max_workers", 4), + **trigger_kwargs, ) function = function.add_trigger("kafka", trigger) diff --git a/mlrun/runtimes/serving.py b/mlrun/runtimes/serving.py index bab0d509436..8bdd09f46d2 100644 --- a/mlrun/runtimes/serving.py +++ b/mlrun/runtimes/serving.py @@ -488,7 +488,6 @@ def _add_ref_triggers(self): "workerAllocationMode", "static" ) - max_workers_default = 4 if ( stream.path.startswith("kafka://") or "kafka_bootstrap_servers" in stream.options @@ -497,9 +496,6 @@ def _add_ref_triggers(self): if brokers: brokers = brokers.split(",") topic, brokers = parse_kafka_url(stream.path, brokers) - trigger_args["max_workers"] = trigger_args.get( - "max_workers", max_workers_default - ) trigger = KafkaTrigger( brokers=brokers, topics=[topic], @@ -510,10 +506,6 @@ def _add_ref_triggers(self): else: # V3IO doesn't allow hyphens in object names group = group.replace("-", "_") - # Deal with unconventional parameter naming in V3IOStreamTrigger specifically - trigger_args["maxWorkers"] = trigger_args.get( - "maxWorkers", max_workers_default - ) child_function.function_object.add_v3io_stream_trigger( stream.path, group=group, shards=stream.shards, **trigger_args ) From bbf66be498de8b664e7f88b92f3666f73553c5e4 Mon Sep 17 00:00:00 2001 From: Gal Topper Date: Mon, 12 Feb 2024 16:55:24 +0800 Subject: [PATCH 024/119] [Datastore] Fix nuclio deprecation warnings [1.6.x] (#5113) --- mlrun/datastore/sources.py | 4 ++-- mlrun/runtimes/function.py | 11 +++++------ mlrun/runtimes/serving.py | 4 ++-- server/api/crud/model_monitoring/deployment.py | 2 +- 4 files changed, 10 insertions(+), 11 deletions(-) diff --git a/mlrun/datastore/sources.py b/mlrun/datastore/sources.py index a310fb1b5c1..9735c538f97 100644 --- a/mlrun/datastore/sources.py +++ b/mlrun/datastore/sources.py @@ -900,7 +900,7 @@ def add_nuclio_trigger(self, function): engine = function.spec.graph.engine if mlrun.mlconf.is_explicit_ack() and engine == "async": kwargs["explicit_ack_mode"] = "explicitOnly" - kwargs["workerAllocationMode"] = "static" + kwargs["worker_allocation_mode"] = "static" 
function.add_v3io_stream_trigger( self.path, @@ -989,7 +989,7 @@ def add_nuclio_trigger(self, function): if mlrun.mlconf.is_explicit_ack() and engine == "async": explicit_ack_mode = "explicitOnly" extra_attributes["workerAllocationMode"] = extra_attributes.get( - "workerAllocationMode", "static" + "worker_allocation_mode", "static" ) trigger_kwargs = {} diff --git a/mlrun/runtimes/function.py b/mlrun/runtimes/function.py index 523336c0228..7a7acfaf145 100644 --- a/mlrun/runtimes/function.py +++ b/mlrun/runtimes/function.py @@ -500,10 +500,9 @@ def add_v3io_stream_trigger( # verify v3io stream trigger name is valid mlrun.utils.helpers.validate_v3io_stream_consumer_group(group) - consumer_group = kwargs.pop("consumerGroup", None) - if consumer_group: + if "consumer_group" in kwargs: logger.warning( - "'consumerGroup' kwargs value is ignored. use group argument instead" + "'consumer_group' in kwargs will be ignored. Use group parameter instead." ) container, path = split_path(stream_path) @@ -517,11 +516,11 @@ def add_v3io_stream_trigger( name=name, container=container, path=path[1:], - consumerGroup=group, - seekTo=seek_to, + consumer_group=group, + seek_to=seek_to, webapi=endpoint or "http://v3io-webapi:8081", extra_attributes=extra_attributes, - readBatchSize=256, + read_batch_size=256, **kwargs, ), ) diff --git a/mlrun/runtimes/serving.py b/mlrun/runtimes/serving.py index 8bdd09f46d2..7190ea22d69 100644 --- a/mlrun/runtimes/serving.py +++ b/mlrun/runtimes/serving.py @@ -484,8 +484,8 @@ def _add_ref_triggers(self): ) extra_attributes = trigger_args.get("extra_attributes", {}) trigger_args["extra_attributes"] = extra_attributes - extra_attributes["workerAllocationMode"] = extra_attributes.get( - "workerAllocationMode", "static" + extra_attributes["worker_allocation_mode"] = extra_attributes.get( + "worker_allocation_mode", "static" ) if ( diff --git a/server/api/crud/model_monitoring/deployment.py b/server/api/crud/model_monitoring/deployment.py index 2d113014ee6..301c6561fb1 100644 --- a/server/api/crud/model_monitoring/deployment.py +++ b/server/api/crud/model_monitoring/deployment.py @@ -674,7 +674,7 @@ def _apply_stream_trigger( kwargs["access_key"] = model_monitoring_access_key if mlrun.mlconf.is_explicit_ack(): kwargs["explicit_ack_mode"] = "explicitOnly" - kwargs["workerAllocationMode"] = "static" + kwargs["worker_allocation_mode"] = "static" # Generate V3IO stream trigger function.add_v3io_stream_trigger( From afecd24bc95a9f7803869044c4d8ec90feec0051 Mon Sep 17 00:00:00 2001 From: Alon Maor <48641682+alonmr@users.noreply.github.com> Date: Mon, 12 Feb 2024 15:06:20 +0200 Subject: [PATCH 025/119] [API] Check background task state on project deletion wait [1.6.x] (#5104) --- server/api/api/endpoints/projects.py | 2 +- server/api/api/utils.py | 27 +++++- server/api/crud/projects.py | 19 +++++ server/api/utils/background_tasks/internal.py | 3 +- server/api/utils/projects/follower.py | 3 +- server/api/utils/projects/leader.py | 1 + server/api/utils/projects/member.py | 1 + tests/api/api/test_projects.py | 82 +++++++++++++++++-- tests/api/conftest.py | 28 ++++++- 9 files changed, 150 insertions(+), 16 deletions(-) diff --git a/server/api/api/endpoints/projects.py b/server/api/api/endpoints/projects.py index 54b4537c0ae..c0d9259c9bc 100644 --- a/server/api/api/endpoints/projects.py +++ b/server/api/api/endpoints/projects.py @@ -262,7 +262,7 @@ async def delete_project( return fastapi.Response(status_code=http.HTTPStatus.ACCEPTED.value) else: - # For iguzio < 3.5.5, the project deletion job 
is triggered while zebo does not wait for it to complete. + # For iguazio < 3.5.5, the project deletion job is triggered while iguazio does not wait for it to complete. # We wait for it here to make sure we respond with a proper status code. await run_in_threadpool( server.api.api.utils.verify_project_is_deleted, name, auth_info diff --git a/server/api/api/utils.py b/server/api/api/utils.py index 8e1757b7c15..f7de17f55df 100644 --- a/server/api/api/utils.py +++ b/server/api/api/utils.py @@ -18,6 +18,7 @@ import re import traceback import typing +import uuid from hashlib import sha1, sha224 from http import HTTPStatus from os import environ @@ -1125,15 +1126,18 @@ def get_or_create_project_deletion_background_task( background_task_kind=background_task_kind, ) + background_task_name = str(uuid.uuid4()) return server.api.utils.background_tasks.InternalBackgroundTasksHandler().create_background_task( background_task_kind, mlrun.mlconf.background_tasks.default_timeouts.operations.delete_project, _delete_project, + background_task_name, db_session=db_session, project_name=project_name, deletion_strategy=deletion_strategy, auth_info=auth_info, wait_for_project_deletion=wait_for_project_deletion, + background_task_name=background_task_name, ) @@ -1143,6 +1147,7 @@ async def _delete_project( deletion_strategy: mlrun.common.schemas.DeletionStrategy, auth_info: mlrun.common.schemas.AuthInfo, wait_for_project_deletion: bool, + background_task_name: str, ): force_deleted = False try: @@ -1154,6 +1159,7 @@ async def _delete_project( auth_info.projects_role, auth_info, wait_for_completion=True, + background_task_name=background_task_name, ) except mlrun.errors.MLRunNotFoundError as exc: if not server.api.utils.helpers.is_request_from_leader(auth_info.projects_role): @@ -1188,12 +1194,31 @@ async def _delete_project( def verify_project_is_deleted(project_name, auth_info): def _verify_project_is_deleted(): try: - server.api.db.session.run_function_with_new_db_session( + project = server.api.db.session.run_function_with_new_db_session( get_project_member().get_project, project_name, auth_info.session ) except mlrun.errors.MLRunNotFoundError: return else: + project_status = project.status.dict() + if background_task_name := project_status.get( + "deletion_background_task_name" + ): + bg_task = server.api.utils.background_tasks.InternalBackgroundTasksHandler().get_background_task( + name=background_task_name, raise_on_not_found=False + ) + if ( + bg_task + and bg_task.status.state + == mlrun.common.schemas.BackgroundTaskState.failed + ): + # Background task failed, stop retrying + raise mlrun.errors.MLRunFatalFailureError( + original_exception=mlrun.errors.MLRunInternalServerError( + f"Failed to delete project {project_name}: {bg_task.status.error}" + ) + ) + raise mlrun.errors.MLRunInternalServerError( f"Project {project_name} was not deleted" ) diff --git a/server/api/crud/projects.py b/server/api/crud/projects.py index 36a4f708a36..451378c4caf 100644 --- a/server/api/crud/projects.py +++ b/server/api/crud/projects.py @@ -100,8 +100,12 @@ def delete_project( name: str, deletion_strategy: mlrun.common.schemas.DeletionStrategy = mlrun.common.schemas.DeletionStrategy.default(), auth_info: mlrun.common.schemas.AuthInfo = mlrun.common.schemas.AuthInfo(), + background_task_name: str = None, ): logger.debug("Deleting project", name=name, deletion_strategy=deletion_strategy) + self._enrich_project_with_deletion_background_task_name( + session, name, background_task_name + ) if ( deletion_strategy.is_restricted() or 
deletion_strategy == mlrun.common.schemas.DeletionStrategy.check @@ -472,3 +476,18 @@ def _verify_no_project_function_pods(): False, _verify_no_project_function_pods, ) + + @staticmethod + def _enrich_project_with_deletion_background_task_name( + session: sqlalchemy.orm.Session, name: str, background_task_name: str + ): + if not background_task_name: + return + + project_patch = { + "status": {"deletion_background_task_name": background_task_name} + } + + server.api.utils.singletons.db.get_db().patch_project( + session, name, project_patch + ) diff --git a/server/api/utils/background_tasks/internal.py b/server/api/utils/background_tasks/internal.py index 32724aedfc8..2ff8d10a56d 100644 --- a/server/api/utils/background_tasks/internal.py +++ b/server/api/utils/background_tasks/internal.py @@ -48,10 +48,11 @@ def create_background_task( kind: str, timeout: typing.Optional[int], # in seconds function, + name: typing.Optional[str] = None, *args, **kwargs, ) -> typing.Tuple[typing.Callable, str]: - name = str(uuid.uuid4()) + name = name or str(uuid.uuid4()) # sanity if name in self._internal_background_tasks: raise RuntimeError("Background task name already exists") diff --git a/server/api/utils/projects/follower.py b/server/api/utils/projects/follower.py index bfa210ac23f..8b124fb799e 100644 --- a/server/api/utils/projects/follower.py +++ b/server/api/utils/projects/follower.py @@ -203,12 +203,13 @@ def delete_project( projects_role: typing.Optional[mlrun.common.schemas.ProjectsRole] = None, auth_info: mlrun.common.schemas.AuthInfo = mlrun.common.schemas.AuthInfo(), wait_for_completion: bool = True, + background_task_name: str = None, ) -> bool: if server.api.utils.helpers.is_request_from_leader( projects_role, leader_name=self._leader_name ): server.api.crud.Projects().delete_project( - db_session, name, deletion_strategy, auth_info + db_session, name, deletion_strategy, auth_info, background_task_name ) else: return self._leader_client.delete_project( diff --git a/server/api/utils/projects/leader.py b/server/api/utils/projects/leader.py index e2b232e8e51..e32205745ca 100644 --- a/server/api/utils/projects/leader.py +++ b/server/api/utils/projects/leader.py @@ -110,6 +110,7 @@ def delete_project( projects_role: typing.Optional[mlrun.common.schemas.ProjectsRole] = None, auth_info: mlrun.common.schemas.AuthInfo = mlrun.common.schemas.AuthInfo(), wait_for_completion: bool = True, + background_task_name: str = None, ) -> bool: self._projects_in_deletion.add(name) try: diff --git a/server/api/utils/projects/member.py b/server/api/utils/projects/member.py index 11c3c45e379..edfa5be6561 100644 --- a/server/api/utils/projects/member.py +++ b/server/api/utils/projects/member.py @@ -93,6 +93,7 @@ def delete_project( projects_role: typing.Optional[mlrun.common.schemas.ProjectsRole] = None, auth_info: mlrun.common.schemas.AuthInfo = mlrun.common.schemas.AuthInfo(), wait_for_completion: bool = True, + background_task_name: str = None, ) -> bool: pass diff --git a/tests/api/api/test_projects.py b/tests/api/api/test_projects.py index 7403af8e011..071f0ba8cca 100644 --- a/tests/api/api/test_projects.py +++ b/tests/api/api/test_projects.py @@ -38,6 +38,7 @@ import server.api.crud import server.api.main import server.api.utils.auth.verifier +import server.api.utils.background_tasks import server.api.utils.clients.log_collector import server.api.utils.singletons.db import server.api.utils.singletons.project_member @@ -958,7 +959,7 @@ def test_project_with_parameters( ) def 
test_delete_project_not_found_in_leader( unversioned_client: TestClient, - mock_project_leader_iguazio_client, + mock_project_follower_iguazio_client, delete_api_version: str, ) -> None: project = mlrun.common.schemas.Project( @@ -970,15 +971,80 @@ def test_delete_project_not_found_in_leader( assert response.status_code == HTTPStatus.CREATED.value _assert_project_response(project, response) - response = unversioned_client.delete( - f"{delete_api_version}/projects/{project.metadata.name}", - ) - assert response.status_code == HTTPStatus.ACCEPTED.value + with unittest.mock.patch.object( + mock_project_follower_iguazio_client, + "delete_project", + side_effect=mlrun.errors.MLRunNotFoundError("Project not found"), + ): + response = unversioned_client.delete( + f"{delete_api_version}/projects/{project.metadata.name}", + ) + assert response.status_code == HTTPStatus.ACCEPTED.value - response = unversioned_client.get( - f"v1/projects/{project.metadata.name}", + response = unversioned_client.get( + f"v1/projects/{project.metadata.name}", + ) + assert response.status_code == HTTPStatus.NOT_FOUND.value + + +# Test should not run more than a few seconds because we test that if the background task fails, +# the wrapper task fails fast +@pytest.mark.timeout(10) +@pytest.mark.parametrize( + "delete_api_version", + [ + "v1", + "v2", + ], +) +def test_delete_project_fail_fast( + unversioned_client: TestClient, + mock_project_follower_iguazio_client, + delete_api_version: str, +) -> None: + # Set the igz version for the project leader mock + # We only test igz version < 3.5.5 flow because from 3.5.5 iguazio waits for the inner background task to + # finish so the wrapper task does not wait for the inner task + mlrun.mlconf.igz_version = "3.5.4" + project = mlrun.common.schemas.Project( + metadata=mlrun.common.schemas.ProjectMetadata(name="project-name"), + spec=mlrun.common.schemas.ProjectSpec(), ) - assert response.status_code == HTTPStatus.NOT_FOUND.value + + response = unversioned_client.post("v1/projects", json=project.dict()) + assert response.status_code == HTTPStatus.CREATED.value + _assert_project_response(project, response) + + with unittest.mock.patch( + "server.api.crud.projects.Projects.delete_project_resources", + side_effect=Exception("some error"), + ): + response = unversioned_client.delete( + f"{delete_api_version}/projects/{project.metadata.name}", + headers={ + mlrun.common.schemas.HeaderNames.deletion_strategy: mlrun.common.schemas.DeletionStrategy.cascading, + }, + ) + if delete_api_version == "v1": + assert response.status_code == HTTPStatus.INTERNAL_SERVER_ERROR.value + assert ( + "Failed to delete project project-name: some error" + in response.json()["detail"] + ) + else: + assert response.status_code == HTTPStatus.ACCEPTED.value + background_task = mlrun.common.schemas.BackgroundTask(**response.json()) + background_task = server.api.utils.background_tasks.InternalBackgroundTasksHandler().get_background_task( + background_task.metadata.name + ) + assert ( + background_task.status.state + == mlrun.common.schemas.BackgroundTaskState.failed + ) + assert ( + "Failed to delete project project-name: some error" + in background_task.status.error + ) def _create_resources_of_all_kinds( diff --git a/tests/api/conftest.py b/tests/api/conftest.py index 1cadbf8da58..de390e8cd58 100644 --- a/tests/api/conftest.py +++ b/tests/api/conftest.py @@ -22,6 +22,7 @@ import httpx import kfp import pytest +import semver import sqlalchemy.orm from fastapi.testclient import TestClient @@ -347,6 +348,7 @@ 
class MockedProjectFollowerIguazioClient( ): def __init__(self): self._db_session = None + self._unversioned_client = None def create_project( self, @@ -372,7 +374,21 @@ def delete_project( deletion_strategy: mlrun.common.schemas.DeletionStrategy = mlrun.common.schemas.DeletionStrategy.default(), wait_for_completion: bool = True, ) -> bool: - raise mlrun.errors.MLRunNotFoundError("Project not found") + api_version = "v2" + igz_version = mlrun.mlconf.get_parsed_igz_version() + if igz_version and igz_version < semver.VersionInfo.parse("3.5.5"): + api_version = "v1" + + self._unversioned_client.delete( + f"{api_version}/projects/{name}", + headers={ + mlrun.common.schemas.HeaderNames.projects_role: mlrun.mlconf.httpdb.projects.leader, + mlrun.common.schemas.HeaderNames.deletion_strategy: deletion_strategy, + }, + ) + + # Mock waiting for completion in iguazio (return False to indicate 'not running in background') + return False def list_projects( self, @@ -404,7 +420,9 @@ def get_project_owner( @pytest.fixture() -def mock_project_leader_iguazio_client(db: sqlalchemy.orm.Session): +def mock_project_follower_iguazio_client( + db: sqlalchemy.orm.Session, unversioned_client: TestClient +): """ This fixture mocks the project leader iguazio client. """ @@ -413,8 +431,10 @@ def mock_project_leader_iguazio_client(db: sqlalchemy.orm.Session): old_iguazio_client = server.api.utils.clients.iguazio.Client server.api.utils.clients.iguazio.Client = MockedProjectFollowerIguazioClient server.api.utils.singletons.project_member.initialize_project_member() - MockedProjectFollowerIguazioClient()._db_session = db + iguazio_client = MockedProjectFollowerIguazioClient() + iguazio_client._db_session = db + iguazio_client._unversioned_client = unversioned_client - yield + yield iguazio_client server.api.utils.clients.iguazio.Client = old_iguazio_client From b88fc3cf3c595647a296934ba8077216eef164ad Mon Sep 17 00:00:00 2001 From: Gal Topper Date: Mon, 12 Feb 2024 21:26:52 +0800 Subject: [PATCH 026/119] [Model Monitoring] Fix server-side nuclio version resolution in explicit ack switch [1.6.x] (#5117) --- mlrun/config.py | 9 +++++---- server/api/crud/model_monitoring/deployment.py | 3 ++- 2 files changed, 7 insertions(+), 5 deletions(-) diff --git a/mlrun/config.py b/mlrun/config.py index 03a3ba0dca4..9b1b6e936c2 100644 --- a/mlrun/config.py +++ b/mlrun/config.py @@ -1148,11 +1148,12 @@ def get_s3_storage_options(self) -> typing.Dict[str, typing.Any]: return storage_options - def is_explicit_ack(self) -> bool: + def is_explicit_ack(self, version=None) -> bool: + if not version: + version = self.nuclio_version return self.httpdb.nuclio.explicit_ack == "enabled" and ( - not self.nuclio_version - or semver.VersionInfo.parse(self.nuclio_version) - >= semver.VersionInfo.parse("1.12.10") + not version + or semver.VersionInfo.parse(version) >= semver.VersionInfo.parse("1.12.10") ) diff --git a/server/api/crud/model_monitoring/deployment.py b/server/api/crud/model_monitoring/deployment.py index 301c6561fb1..259edf3d72e 100644 --- a/server/api/crud/model_monitoring/deployment.py +++ b/server/api/crud/model_monitoring/deployment.py @@ -32,6 +32,7 @@ from mlrun.utils import logger from server.api.api import deps from server.api.crud.model_monitoring.helpers import Seconds, seconds2minutes +from server.api.utils.runtimes.nuclio import resolve_nuclio_version _MODEL_MONITORING_COMMON_PATH = ( pathlib.Path(__file__).parents[4] / "mlrun" / "model_monitoring" @@ -672,7 +673,7 @@ def _apply_stream_trigger( kwargs = {} if function_name 
!= mm_constants.MonitoringFunctionNames.STREAM: kwargs["access_key"] = model_monitoring_access_key - if mlrun.mlconf.is_explicit_ack(): + if mlrun.mlconf.is_explicit_ack(version=resolve_nuclio_version()): kwargs["explicit_ack_mode"] = "explicitOnly" kwargs["worker_allocation_mode"] = "static" From 96b737bb0600ec143c52833e57853e365639617f Mon Sep 17 00:00:00 2001 From: alxtkr77 <3098237+alxtkr77@users.noreply.github.com> Date: Mon, 12 Feb 2024 18:08:28 +0200 Subject: [PATCH 027/119] [Documentation] Fix doc strings for several feature set APIs [1.6.x] (#5122) --- mlrun/feature_store/api.py | 146 ++++++++++++++--------------- mlrun/feature_store/feature_set.py | 89 ++++++++++++++++++ 2 files changed, 162 insertions(+), 73 deletions(-) diff --git a/mlrun/feature_store/api.py b/mlrun/feature_store/api.py index 88c268db94b..8b474777480 100644 --- a/mlrun/feature_store/api.py +++ b/mlrun/feature_store/api.py @@ -439,40 +439,6 @@ def ingest( mlrun_context=None, spark_context=None, overwrite=None, -) -> Optional[pd.DataFrame]: - if mlrun_context is None: - deprecated( - version="1.6.0", - reason="Calling 'ingest' with mlrun_context=None is deprecated and will be removed in 1.8.0,\ - use 'FeatureSet.ingest()' instead", - category=FutureWarning, - ) - - return _ingest( - featureset, - source, - targets, - namespace, - return_df, - infer_options, - run_config, - mlrun_context, - spark_context, - overwrite, - ) - - -def _ingest( - featureset: Union[FeatureSet, str] = None, - source=None, - targets: List[DataTargetBase] = None, - namespace=None, - return_df: bool = True, - infer_options: InferOptions = InferOptions.default(), - run_config: RunConfig = None, - mlrun_context=None, - spark_context=None, - overwrite=None, ) -> Optional[pd.DataFrame]: """Read local DataFrame, file, URL, or source into the feature store Ingest reads from the source, run the graph transformations, infers metadata and stats @@ -520,6 +486,40 @@ def _ingest( False for scheduled ingest - does not delete the target) :return: if return_df is True, a dataframe will be returned based on the graph """ + if mlrun_context is None: + deprecated( + version="1.6.0", + reason="Calling 'ingest' with mlrun_context=None is deprecated and will be removed in 1.8.0,\ + use 'FeatureSet.ingest()' instead", + category=FutureWarning, + ) + + return _ingest( + featureset, + source, + targets, + namespace, + return_df, + infer_options, + run_config, + mlrun_context, + spark_context, + overwrite, + ) + + +def _ingest( + featureset: Union[FeatureSet, str] = None, + source=None, + targets: List[DataTargetBase] = None, + namespace=None, + return_df: bool = True, + infer_options: InferOptions = InferOptions.default(), + run_config: RunConfig = None, + mlrun_context=None, + spark_context=None, + overwrite=None, +) -> Optional[pd.DataFrame]: if isinstance(source, pd.DataFrame): source = _rename_source_dataframe_columns(source) @@ -769,26 +769,6 @@ def preview( options: InferOptions = None, verbose: bool = False, sample_size: int = None, -) -> pd.DataFrame: - return _preview( - featureset, - source, - entity_columns, - namespace, - options, - verbose, - sample_size, - ) - - -def _preview( - featureset: FeatureSet, - source, - entity_columns: list = None, - namespace=None, - options: InferOptions = None, - verbose: bool = False, - sample_size: int = None, ) -> pd.DataFrame: """run the ingestion pipeline with local DataFrame/file data and infer features schema and stats @@ -812,6 +792,26 @@ def _preview( :param verbose: verbose log :param sample_size: num of 
rows to sample from the dataset (for large datasets) """ + return _preview( + featureset, + source, + entity_columns, + namespace, + options, + verbose, + sample_size, + ) + + +def _preview( + featureset: FeatureSet, + source, + entity_columns: list = None, + namespace=None, + options: InferOptions = None, + verbose: bool = False, + sample_size: int = None, +) -> pd.DataFrame: if isinstance(source, pd.DataFrame): source = _rename_source_dataframe_columns(source) @@ -905,24 +905,6 @@ def deploy_ingestion_service_v2( name: str = None, run_config: RunConfig = None, verbose=False, -) -> typing.Tuple[str, BaseRuntime]: - return _deploy_ingestion_service_v2( - featureset, - source, - targets, - name, - run_config, - verbose, - ) - - -def _deploy_ingestion_service_v2( - featureset: Union[FeatureSet, str], - source: DataSource = None, - targets: List[DataTargetBase] = None, - name: str = None, - run_config: RunConfig = None, - verbose=False, ) -> typing.Tuple[str, BaseRuntime]: """Start real-time ingestion service using nuclio function @@ -937,7 +919,7 @@ def _deploy_ingestion_service_v2( source = HTTPSource() func = mlrun.code_to_function("ingest", kind="serving").apply(mount_v3io()) config = RunConfig(function=func) - my_set.deploy_ingestion_service(source, run_config=config) + deploy_ingestion_service_v2(my_set, source, run_config=config) :param featureset: feature set object or uri :param source: data source object describing the online or offline source @@ -949,6 +931,24 @@ def _deploy_ingestion_service_v2( :return: URL to access the deployed ingestion service, and the function that was deployed (which will differ from the function passed in via the run_config parameter). """ + return _deploy_ingestion_service_v2( + featureset, + source, + targets, + name, + run_config, + verbose, + ) + + +def _deploy_ingestion_service_v2( + featureset: Union[FeatureSet, str], + source: DataSource = None, + targets: List[DataTargetBase] = None, + name: str = None, + run_config: RunConfig = None, + verbose=False, +) -> typing.Tuple[str, BaseRuntime]: if isinstance(featureset, str): featureset = get_feature_set_by_uri(featureset) diff --git a/mlrun/feature_store/feature_set.py b/mlrun/feature_store/feature_set.py index 580e2b3000b..0e4b5e65092 100644 --- a/mlrun/feature_store/feature_set.py +++ b/mlrun/feature_store/feature_set.py @@ -992,6 +992,50 @@ def ingest( spark_context=None, overwrite=None, ) -> Optional[pd.DataFrame]: + """Read local DataFrame, file, URL, or source into the feature store + Ingest reads from the source, run the graph transformations, infers metadata and stats + and writes the results to the default of specified targets + + when targets are not specified data is stored in the configured default targets + (will usually be NoSQL for real-time and Parquet for offline). + + the `run_config` parameter allow specifying the function and job configuration, + see: :py:class:`~mlrun.feature_store.RunConfig` + + example:: + + stocks_set = FeatureSet("stocks", entities=[Entity("ticker")]) + stocks = pd.read_csv("stocks.csv") + df = stocks_set.ingest(stocks, infer_options=fstore.InferOptions.default()) + + # for running as remote job + config = RunConfig(image='mlrun/mlrun') + df = ingest(stocks_set, stocks, run_config=config) + + # specify source and targets + source = CSVSource("mycsv", path="measurements.csv") + targets = [CSVTarget("mycsv", path="./mycsv.csv")] + ingest(measurements, source, targets) + + :param source: source dataframe or other sources (e.g. 
parquet source see: + :py:class:`~mlrun.datastore.ParquetSource` and other classes in mlrun.datastore with suffix + Source) + :param targets: optional list of data target objects + :param namespace: namespace or module containing graph classes + :param return_df: indicate if to return a dataframe with the graph results + :param infer_options: schema (for discovery of entities, features in featureset), index, stats, + histogram and preview infer options (:py:class:`~mlrun.feature_store.InferOptions`) + :param run_config: function and/or run configuration for remote jobs, + see :py:class:`~mlrun.feature_store.RunConfig` + :param mlrun_context: mlrun context (when running as a job), for internal use ! + :param spark_context: local spark session for spark ingestion, example for creating the spark context: + `spark = SparkSession.builder.appName("Spark function").getOrCreate()` + For remote spark ingestion, this should contain the remote spark service name + :param overwrite: delete the targets' data prior to ingestion + (default: True for non scheduled ingest - deletes the targets that are about to be ingested. + False for scheduled ingest - does not delete the target) + :return: if return_df is True, a dataframe will be returned based on the graph + """ return mlrun.feature_store.api._ingest( self, source, @@ -1014,6 +1058,26 @@ def preview( verbose: bool = False, sample_size: int = None, ) -> pd.DataFrame: + """run the ingestion pipeline with local DataFrame/file data and infer features schema and stats + + example:: + + quotes_set = FeatureSet("stock-quotes", entities=[Entity("ticker")]) + quotes_set.add_aggregation("ask", ["sum", "max"], ["1h", "5h"], "10m") + quotes_set.add_aggregation("bid", ["min", "max"], ["1h"], "10m") + df = quotes_set.preview( + quotes_df, + entity_columns=["ticker"], + ) + + :param source: source dataframe or csv/parquet file path + :param entity_columns: list of entity (index) column names + :param namespace: namespace or module containing graph classes + :param options: schema (for discovery of entities, features in featureset), index, stats, + histogram and preview infer options (:py:class:`~mlrun.feature_store.InferOptions`) + :param verbose: verbose log + :param sample_size: num of rows to sample from the dataset (for large datasets) + """ return mlrun.feature_store.api._preview( self, source, entity_columns, namespace, options, verbose, sample_size ) @@ -1026,6 +1090,31 @@ def deploy_ingestion_service( run_config: RunConfig = None, verbose=False, ) -> Tuple[str, BaseRuntime]: + """Start real-time ingestion service using nuclio function + + Deploy a real-time function implementing feature ingestion pipeline + the source maps to Nuclio event triggers (http, kafka, v3io stream, etc.) + + the `run_config` parameter allow specifying the function and job configuration, + see: :py:class:`~mlrun.feature_store.RunConfig` + + example:: + + source = HTTPSource() + func = mlrun.code_to_function("ingest", kind="serving").apply(mount_v3io()) + config = RunConfig(function=func) + my_set.deploy_ingestion_service(source, run_config=config) + + :param source: data source object describing the online or offline source + :param targets: list of data target objects + :param name: name for the job/function + :param run_config: service runtime configuration (function object/uri, resources, etc..) 
+ :param verbose: verbose log + + :return: URL to access the deployed ingestion service, and the function that was deployed (which will + differ from the function passed in via the run_config parameter). + """ + return mlrun.feature_store.api._deploy_ingestion_service_v2( self, source, targets, name, run_config, verbose ) From 30ccc582957285929bf5ebf4e8f0f8bd817cf453 Mon Sep 17 00:00:00 2001 From: tomer-mamia <125267619+tomerm-iguazio@users.noreply.github.com> Date: Mon, 12 Feb 2024 18:09:58 +0200 Subject: [PATCH 028/119] [Artifacts] Fix import artifact bug [1.6.x] (#5119) --- mlrun/artifacts/manager.py | 3 +++ tests/system/api/test_artifacts.py | 19 +++++++++++++++++++ 2 files changed, 22 insertions(+) diff --git a/mlrun/artifacts/manager.py b/mlrun/artifacts/manager.py index 7b6f4dca0f5..78486f2d6c4 100644 --- a/mlrun/artifacts/manager.py +++ b/mlrun/artifacts/manager.py @@ -132,6 +132,9 @@ def ensure_artifact_source_file_exists(item, path, body): # ModelArtifact is a directory. if isinstance(item, ModelArtifact): return + # Could happen in the import artifact scenario - that path is None. + if item.target_path: + return # in DatasetArtifact if hasattr(item, "df") and item.df is not None: return diff --git a/tests/system/api/test_artifacts.py b/tests/system/api/test_artifacts.py index eeda9565ff3..ebd30adda60 100644 --- a/tests/system/api/test_artifacts.py +++ b/tests/system/api/test_artifacts.py @@ -14,6 +14,8 @@ # import pathlib +import tempfile +import uuid import pytest @@ -59,3 +61,20 @@ def test_fail_overflowing_artifact(self): assert ( "Failed committing changes to DB" in run["status"]["error"] ), "run should fail with a reason" + + @pytest.mark.enterprise + def test_import_artifact(self): + temp_dir = tempfile.mkdtemp() + key = f"artifact_key_{uuid.uuid4()}" + body = "my test artifact" + artifact = self.project.log_artifact( + key, body=body, local_path=f"{temp_dir}/test_artifact.txt" + ) + with tempfile.NamedTemporaryFile( + mode="w+", suffix=".yaml", delete=True + ) as temp_file: + artifact.export(temp_file.name) + artifact = self.project.import_artifact( + temp_file.name, new_key=f"imported_artifact_key_{uuid.uuid4()}" + ) + assert artifact.to_dataitem().get().decode() == body From bfa39ee925d2be82dae5dfd6c5240076138d9326 Mon Sep 17 00:00:00 2001 From: Liran BG Date: Mon, 12 Feb 2024 20:40:07 +0200 Subject: [PATCH 029/119] [Project] Fix Project Deletion Failure due to Project Sync Race Condition [1.6.x] (#5123) --- server/api/crud/projects.py | 1 + server/api/utils/projects/follower.py | 70 ++++++++++++------- .../utils/projects/test_follower_member.py | 12 ++++ 3 files changed, 59 insertions(+), 24 deletions(-) diff --git a/server/api/crud/projects.py b/server/api/crud/projects.py index 451378c4caf..62e9e8430b3 100644 --- a/server/api/crud/projects.py +++ b/server/api/crud/projects.py @@ -26,6 +26,7 @@ import mlrun.utils.singleton import server.api.crud import server.api.db.session +import server.api.utils.background_tasks import server.api.utils.clients.nuclio import server.api.utils.events.events_factory as events_factory import server.api.utils.projects.remotes.follower as project_follower diff --git a/server/api/utils/projects/follower.py b/server/api/utils/projects/follower.py index 8b124fb799e..d4f30353247 100644 --- a/server/api/utils/projects/follower.py +++ b/server/api/utils/projects/follower.py @@ -31,6 +31,7 @@ import server.api.crud import server.api.db.session import server.api.utils.auth.verifier +import server.api.utils.background_tasks import 
server.api.utils.clients.iguazio import server.api.utils.helpers import server.api.utils.periodic @@ -47,6 +48,10 @@ class Member( ): def initialize(self): logger.info("Initializing projects follower") + self._is_chief = ( + mlrun.mlconf.httpdb.clusterization.role + == mlrun.common.schemas.ClusterizationRole.chief + ) self._leader_name = mlrun.mlconf.httpdb.projects.leader self._sync_session = None self._leader_client: server.api.utils.projects.remotes.leader.Member @@ -66,32 +71,24 @@ def initialize(self): ) self._synced_until_datetime = None # run one sync to start off on the right foot and fill out the cache but don't fail initialization on it - try: - # Basically the delete operation in our projects mechanism is fully consistent, meaning the leader won't - # remove the project from its persistency (the source of truth) until it was successfully removed from all - # followers. Therefore, when syncing projects from the leader, we don't need to search for the deletions - # that may happen without us knowing about it (therefore full_sync by default is false). When we - # introduced the chief/worker mechanism, we needed to change the follower to keep its projects in the DB - # instead of in cache. On the switch, since we were using cache and the projects table in the DB was not - # maintained, we know we may have projects that shouldn't be there anymore, ideally we would have trigger - # the full sync only once on the switch, but since we don't have a good heuristic to identify the switch - # we're doing a full_sync on every initialization - full_sync = ( - mlrun.mlconf.httpdb.clusterization.role - == mlrun.common.schemas.ClusterizationRole.chief - ) - self._sync_projects(full_sync=full_sync) - except Exception as exc: - logger.warning( - "Initial projects sync failed", - exc=err_to_str(exc), - traceback=traceback.format_exc(), - ) - self._start_periodic_sync() + if self._is_chief: + try: + # full_sync=True was a temporary measure to handle the move of mlrun from single instance to + # chief-worker model. 
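For readers tracing this change, the gist of the new gating is a single role check performed once at initialization; the following is an illustrative sketch only (the helper name is made up, the attribute names are the ones used in the diff above):

```python
import mlrun
import mlrun.common.schemas


def is_chief_replica() -> bool:
    # only the chief replica runs the initial full sync and owns the
    # periodic project-sync loop; worker replicas skip both
    return (
        mlrun.mlconf.httpdb.clusterization.role
        == mlrun.common.schemas.ClusterizationRole.chief
    )
```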
+ # TODO: remove full_sync=True in 1.7.0 if no issues arise + self._sync_projects(full_sync=True) + except Exception as exc: + logger.warning( + "Initial projects sync failed", + exc=err_to_str(exc), + traceback=traceback.format_exc(), + ) + self._start_periodic_sync() def shutdown(self): logger.info("Shutting down projects leader") - self._stop_periodic_sync() + if self._is_chief: + self._stop_periodic_sync() def create_project( self, @@ -290,6 +287,7 @@ async def get_project_summary( ) -> mlrun.common.schemas.ProjectSummary: return await server.api.crud.Projects().get_project_summary(db_session, name) + @server.api.utils.helpers.ensure_running_on_chief def _start_periodic_sync(self): # the > 0 condition is to allow ourselves to disable the sync from configuration if self._periodic_sync_interval_seconds > 0: @@ -304,9 +302,11 @@ def _start_periodic_sync(self): self._sync_projects, ) + @server.api.utils.helpers.ensure_running_on_chief def _stop_periodic_sync(self): server.api.utils.periodic.cancel_periodic_function(self._sync_projects.__name__) + @server.api.utils.helpers.ensure_running_on_chief def _sync_projects(self, full_sync=False): """ :param full_sync: when set to true, in addition to syncing project creation/updates from the leader, we will @@ -345,13 +345,16 @@ def _list_projects_from_leader(self): def _store_projects_from_leader(self, db_session, db_projects, leader_projects): db_projects_names = [project.metadata.name for project in db_projects.projects] - # Don't add projects in non-terminal state if they didn't exist before to prevent race conditions + # Don't add projects in non-terminal state if they didn't exist before, or projects that are currently being + # deleted to prevent race conditions filtered_projects = [] for leader_project in leader_projects: if ( leader_project.status.state not in mlrun.common.schemas.ProjectState.terminal_states() and leader_project.metadata.name not in db_projects_names + ) or self._project_deletion_background_task_exists( + leader_project.metadata.name ): continue filtered_projects.append(leader_project) @@ -363,6 +366,25 @@ def _store_projects_from_leader(self, db_session, db_projects, leader_projects): db_session, project.metadata.name, project ) + @staticmethod + def _project_deletion_background_task_exists(project_name): + background_task_kinds = [ + task_format.format(project_name) + for task_format in [ + server.api.utils.background_tasks.BackgroundTaskKinds.project_deletion_wrapper, + server.api.utils.background_tasks.BackgroundTaskKinds.project_deletion, + ] + ] + return any( + [ + server.api.utils.background_tasks.InternalBackgroundTasksHandler().get_active_background_task_by_kind( + background_task_kind, + raise_on_not_found=False, + ) + for background_task_kind in background_task_kinds + ] + ) + def _archive_projects_missing_from_leader( self, db_session, db_projects, leader_projects ): diff --git a/tests/api/utils/projects/test_follower_member.py b/tests/api/utils/projects/test_follower_member.py index b4696cefbca..53d3769ac01 100644 --- a/tests/api/utils/projects/test_follower_member.py +++ b/tests/api/utils/projects/test_follower_member.py @@ -25,6 +25,7 @@ import mlrun.config import mlrun.errors import server.api.crud +import server.api.utils.background_tasks import server.api.utils.projects.follower import server.api.utils.projects.remotes.leader import server.api.utils.singletons.db @@ -60,6 +61,7 @@ def test_sync_projects( db: sqlalchemy.orm.Session, projects_follower: server.api.utils.projects.follower.Member, nop_leader: 
server.api.utils.projects.remotes.leader.Member, + monkeypatch, ): project_nothing_changed = _generate_project(name="project-nothing-changed") project_in_creation = _generate_project( @@ -88,6 +90,15 @@ def test_sync_projects( name="project-will-be-unarchived", state=mlrun.common.schemas.ProjectState.archived, ) + + # check race condition with background task where sync might want to create a project that is just deleted + project_just_deleted = _generate_project(name="project-just-deleted") + monkeypatch.setattr( + server.api.utils.background_tasks.InternalBackgroundTasksHandler, + "get_active_background_task_by_kind", + lambda _, kind, raise_on_not_found: project_just_deleted.metadata.name in kind, + ) + for _project in [ project_nothing_changed, project_in_creation, @@ -108,6 +119,7 @@ def test_sync_projects( project_offline, project_moved_to_deletion, project_will_be_unarchived, + project_just_deleted, ], None, ) From 1b13fa2c09514dd944667b7a8cd214e4b9912b33 Mon Sep 17 00:00:00 2001 From: Assaf Ben-Amitai Date: Tue, 13 Feb 2024 12:28:32 +0200 Subject: [PATCH 030/119] [Serving] update api call of get_online_service [1.6.x] (#5131) --- mlrun/serving/routers.py | 6 ++---- 1 file changed, 2 insertions(+), 4 deletions(-) diff --git a/mlrun/serving/routers.py b/mlrun/serving/routers.py index f5b47db3dd0..b7f5f5ff4cf 100644 --- a/mlrun/serving/routers.py +++ b/mlrun/serving/routers.py @@ -1162,8 +1162,7 @@ def __init__( def post_init(self, mode="sync"): super().post_init(mode) - self._feature_service = mlrun.feature_store.get_online_feature_service( - feature_vector=self.feature_vector_uri, + self._feature_service = self.feature_vector.get_online_feature_service( impute_policy=self.impute_policy, ) @@ -1305,8 +1304,7 @@ def __init__( def post_init(self, mode="sync"): super().post_init(mode) - self._feature_service = mlrun.feature_store.get_online_feature_service( - feature_vector=self.feature_vector_uri, + self._feature_service = self.feature_vector.get_online_feature_service( impute_policy=self.impute_policy, ) From 3cda08a3c54fad9cb0486a17abea5cee437ec222 Mon Sep 17 00:00:00 2001 From: daniels290813 <78727943+daniels290813@users.noreply.github.com> Date: Wed, 14 Feb 2024 10:30:20 +0200 Subject: [PATCH 031/119] [Docs] Bump sklearn and numpy in tutorials [1.6.x] (#5127) --- docs/tutorials/01-mlrun-basics.ipynb | 4 ++-- 1 file changed, 2 insertions(+), 2 deletions(-) diff --git a/docs/tutorials/01-mlrun-basics.ipynb b/docs/tutorials/01-mlrun-basics.ipynb index 9e1375eeea4..2f0eb2b5b13 100644 --- a/docs/tutorials/01-mlrun-basics.ipynb +++ b/docs/tutorials/01-mlrun-basics.ipynb @@ -67,7 +67,7 @@ "\n", "**Before you start, make sure the MLRun client package is installed and configured properly:**\n", "\n", - "This notebook uses sklearn. If it is not installed in your environment run `!pip install scikit-learn~=1.3`." + "This notebook uses sklearn and numpy. If it is not installed in your environment run `!pip install scikit-learn~=1.4 numpy~=1.26`." 
] }, { @@ -82,7 +82,7 @@ "outputs": [], "source": [ "# Install MLRun and sklearn, run this only once (restart the notebook after the install !!!)\n", - "%pip install mlrun scikit-learn~=1.3.0" + "%pip install mlrun scikit-learn~=1.4 numpy~=1.26" ] }, { From 0d81c46b5f073e173838bc990ff2d5d432524706 Mon Sep 17 00:00:00 2001 From: Liran BG Date: Wed, 14 Feb 2024 12:18:01 +0200 Subject: [PATCH 032/119] [Projects] Support email-like username [1.6.x] (#5136) --- mlrun/projects/project.py | 11 +++++------ mlrun/utils/helpers.py | 18 ++++++++++++++++++ tests/utils/test_helpers.py | 27 +++++++++++++++++++++++++++ 3 files changed, 50 insertions(+), 6 deletions(-) diff --git a/mlrun/projects/project.py b/mlrun/projects/project.py index ec6ccd6b7e1..fc3b51cec15 100644 --- a/mlrun/projects/project.py +++ b/mlrun/projects/project.py @@ -30,7 +30,6 @@ import dotenv import git import git.exc -import inflection import kfp import nuclio import requests @@ -171,7 +170,7 @@ def setup(project): :param name: project name :param context: project local directory path (default value = "./") :param init_git: if True, will git init the context dir - :param user_project: add the current user name to the provided project name (making it unique per user) + :param user_project: add the current username to the provided project name (making it unique per user) :param remote: remote Git url :param from_template: path to project YAML/zip file that will be used as a template :param secrets: key:secret dict or SecretsStore used to download sources @@ -319,7 +318,7 @@ def setup(project): :param init_git: if True, will git init the context dir :param subpath: project subpath (within the archive) :param clone: if True, always clone (delete any existing content) - :param user_project: add the current user name to the project name (for db:// prefixes) + :param user_project: add the current username to the project name (for db:// prefixes) :param save: whether to save the created project and artifact in the DB :param sync_functions: sync the project's functions into the project object (will be saved to the DB if save=True) :param parameters: key/value pairs to add to the project.spec.params @@ -420,7 +419,7 @@ def get_or_create_project( save: bool = True, parameters: dict = None, ) -> "MlrunProject": - """Load a project from MLRun DB, or create/import if doesnt exist + """Load a project from MLRun DB, or create/import if it does not exist MLRun looks for a project.yaml file with project definition and objects in the project root path and use it to initialize the project, in addition it runs the project_setup.py file (if it exists) @@ -620,9 +619,9 @@ def _add_username_to_project_name_if_needed(name, user_project): if not name: raise ValueError("user_project must be specified together with name") username = environ.get("V3IO_USERNAME") or getpass.getuser() - normalized_username = inflection.dasherize(username.lower()) + normalized_username = mlrun.utils.normalize_project_username(username.lower()) if username != normalized_username: - logger.info( + logger.debug( "Username was normalized to match the required pattern for project name", username=username, normalized_username=normalized_username, diff --git a/mlrun/utils/helpers.py b/mlrun/utils/helpers.py index e2ac6bced74..240abe657dc 100644 --- a/mlrun/utils/helpers.py +++ b/mlrun/utils/helpers.py @@ -1525,6 +1525,24 @@ def normalize_workflow_name(name, project_name): return name.removeprefix(project_name + "-") +def normalize_project_username(username: str): + username = 
username.lower() + + # remove domain if exists + username = username.split("@")[0] + + # replace non r'a-z0-9\-_' chars with empty string + username = inflection.parameterize(username, separator="") + + # replace underscore with dashes + username = inflection.dasherize(username) + + # ensure ends with alphanumeric + username = username.rstrip("-_") + + return username + + # run_in threadpool is taken from fastapi to allow us to run sync functions in a threadpool # without importing fastapi in the client async def run_in_threadpool(func, *args, **kwargs): diff --git a/tests/utils/test_helpers.py b/tests/utils/test_helpers.py index 96c4cca533c..6a1376af1e9 100644 --- a/tests/utils/test_helpers.py +++ b/tests/utils/test_helpers.py @@ -927,3 +927,30 @@ def some_func(count_dict, a, b, some_other_thing=None): def test_iterate_list_by_chunks(iterable_list, chunk_size, expected_chunked_list): chunked_list = mlrun.utils.iterate_list_by_chunks(iterable_list, chunk_size) assert list(chunked_list) == expected_chunked_list + + +@pytest.mark.parametrize( + "username,expected_normalized_username", + [ + # sanity, all good + ("test", "test"), + # ensure ends with alphanumeric + ("test.", "test"), + ("test-", "test"), + # lowercase + ("TestUser", "testuser"), + # remove special characters + ("UserName!@#$", "username"), + # dasherize + ("user_name", "user-name"), + ("User-Name_123", "user-name-123"), + # everything with @ (email-like username) + ("User_Name@domain.com", "user-name"), + ("user@domain.com", "user"), + ("user.name@example.com", "username"), + ("user_name@example.com", "user-name"), + ], +) +def test_normalize_username(username, expected_normalized_username): + normalized_username = mlrun.utils.helpers.normalize_project_username(username) + assert normalized_username == expected_normalized_username From f2f2f76e714c190d6f40928f55c0a53ed72dec41 Mon Sep 17 00:00:00 2001 From: alxtkr77 <3098237+alxtkr77@users.noreply.github.com> Date: Wed, 14 Feb 2024 12:24:04 +0200 Subject: [PATCH 033/119] [Datastore] Correct the passing of the secret to the profile reader [1.6.x] (#5134) --- mlrun/datastore/datastore.py | 2 +- mlrun/datastore/datastore_profile.py | 4 ++-- 2 files changed, 3 insertions(+), 3 deletions(-) diff --git a/mlrun/datastore/datastore.py b/mlrun/datastore/datastore.py index 3817a7c8500..83b8c7caa9f 100644 --- a/mlrun/datastore/datastore.py +++ b/mlrun/datastore/datastore.py @@ -195,7 +195,7 @@ def get_or_create_store( store_key = f"{schema}://{endpoint}" if schema == "ds": - datastore_profile = datastore_profile_read(url, project_name) + datastore_profile = datastore_profile_read(url, project_name, secrets) if secrets and datastore_profile.secrets(): secrets = merge(secrets, datastore_profile.secrets()) else: diff --git a/mlrun/datastore/datastore_profile.py b/mlrun/datastore/datastore_profile.py index e7766d7171d..e1299dc441a 100644 --- a/mlrun/datastore/datastore_profile.py +++ b/mlrun/datastore/datastore_profile.py @@ -367,7 +367,7 @@ def safe_literal_eval(value): ) -def datastore_profile_read(url, project_name=""): +def datastore_profile_read(url, project_name="", secrets: dict = None): parsed_url = urlparse(url) if parsed_url.scheme.lower() != "ds": raise mlrun.errors.MLRunInvalidArgumentError( @@ -385,7 +385,7 @@ def datastore_profile_read(url, project_name=""): project_ds_name_private = DatastoreProfile.generate_secret_key( profile_name, project_name ) - private_body = get_secret_or_env(project_ds_name_private) + private_body = get_secret_or_env(project_ds_name_private, 
secret_provider=secrets) if not public_profile or not private_body: raise mlrun.errors.MLRunInvalidArgumentError( f"Unable to retrieve the datastore profile '{url}' from either the server or local environment. " From ecd3aae569a9e1f6808cb83d0400ff5d875d1d35 Mon Sep 17 00:00:00 2001 From: alxtkr77 <3098237+alxtkr77@users.noreply.github.com> Date: Wed, 14 Feb 2024 15:20:18 +0200 Subject: [PATCH 034/119] [Documentation] Fix doc strings for several feature vector APIs [1.6.x] (#5139) --- mlrun/feature_store/api.py | 112 ++++++++++++------------ mlrun/feature_store/feature_vector.py | 120 ++++++++++++++++++++++++++ 2 files changed, 176 insertions(+), 56 deletions(-) diff --git a/mlrun/feature_store/api.py b/mlrun/feature_store/api.py index 8b474777480..249f760b51f 100644 --- a/mlrun/feature_store/api.py +++ b/mlrun/feature_store/api.py @@ -115,44 +115,6 @@ def get_offline_features( spark_service: str = None, timestamp_for_filtering: Union[str, Dict[str, str]] = None, ): - return _get_offline_features( - feature_vector, - entity_rows, - entity_timestamp_column, - target, - run_config, - drop_columns, - start_time, - end_time, - with_indexes, - update_stats, - engine, - engine_args, - query, - order_by, - spark_service, - timestamp_for_filtering, - ) - - -def _get_offline_features( - feature_vector: Union[str, FeatureVector], - entity_rows=None, - entity_timestamp_column: str = None, - target: DataTargetBase = None, - run_config: RunConfig = None, - drop_columns: List[str] = None, - start_time: Union[str, datetime] = None, - end_time: Union[str, datetime] = None, - with_indexes: bool = False, - update_stats: bool = False, - engine: str = None, - engine_args: dict = None, - query: str = None, - order_by: Union[str, List[str]] = None, - spark_service: str = None, - timestamp_for_filtering: Union[str, Dict[str, str]] = None, -) -> Union[OfflineVectorResponse, RemoteVectorResponse]: """retrieve offline feature vector results specify a feature vector object/uri and retrieve the desired features, their metadata @@ -213,6 +175,44 @@ def _get_offline_features( merge process using start_time and end_time params. 
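The relative-time filtering documented above is easiest to see in a short, self-contained sketch; the vector and feature names below are hypothetical, and the time strings follow the pandas-Timedelta rules just described:

```python
import mlrun.feature_store as fstore

vector = fstore.FeatureVector(
    "stocks-vector", features=["stock-quotes.bid", "stocks.*"]
)

# "now-7d floor 1H" resolves to seven days ago, aligned down to the hour;
# the query string then filters the merged output rows
resp = fstore.get_offline_features(
    vector,
    start_time="now-7d floor 1H",
    end_time="now",
    query="bid > 100",
)
df = resp.to_dataframe()
```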
""" + return _get_offline_features( + feature_vector, + entity_rows, + entity_timestamp_column, + target, + run_config, + drop_columns, + start_time, + end_time, + with_indexes, + update_stats, + engine, + engine_args, + query, + order_by, + spark_service, + timestamp_for_filtering, + ) + + +def _get_offline_features( + feature_vector: Union[str, FeatureVector], + entity_rows=None, + entity_timestamp_column: str = None, + target: DataTargetBase = None, + run_config: RunConfig = None, + drop_columns: List[str] = None, + start_time: Union[str, datetime] = None, + end_time: Union[str, datetime] = None, + with_indexes: bool = False, + update_stats: bool = False, + engine: str = None, + engine_args: dict = None, + query: str = None, + order_by: Union[str, List[str]] = None, + spark_service: str = None, + timestamp_for_filtering: Union[str, Dict[str, str]] = None, +) -> Union[OfflineVectorResponse, RemoteVectorResponse]: if entity_rows is None and entity_timestamp_column is not None: raise mlrun.errors.MLRunInvalidArgumentError( "entity_timestamp_column param " @@ -282,24 +282,6 @@ def get_online_feature_service( update_stats: bool = False, entity_keys: List[str] = None, ): - return _get_online_feature_service( - feature_vector, - run_config, - fixed_window_type, - impute_policy, - update_stats, - entity_keys, - ) - - -def _get_online_feature_service( - feature_vector: Union[str, FeatureVector], - run_config: RunConfig = None, - fixed_window_type: FixedWindowType = FixedWindowType.LastClosedWindow, - impute_policy: dict = None, - update_stats: bool = False, - entity_keys: List[str] = None, -) -> OnlineVectorService: """initialize and return online feature vector service api, returns :py:class:`~mlrun.feature_store.OnlineVectorService` @@ -363,6 +345,24 @@ def _get_online_feature_service( :return: Initialize the `OnlineVectorService`. Will be used in subclasses where `support_online=True`. """ + return _get_online_feature_service( + feature_vector, + run_config, + fixed_window_type, + impute_policy, + update_stats, + entity_keys, + ) + + +def _get_online_feature_service( + feature_vector: Union[str, FeatureVector], + run_config: RunConfig = None, + fixed_window_type: FixedWindowType = FixedWindowType.LastClosedWindow, + impute_policy: dict = None, + update_stats: bool = False, + entity_keys: List[str] = None, +) -> OnlineVectorService: if isinstance(feature_vector, FeatureVector): update_stats = True feature_vector = _features_to_vector_and_check_permissions( diff --git a/mlrun/feature_store/feature_vector.py b/mlrun/feature_store/feature_vector.py index 3cf58fbbd3f..7970eca6d2d 100644 --- a/mlrun/feature_store/feature_vector.py +++ b/mlrun/feature_store/feature_vector.py @@ -744,6 +744,64 @@ def get_offline_features( spark_service: str = None, timestamp_for_filtering: Union[str, Dict[str, str]] = None, ): + """retrieve offline feature vector results + + specify a feature vector object/uri and retrieve the desired features, their metadata + and statistics. returns :py:class:`~mlrun.feature_store.OfflineVectorResponse`, + results can be returned as a dataframe or written to a target + + The start_time and end_time attributes allow filtering the data to a given time range, they accept + string values or pandas `Timestamp` objects, string values can also be relative, for example: + "now", "now - 1d2h", "now+5m", where a valid pandas Timedelta string follows the verb "now", + for time alignment you can use the verb "floor" e.g. 
"now -1d floor 1H" will align the time to the last hour + (the floor string is passed to pandas.Timestamp.floor(), can use D, H, T, S for day, hour, min, sec alignment). + Another option to filter the data is by the `query` argument - can be seen in the example. + example:: + + features = [ + "stock-quotes.bid", + "stock-quotes.asks_sum_5h", + "stock-quotes.ask as mycol", + "stocks.*", + ] + vector = FeatureVector(features=features) + vector.get_offline_features(entity_rows=trades, entity_timestamp_column="time", query="ticker in ['GOOG'] + and bid>100") + print(resp.to_dataframe()) + print(vector.get_stats_table()) + resp.to_parquet("./out.parquet") + + :param entity_rows: dataframe with entity rows to join with + :param target: where to write the results to + :param drop_columns: list of columns to drop from the final result + :param entity_timestamp_column: timestamp column name in the entity rows dataframe. can be specified + only if param entity_rows was specified. + :param run_config: function and/or run configuration + see :py:class:`~mlrun.feature_store.RunConfig` + :param start_time: datetime, low limit of time needed to be filtered. Optional. + :param end_time: datetime, high limit of time needed to be filtered. Optional. + :param with_indexes: Return vector with/without the entities and the timestamp_key of the feature + sets and with/without entity_timestamp_column and timestamp_for_filtering + columns. This property can be specified also in the feature vector spec + (feature_vector.spec.with_indexes) + (default False) + :param update_stats: update features statistics from the requested feature sets on the vector. + (default False). + :param engine: processing engine kind ("local", "dask", or "spark") + :param engine_args: kwargs for the processing engine + :param query: The query string used to filter rows on the output + :param spark_service: Name of the spark service to be used (when using a remote-spark runtime) + :param order_by: Name or list of names to order by. The name or the names in the list can be the + feature name or the alias of the feature you pass in the feature list. + :param timestamp_for_filtering: name of the column to filter by, can be str for all the feature sets or a + dictionary ({: , ...}) + that indicates the timestamp column name for each feature set. Optional. + By default, the filter executes on the timestamp_key of each feature set. + Note: the time filtering is performed on each feature set before the + merge process using start_time and end_time params. + + """ + return mlrun.feature_store.api._get_offline_features( self, entity_rows, @@ -771,6 +829,68 @@ def get_online_feature_service( update_stats: bool = False, entity_keys: List[str] = None, ): + """initialize and return online feature vector service api, + returns :py:class:`~mlrun.feature_store.OnlineVectorService` + + :**usage**: + There are two ways to use the function: + + 1. As context manager + + Example:: + + with vector_uri.get_online_feature_service() as svc: + resp = svc.get([{"ticker": "GOOG"}, {"ticker": "MSFT"}]) + print(resp) + resp = svc.get([{"ticker": "AAPL"}], as_list=True) + print(resp) + + Example with imputing:: + + with vector_uri.get_online_feature_service(entity_keys=['id'], + impute_policy={"*": "$mean", "amount": 0)) as svc: + resp = svc.get([{"id": "C123487"}]) + + 2. as simple function, note that in that option you need to close the session. 
+
+            Example::
+
+                svc = vector_uri.get_online_feature_service(entity_keys=['ticker'])
+                try:
+                    resp = svc.get([{"ticker": "GOOG"}, {"ticker": "MSFT"}])
+                    print(resp)
+                    resp = svc.get([{"ticker": "AAPL"}], as_list=True)
+                    print(resp)
+
+                finally:
+                    svc.close()
+
+            Example with imputing::
+
+                svc = vector_uri.get_online_feature_service(entity_keys=['id'],
+                                                            impute_policy={"*": "$mean", "amount": 0})
+                try:
+                    resp = svc.get([{"id": "C123487"}])
+                except Exception as e:
+                    pass  # handle the exception here
+                finally:
+                    svc.close()
+
+        :param run_config: function and/or run configuration for remote jobs/services
+        :param impute_policy: a dict with `impute_policy` per feature, the dict key is the feature name and the
+                              dict value indicates which value will be used in case the feature is NaN/empty, the
+                              replaced value can be a fixed number for constants or $mean, $max, $min, $std, $count
+                              for statistical values.
+                              "*" is used to specify the default for all features, example: `{"*": "$mean"}`
+        :param fixed_window_type: determines how to query the fixed window values which were previously inserted by
+                                  ingest
+        :param update_stats: update features statistics from the requested feature sets on the vector.
+                             Default: False.
+        :param entity_keys: Entity list of the first feature_set in the vector.
+                            The indexes that are used to query the online service.
+        :return: The initialized `OnlineVectorService`.
+                 Will be used in subclasses where `support_online=True`.
+        """
         return mlrun.feature_store.api._get_online_feature_service(
             self,
             run_config,

From 0e475e9ff70062b32c854e991e40f8d52a993417 Mon Sep 17 00:00:00 2001
From: Gal Topper
Date: Wed, 14 Feb 2024 21:20:56 +0800
Subject: [PATCH 035/119] [Serving] Fix reference to nonexistent field (#5140)

---
 mlrun/serving/routers.py | 20 ++++++++++++++------
 1 file changed, 14 insertions(+), 6 deletions(-)

diff --git a/mlrun/serving/routers.py b/mlrun/serving/routers.py
index b7f5f5ff4cf..5a3d118ca63 100644
--- a/mlrun/serving/routers.py
+++ b/mlrun/serving/routers.py
@@ -1111,7 +1111,7 @@ def __init__(
         url_prefix: str = None,
         health_prefix: str = None,
         feature_vector_uri: str = "",
-        impute_policy: dict = {},
+        impute_policy: dict = None,
         **kwargs,
     ):
         """Model router with feature enrichment (from the feature store)
@@ -1156,13 +1156,17 @@ def __init__(
         )
 
         self.feature_vector_uri = feature_vector_uri
-        self.impute_policy = impute_policy
+        self.impute_policy = impute_policy or {}
         self._feature_service = None
 
     def post_init(self, mode="sync"):
+        from ..feature_store import get_feature_vector
+
         super().post_init(mode)
 
-        self._feature_service = self.feature_vector.get_online_feature_service(
+        self._feature_service = get_feature_vector(
+            self.feature_vector_uri
+        
).get_online_feature_service(
             impute_policy=self.impute_policy,
         )
 
@@ -1191,7 +1195,7 @@ def __init__(
         executor_type: Union[ParallelRunnerModes, str] = ParallelRunnerModes.thread,
         prediction_col_name: str = None,
         feature_vector_uri: str = "",
-        impute_policy: dict = {},
+        impute_policy: dict = None,
         **kwargs,
     ):
         """Voting Ensemble with feature enrichment (from the feature store)
@@ -1298,13 +1302,17 @@ def __init__(
         )
 
         self.feature_vector_uri = feature_vector_uri
-        self.impute_policy = impute_policy
+        self.impute_policy = impute_policy or {}
         self._feature_service = None
 
     def post_init(self, mode="sync"):
+        from ..feature_store import get_feature_vector
+
         super().post_init(mode)
 
-        self._feature_service = self.feature_vector.get_online_feature_service(
+        self._feature_service = get_feature_vector(
+            self.feature_vector_uri
+        ).get_online_feature_service(
             impute_policy=self.impute_policy,
         )

From 5b1dadc1b2460a113f926cd33a5bcc64fb3cce39 Mon Sep 17 00:00:00 2001
From: Avi Asulin <34214569+aviaIguazio@users.noreply.github.com>
Date: Thu, 15 Feb 2024 16:37:09 +0200
Subject: [PATCH 036/119] [Docs] Update tutorials notebook 3 input cells names
 [1.6.x] (#5129)

---
 docs/tutorials/03-model-serving.ipynb | 8 ++++----
 1 file changed, 4 insertions(+), 4 deletions(-)

diff --git a/docs/tutorials/03-model-serving.ipynb b/docs/tutorials/03-model-serving.ipynb
index dd87e086fdd..920260bd549 100644
--- a/docs/tutorials/03-model-serving.ipynb
+++ b/docs/tutorials/03-model-serving.ipynb
@@ -323,10 +323,10 @@
 "sample = {\n",
 "    \"inputs\": [\n",
 "        {\n",
-"            \"sepal length (cm)\": {0: 5.2, 1: 6.4},\n",
-"            \"sepal width (cm)\": {0: 2.7, 1: 3.1},\n",
-"            \"petal length (cm)\": {0: 3.9, 1: 5.5},\n",
-"            \"petal width (cm)\": {0: 1.4, 1: 1.8},\n",
+"            \"sepal_length_cm\": {0: 5.2, 1: 6.4},\n",
+"            \"sepal_width_cm\": {0: 2.7, 1: 3.1},\n",
+"            \"petal_length_cm\": {0: 3.9, 1: 5.5},\n",
+"            \"petal_width_cm\": {0: 1.4, 1: 1.8},\n",
 "        }\n",
 "    ]\n",
 "}\n",

From 5e4e4cb41d64e04aaa80d4acce007022142d64a3 Mon Sep 17 00:00:00 2001
From: alxtkr77 <3098237+alxtkr77@users.noreply.github.com>
Date: Thu, 15 Feb 2024 19:12:32 +0200
Subject: [PATCH 037/119] [Datastore] Fix artifact preview/download from the
 GUI [1.6.x] (#5148)

---
 mlrun/datastore/datastore_profile.py | 12 ++++++++++++
 1 file changed, 12 insertions(+)

diff --git a/mlrun/datastore/datastore_profile.py b/mlrun/datastore/datastore_profile.py
index e1299dc441a..b0c410aa851 100644
--- a/mlrun/datastore/datastore_profile.py
+++ b/mlrun/datastore/datastore_profile.py
@@ -382,6 +382,18 @@ def datastore_profile_read(url, project_name="", secrets: dict = None):
     public_profile = mlrun.db.get_run_db().get_datastore_profile(
         profile_name, project_name
     )
+    # The mlrun.db.get_run_db().get_datastore_profile() function is capable of returning
+    # two distinct types of objects based on its execution context.
+    # If it operates from the client or within the pod (which is the common scenario),
+    # it yields an instance of `mlrun.datastore.DatastoreProfile`. Conversely,
+    # when executed on the server with a direct call to `sqldb`, it produces an instance of
+    # mlrun.common.schemas.DatastoreProfile.
+    # In the latter scenario, an extra conversion step is required to transform the object
+    # into mlrun.datastore.DatastoreProfile.
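The conversion described in the comment above amounts to a small normalization step; sketched as a standalone helper it would look roughly like this (the helper itself is hypothetical, while the class and method names are the ones used in the diff):

```python
import mlrun.common.schemas
from mlrun.datastore.datastore_profile import DatastoreProfile2Json


def as_client_profile(profile):
    # server-side lookups may hand back the API schema object; re-hydrate
    # it into the mlrun.datastore flavor from its public JSON body
    if isinstance(profile, mlrun.common.schemas.DatastoreProfile):
        profile = DatastoreProfile2Json.create_from_json(
            public_json=profile.object
        )
    return profile
```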
+ if isinstance(public_profile, mlrun.common.schemas.DatastoreProfile): + public_profile = DatastoreProfile2Json.create_from_json( + public_json=public_profile.object + ) project_ds_name_private = DatastoreProfile.generate_secret_key( profile_name, project_name ) From 35f4e2cb9946022dc238eb81def077d3f1973cc7 Mon Sep 17 00:00:00 2001 From: Liran BG Date: Mon, 19 Feb 2024 08:54:48 +0200 Subject: [PATCH 038/119] [Project] Increase nuclio project deletion verification timeout [1.6.x] (#5153) --- mlrun/config.py | 2 +- server/api/api/endpoints/projects.py | 2 +- 2 files changed, 2 insertions(+), 2 deletions(-) diff --git a/mlrun/config.py b/mlrun/config.py index 9b1b6e936c2..4b80587c02c 100644 --- a/mlrun/config.py +++ b/mlrun/config.py @@ -408,7 +408,7 @@ "iguazio_access_key": "", "iguazio_list_projects_default_page_size": 200, "iguazio_client_job_cache_ttl": "20 minutes", - "nuclio_project_deletion_verification_timeout": "60 seconds", + "nuclio_project_deletion_verification_timeout": "300 seconds", "nuclio_project_deletion_verification_interval": "5 seconds", }, # The API needs to know what is its k8s svc url so it could enrich it in the jobs it creates diff --git a/server/api/api/endpoints/projects.py b/server/api/api/endpoints/projects.py index c0d9259c9bc..a2e72e1f609 100644 --- a/server/api/api/endpoints/projects.py +++ b/server/api/api/endpoints/projects.py @@ -242,7 +242,7 @@ async def delete_project( except mlrun.errors.MLRunNotFoundError as exc: if not server.api.utils.helpers.is_request_from_leader(auth_info.projects_role): logger.debug( - "Project no found in leader, ensuring project deleted in mlrun", + "Project not found in leader, ensuring project deleted in mlrun", err=mlrun.errors.err_to_str(exc), ) force_delete = True From bf97325956f49be99fdbdcf3c7c6fca75c5f8b48 Mon Sep 17 00:00:00 2001 From: Alon Maor <48641682+alonmr@users.noreply.github.com> Date: Mon, 19 Feb 2024 14:04:12 +0200 Subject: [PATCH 039/119] [API] Add force CRD deletion after timeout [1.6.x] (#5155) --- mlrun/config.py | 5 +++- server/api/runtime_handlers/base.py | 45 +++++++++++++++++++++++------ server/api/utils/singletons/k8s.py | 11 ++++++- tests/api/runtime_handlers/base.py | 1 + 4 files changed, 51 insertions(+), 11 deletions(-) diff --git a/mlrun/config.py b/mlrun/config.py index 4b80587c02c..f03311eb662 100644 --- a/mlrun/config.py +++ b/mlrun/config.py @@ -109,7 +109,10 @@ "runs": { # deleting runs is a heavy operation that includes deleting runtime resources, therefore we do it in chunks "batch_delete_runs_chunk_size": 10, - } + }, + "resources": { + "delete_crd_resources_timeout": "5 minutes", + }, }, # the grace period (in seconds) that will be given to runtime resources (after they're in terminal state) # before deleting them (4 hours) diff --git a/server/api/runtime_handlers/base.py b/server/api/runtime_handlers/base.py index 1f093f806aa..b5c8374941f 100644 --- a/server/api/runtime_handlers/base.py +++ b/server/api/runtime_handlers/base.py @@ -18,6 +18,7 @@ from datetime import datetime, timedelta, timezone from typing import Dict, List, Optional, Tuple, Union +import humanfriendly from kubernetes import client as k8s_client from kubernetes.client.rest import ApiException from sqlalchemy.orm import Session @@ -855,6 +856,7 @@ def _verify_pods_removed(): def _wait_for_crds_underlying_pods_deletion( self, deleted_crds: List[Dict], + namespace: str, label_selector: str = None, ): # we're using here the run identifier as the common ground to identify which pods are relevant to which CRD, so @@ -904,19 
+906,42 @@ def _verify_crds_underlying_pods_removed(): ) if deleted_crds: - timeout = 180 + timeout = int( + humanfriendly.parse_timespan( + mlrun.mlconf.crud.resources.delete_crd_resources_timeout + ) + ) logger.debug( "Waiting for CRDs underlying pods deletion", timeout=timeout, interval=self.wait_for_deletion_interval, ) - mlrun.utils.retry_until_successful( - self.wait_for_deletion_interval, - timeout, - logger, - True, - _verify_crds_underlying_pods_removed, - ) + + try: + mlrun.utils.retry_until_successful( + self.wait_for_deletion_interval, + timeout, + logger, + True, + _verify_crds_underlying_pods_removed, + ) + except mlrun.errors.MLRunRetryExhaustedError as exc: + logger.warning( + "Failed waiting for CRDs underlying pods deletion, force deleting crds", + exc=err_to_str(exc), + ) + crd_group, crd_version, crd_plural = self._get_crd_info() + for crd_object in deleted_crds: + # Deleting pods in specific states with non 0 grace period can cause the pods to be stuck in + # terminating state, so we're forcing deletion after the grace period passed in this case. + server.api.utils.singletons.k8s.get_k8s_helper().delete_crd( + crd_object["metadata"]["name"], + crd_group, + crd_version, + crd_plural, + namespace, + grace_period_seconds=0, + ) def _delete_pod_resources( self, @@ -1072,7 +1097,9 @@ def _delete_crd_resources( crd_name=crd_object_name, exc=err_to_str(exc), ) - self._wait_for_crds_underlying_pods_deletion(deleted_crds, label_selector) + self._wait_for_crds_underlying_pods_deletion( + deleted_crds, namespace, label_selector + ) return deleted_crds def _pre_deletion_runtime_resource_run_actions( diff --git a/server/api/utils/singletons/k8s.py b/server/api/utils/singletons/k8s.py index 6a57a523686..daec7dd2360 100644 --- a/server/api/utils/singletons/k8s.py +++ b/server/api/utils/singletons/k8s.py @@ -207,7 +207,15 @@ def get_pod_status(self, name, namespace=None): name, namespace, raise_on_not_found=True ).status.phase.lower() - def delete_crd(self, name, crd_group, crd_version, crd_plural, namespace=None): + def delete_crd( + self, + name, + crd_group, + crd_version, + crd_plural, + namespace=None, + grace_period_seconds=None, + ): try: namespace = self.resolve_namespace(namespace) self.crdapi.delete_namespaced_custom_object( @@ -216,6 +224,7 @@ def delete_crd(self, name, crd_group, crd_version, crd_plural, namespace=None): namespace, crd_plural, name, + grace_period_seconds=grace_period_seconds, ) logger.info( "Deleted crd object", diff --git a/tests/api/runtime_handlers/base.py b/tests/api/runtime_handlers/base.py index 33a9e4eb5cb..2187959c434 100644 --- a/tests/api/runtime_handlers/base.py +++ b/tests/api/runtime_handlers/base.py @@ -436,6 +436,7 @@ def _assert_delete_namespaced_custom_objects( expected_custom_object_namespace, crd_plural, expected_custom_object_name, + grace_period_seconds=None, ) for expected_custom_object_name in expected_custom_object_names ] From 15abe1d9ad961383d4f7558848645f0497ac4721 Mon Sep 17 00:00:00 2001 From: daniels290813 <78727943+daniels290813@users.noreply.github.com> Date: Mon, 19 Feb 2024 14:22:15 +0200 Subject: [PATCH 040/119] [Docs] Update mlflow tutorial prerequisites [1.6.x] (#5159) --- docs/tutorials/mlflow.ipynb | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/docs/tutorials/mlflow.ipynb b/docs/tutorials/mlflow.ipynb index 77feac04052..fe9f9139212 100644 --- a/docs/tutorials/mlflow.ipynb +++ b/docs/tutorials/mlflow.ipynb @@ -33,7 +33,7 @@ "outputs": [], "source": [ "# Install MLRun and scikit-learn if not already 
installed. Run this only once. Restart the notebook after the install!\n", - "%pip install mlrun scikit-learn~=1.3.0" + "%pip install mlrun scikit-learn~=1.4 xgboost" ] }, { From 98ad544a12b3cbff18dc87edab6f58f7f617f079 Mon Sep 17 00:00:00 2001 From: Liran BG Date: Mon, 19 Feb 2024 14:55:29 +0200 Subject: [PATCH 041/119] [Project] Fix project creation in foreground [1.6.x] (#5163) --- server/api/utils/projects/follower.py | 26 +++++++------------------- 1 file changed, 7 insertions(+), 19 deletions(-) diff --git a/server/api/utils/projects/follower.py b/server/api/utils/projects/follower.py index d4f30353247..d5a13bcb2f9 100644 --- a/server/api/utils/projects/follower.py +++ b/server/api/utils/projects/follower.py @@ -110,25 +110,13 @@ def create_project( ) created_project = None if not is_running_in_background: - # as part of the store_project flow we encountered an error related to the isolation level we use. - # We use the default isolation level, I wasn't able to find exactly what is the default that sql alchemy - # sets but its serializable(once you SELECT a series of rows in a transaction, you will get the - # identical data back each time you re-emit that SELECT) or repeatable read isolation (you’ll see newly - # added rows (and no longer see deleted rows), but for rows that you’ve already loaded, you won’t see - # any change). Eventually, in the store_project flow, we already queried get_project and at the second - # time(below), after the project created, we failed because we got the same result from first query. - # Using session.commit ends the current transaction and start a new one which will result in a - # new query to the DB. - # for further read: https://docs-sqlalchemy.readthedocs.io/ko/latest/faq/sessions.html - # https://docs-sqlalchemy.readthedocs.io/ko/latest/dialects/mysql.html#transaction-isolation-level - # https://dev.mysql.com/doc/refman/8.0/en/innodb-transaction-isolation-levels.html - # TODO: there are multiple isolation level we can choose, READ COMMITTED seems to solve our issue - # but will require deeper investigation and more test coverage - if commit_before_get: - db_session.commit() - - created_project = self.get_project( - db_session, project.metadata.name, leader_session + # not running in background means long-project creation operation might stale + # its db session, so we need to create a new one + # https://jira.iguazeng.com/browse/ML-5764 + created_project = ( + server.api.db.session.run_function_with_new_db_session( + self.get_project, project.metadata.name, leader_session + ) ) return created_project, is_running_in_background From a6515c2fc895996901a24460aca64cd15ecc71e3 Mon Sep 17 00:00:00 2001 From: Liran BG Date: Mon, 19 Feb 2024 15:13:33 +0200 Subject: [PATCH 042/119] [Project] Validate project labels upon creation/update [1.6.x] (#5164) --- mlrun/k8s_utils.py | 51 +++++++++ mlrun/projects/project.py | 26 +++++ mlrun/utils/helpers.py | 7 +- mlrun/utils/regex.py | 6 +- server/api/utils/projects/follower.py | 19 +--- server/api/utils/projects/leader.py | 30 ++--- server/api/utils/projects/member.py | 4 + tests/api/api/test_projects.py | 6 + .../api/utils/projects/test_leader_member.py | 107 +++++++++--------- tests/projects/test_project.py | 44 +++++++ 10 files changed, 215 insertions(+), 85 deletions(-) diff --git a/mlrun/k8s_utils.py b/mlrun/k8s_utils.py index 2a9bbb15134..7d440c096fc 100644 --- a/mlrun/k8s_utils.py +++ b/mlrun/k8s_utils.py @@ -18,6 +18,7 @@ import mlrun.common.schemas import mlrun.errors +import mlrun.utils.regex from 
.config import config as mlconfig
@@ -131,3 +132,53 @@ def sanitize_label_value(value: str) -> str:
     :return: string fully compliant with k8s label value expectations
     """
     return re.sub(r"([^a-zA-Z0-9_.-]|^[^a-zA-Z0-9]|[^a-zA-Z0-9]$)", "-", value[:63])
+
+
+def verify_label_key(key):
+    if not key:
+        raise mlrun.errors.MLRunInvalidArgumentError("label key cannot be empty")
+    if key.startswith("k8s.io") or key.startswith("kubernetes.io"):
+        raise mlrun.errors.MLRunInvalidArgumentError(
+            "Labels cannot start with 'k8s.io' or 'kubernetes.io'"
+        )
+
+    mlrun.utils.helpers.verify_field_regex(
+        f"project.metadata.labels.'{key}'",
+        key,
+        mlrun.utils.regex.k8s_character_limit,
+    )
+
+    parts = key.split("/")
+    if len(parts) == 1:
+        name = parts[0]
+    elif len(parts) == 2:
+        prefix, name = parts
+        if len(prefix) == 0:
+            raise mlrun.errors.MLRunInvalidArgumentError(
+                "Label key prefix cannot be empty"
+            )
+
+        # the prefix must adhere to the dns_1123_subdomain pattern
+        mlrun.utils.helpers.verify_field_regex(
+            f"Project.metadata.labels.'{key}'",
+            prefix,
+            mlrun.utils.regex.dns_1123_subdomain,
+        )
+    else:
+        raise mlrun.errors.MLRunInvalidArgumentError(
+            "Label key can only contain one '/'"
+        )
+
+    mlrun.utils.helpers.verify_field_regex(
+        f"project.metadata.labels.'{key}'",
+        name,
+        mlrun.utils.regex.qualified_name,
+    )
+
+
+def verify_label_value(value, label_key):
+    mlrun.utils.helpers.verify_field_regex(
+        f"project.metadata.labels.'{label_key}'",
+        value,
+        mlrun.utils.regex.label_value,
+    )
diff --git a/mlrun/projects/project.py b/mlrun/projects/project.py
index fc3b51cec15..983ffff3a28 100644
--- a/mlrun/projects/project.py
+++ b/mlrun/projects/project.py
@@ -40,6 +40,7 @@ import mlrun.common.schemas.model_monitoring.constants as mm_constants
 import mlrun.db
 import mlrun.errors
+import mlrun.k8s_utils
 import mlrun.runtimes
 import mlrun.runtimes.pod
 import mlrun.runtimes.utils
@@ -693,6 +694,31 @@ def validate_project_name(name: str, raise_on_failure: bool = True) -> bool:
             return False
         return True
 
+    @staticmethod
+    def validate_project_labels(labels: dict, raise_on_failure: bool = True) -> bool:
+        """
+        Validate that the project labels conform to the Kubernetes label syntax; see
+        https://kubernetes.io/docs/concepts/overview/working-with-objects/labels/#syntax-and-character-set
+        """
+
+        # no labels is a valid case
+        if not labels:
+            return True
+        if not isinstance(labels, dict):
+            raise mlrun.errors.MLRunInvalidArgumentError(
+                "Labels must be a dictionary of key-value pairs"
+            )
+        try:
+            for key, value in labels.items():
+                mlrun.k8s_utils.verify_label_key(key)
+                mlrun.k8s_utils.verify_label_value(value, label_key=key)
+
+        except mlrun.errors.MLRunInvalidArgumentError:
+            if raise_on_failure:
+                raise
+            return False
+        return True
+
 
 class ProjectSpec(ModelObj):
     def __init__(
diff --git a/mlrun/utils/helpers.py b/mlrun/utils/helpers.py
index e2ac6bced74..058c1afcd91 100644
--- a/mlrun/utils/helpers.py
+++ b/mlrun/utils/helpers.py
@@ -176,6 +176,8 @@ def verify_field_regex(
     log_message: str = "Field is malformed. Does not match required pattern",
     mode: mlrun.common.schemas.RegexMatchModes = mlrun.common.schemas.RegexMatchModes.all,
 ) -> bool:
+    # limit the error message length
+    max_chars = 63
     for pattern in patterns:
         if not re.match(pattern, str(field_value)):
             log_func = logger.warn if raise_on_failure else logger.debug
@@ -188,7 +190,8 @@
             if mode == mlrun.common.schemas.RegexMatchModes.all:
                 if raise_on_failure:
                     raise mlrun.errors.MLRunInvalidArgumentError(
-                        f"Field '{field_name}' is malformed. 
'{field_value}' does not match required pattern: {pattern}" + f"Field '{field_name[:max_chars]}' is malformed. '{field_value[:max_chars]}' " + f"does not match required pattern: {pattern}" ) return False elif mode == mlrun.common.schemas.RegexMatchModes.any: @@ -198,7 +201,7 @@ def verify_field_regex( elif mode == mlrun.common.schemas.RegexMatchModes.any: if raise_on_failure: raise mlrun.errors.MLRunInvalidArgumentError( - f"Field '{field_name}' is malformed. '{field_value}' does not match any of the" + f"Field '{field_name[:max_chars]}' is malformed. '{field_value[:max_chars]}' does not match any of the" f" required patterns: {patterns}" ) return False diff --git a/mlrun/utils/regex.py b/mlrun/utils/regex.py index f8363cac700..5eae4f4b631 100644 --- a/mlrun/utils/regex.py +++ b/mlrun/utils/regex.py @@ -21,9 +21,13 @@ # k8s character limit is for 63 characters k8s_character_limit = [r"^.{0,63}$"] +# k8s name +# https://github.com/kubernetes/apimachinery/blob/kubernetes-1.25.16/pkg/util/validation/validation.go#L33 +qualified_name = [r"^(([A-Za-z0-9][-A-Za-z0-9_.]*)?[A-Za-z0-9])?$"] + # k8s label value format # https://github.com/kubernetes/kubernetes/blob/v1.20.0/staging/src/k8s.io/apimachinery/pkg/util/validation/validation.go#L161 -label_value = k8s_character_limit + [r"^(([A-Za-z0-9][-A-Za-z0-9_.]*)?[A-Za-z0-9])?$"] +label_value = k8s_character_limit + qualified_name # DNS Subdomain (RFC 1123) - used by k8s for most resource names format # https://github.com/kubernetes/kubernetes/blob/v1.20.0/staging/src/k8s.io/apimachinery/pkg/util/validation/validation.go#L204 diff --git a/server/api/utils/projects/follower.py b/server/api/utils/projects/follower.py index d5a13bcb2f9..93c25569fa0 100644 --- a/server/api/utils/projects/follower.py +++ b/server/api/utils/projects/follower.py @@ -99,6 +99,7 @@ def create_project( wait_for_completion: bool = True, commit_before_get: bool = False, ) -> typing.Tuple[typing.Optional[mlrun.common.schemas.Project], bool]: + self._validate_project(project) if server.api.utils.helpers.is_request_from_leader( projects_role, leader_name=self._leader_name ): @@ -129,6 +130,7 @@ def store_project( leader_session: typing.Optional[str] = None, wait_for_completion: bool = True, ) -> typing.Tuple[typing.Optional[mlrun.common.schemas.Project], bool]: + self._validate_project(project) if server.api.utils.helpers.is_request_from_leader( projects_role, leader_name=self._leader_name ): @@ -408,24 +410,9 @@ def _archive_projects_missing_from_leader( def _update_latest_synced_datetime(self, latest_updated_at): if latest_updated_at: - # sanity and defensive programming - if the leader returned a latest_updated_at that is older + # sanity and defensive programming - if the leader returned the latest_updated_at that is older # than the epoch, we'll set it to the epoch epoch = pytz.UTC.localize(datetime.datetime.utcfromtimestamp(0)) if latest_updated_at < epoch: latest_updated_at = epoch self._synced_until_datetime = latest_updated_at - - @staticmethod - def _is_project_matching_labels( - labels: typing.List[str], project: mlrun.common.schemas.Project - ): - if not project.metadata.labels: - return False - for label in labels: - if "=" in label: - name, value = [v.strip() for v in label.split("=", 1)] - if name not in project.metadata.labels: - return False - return value == project.metadata.labels[name] - else: - return label in project.metadata.labels diff --git a/server/api/utils/projects/leader.py b/server/api/utils/projects/leader.py index e32205745ca..8097f046e35 100644 --- 
a/server/api/utils/projects/leader.py +++ b/server/api/utils/projects/leader.py @@ -66,7 +66,7 @@ def create_project( wait_for_completion: bool = True, commit_before_get: bool = False, ) -> typing.Tuple[typing.Optional[mlrun.common.schemas.Project], bool]: - self._enrich_and_validate_before_creation(project) + self._enrich_and_validate(project) self._run_on_all_followers(True, "create_project", db_session, project) return self.get_project(db_session, project.metadata.name), False @@ -79,8 +79,7 @@ def store_project( leader_session: typing.Optional[str] = None, wait_for_completion: bool = True, ) -> typing.Tuple[typing.Optional[mlrun.common.schemas.Project], bool]: - self._enrich_project(project) - mlrun.projects.ProjectMetadata.validate_project_name(name) + self._enrich_and_validate(project) self._validate_body_and_path_names_matches(name, project) self._run_on_all_followers(True, "store_project", db_session, name, project) return self.get_project(db_session, name), False @@ -269,7 +268,7 @@ def _ensure_project_synced( # Heuristically pick the first follower project_follower_name = list(follower_names)[0] project = followers_projects_map[project_follower_name][project_name] - self._enrich_and_validate_before_creation(project) + self._enrich_and_validate(project) self._leader_follower.create_project(db_session, project) except Exception as exc: logger.warning( @@ -290,7 +289,8 @@ def _ensure_project_synced( missing_followers = set(follower_names).symmetric_difference( self._followers.keys() ) - if self._should_sync_project_to_followers(project_name): + project.metadata.name = project_name + if self._should_sync_project_to_followers(project): if missing_followers: self._create_project_in_missing_followers( db_session, @@ -319,7 +319,7 @@ def _store_project_in_followers( project_name=project_name, ) try: - self._enrich_and_validate_before_creation(project) + self._enrich_and_validate(project) self._followers[follower_name].store_project( db_session, project_name, @@ -351,7 +351,7 @@ def _create_project_in_missing_followers( project_name=project_name, ) try: - self._enrich_and_validate_before_creation(project) + self._enrich_and_validate(project) self._followers[missing_follower].create_project( db_session, project, @@ -366,14 +366,18 @@ def _create_project_in_missing_followers( traceback=traceback.format_exc(), ) - def _should_sync_project_to_followers(self, project_name: str) -> bool: + def _should_sync_project_to_followers( + self, project: mlrun.common.schemas.Project + ) -> bool: """ projects name validation is enforced on creation, the only way for a project name to be invalid is if it was created prior to 0.6.0, and the version was upgraded we do not want to sync these projects since it will - anyways fail (Nuclio doesn't allow these names as well) + anyway fail (Nuclio doesn't allow these names as well) """ return mlrun.projects.ProjectMetadata.validate_project_name( - project_name, raise_on_failure=False + project.metadata.name, raise_on_failure=False + ) and mlrun.projects.ProjectMetadata.validate_project_labels( + project.metadata.labels, raise_on_failure=False ) def _run_on_all_followers( @@ -423,11 +427,9 @@ def _initialize_follower( raise ValueError(f"Unknown follower name: {name}") return followers_classes_map[name] - def _enrich_and_validate_before_creation( - self, project: mlrun.common.schemas.Project - ): + def _enrich_and_validate(self, project: mlrun.common.schemas.Project): self._enrich_project(project) - 
mlrun.projects.ProjectMetadata.validate_project_name(project.metadata.name) + self._validate_project(project) @staticmethod def _enrich_project(project: mlrun.common.schemas.Project): diff --git a/server/api/utils/projects/member.py b/server/api/utils/projects/member.py index edfa5be6561..05e48647361 100644 --- a/server/api/utils/projects/member.py +++ b/server/api/utils/projects/member.py @@ -160,3 +160,7 @@ async def post_delete_project( ): await server.api.crud.Logs().stop_logs_for_project(project_name) await server.api.crud.Logs().delete_project_logs(project_name) + + def _validate_project(self, project: mlrun.common.schemas.Project): + mlrun.projects.ProjectMetadata.validate_project_name(project.metadata.name) + mlrun.projects.ProjectMetadata.validate_project_labels(project.metadata.labels) diff --git a/tests/api/api/test_projects.py b/tests/api/api/test_projects.py index 071f0ba8cca..205868606db 100644 --- a/tests/api/api/test_projects.py +++ b/tests/api/api/test_projects.py @@ -771,6 +771,12 @@ def test_projects_crud( ), ) + # create - fail invalid label + invalid_project_create_request = project_1.dict() + invalid_project_create_request["metadata"]["labels"] = {".a": "invalid-label"} + response = client.post("projects", json=invalid_project_create_request) + assert response.status_code == HTTPStatus.BAD_REQUEST.value + # create response = client.post("projects", json=project_1.dict()) assert response.status_code == HTTPStatus.CREATED.value diff --git a/tests/api/utils/projects/test_leader_member.py b/tests/api/utils/projects/test_leader_member.py index d302edd6c6c..13aa51c9675 100644 --- a/tests/api/utils/projects/test_leader_member.py +++ b/tests/api/utils/projects/test_leader_member.py @@ -254,73 +254,76 @@ def test_create_project( _assert_project_in_followers([leader_follower, nop_follower], project) -def test_create_and_store_project_failure_invalid_name( - db: sqlalchemy.orm.Session, - projects_leader: server.api.utils.projects.leader.Member, - leader_follower: server.api.utils.projects.remotes.follower.Member, -): - cases = [ - {"name": "asd3", "valid": True}, - {"name": "asd-asd", "valid": True}, - {"name": "333", "valid": True}, - {"name": "3-a-b", "valid": True}, - {"name": "5-a-a-5", "valid": True}, - { +@pytest.mark.parametrize( + "project_name, valid", + [ + ("asd3", True), + ("asd-asd", True), + ("333", True), + ("3-a-b", True), + ("5-a-a-5", True), + ( # Invalid because the first letter is - - "name": "-as-123-2-8a", - "valid": False, - }, - { + "-as-123-2-8a", + False, + ), + ( # Invalid because there is . 
- "name": "as-123-2.a", - "valid": False, - }, - { + "as-123-2.a", + False, + ), + ( # Invalid because A is not allowed - "name": "As-123-2-8Aa", - "valid": False, - }, - { + "As-123-2-8Aa", + False, + ), + ( # Invalid because _ is not allowed - "name": "as-123_2-8aa", - "valid": False, - }, - { + "as-123_2-8aa", + False, + ), + ( # Invalid because it's more than 63 characters - "name": "azsxdcfvg-azsxdcfvg-azsxdcfvg-azsxdcfvg-azsxdcfvg-azsxdcfvg-azsx", - "valid": False, - }, - ] - for case in cases: - project_name = case["name"] - project = mlrun.common.schemas.Project( - metadata=mlrun.common.schemas.ProjectMetadata(name=project_name), + "azsxdcfvg-azsxdcfvg-azsxdcfvg-azsxdcfvg-azsxdcfvg-azsxdcfvg-azsx", + False, + ), + ], +) +def test_create_and_store_project_failure_invalid_name( + db: sqlalchemy.orm.Session, + projects_leader: server.api.utils.projects.leader.Member, + leader_follower: server.api.utils.projects.remotes.follower.Member, + project_name, + valid, +): + project = mlrun.common.schemas.Project( + metadata=mlrun.common.schemas.ProjectMetadata(name=project_name), + ) + if valid: + projects_leader.create_project( + None, + project, + ) + _assert_project_in_followers([leader_follower], project) + projects_leader.store_project( + None, + project_name, + project, ) - if case["valid"]: + _assert_project_in_followers([leader_follower], project) + else: + with pytest.raises(mlrun.errors.MLRunInvalidArgumentError): projects_leader.create_project( None, project, ) - _assert_project_in_followers([leader_follower], project) + with pytest.raises(mlrun.errors.MLRunInvalidArgumentError): projects_leader.store_project( None, project_name, project, ) - _assert_project_in_followers([leader_follower], project) - else: - with pytest.raises(mlrun.errors.MLRunInvalidArgumentError): - projects_leader.create_project( - None, - project, - ) - with pytest.raises(mlrun.errors.MLRunInvalidArgumentError): - projects_leader.store_project( - None, - project_name, - project, - ) - _assert_project_not_in_followers([leader_follower], project_name) + _assert_project_not_in_followers([leader_follower], project_name) def test_ensure_project( diff --git a/tests/projects/test_project.py b/tests/projects/test_project.py index 1571ff798e9..ac40331ca5c 100644 --- a/tests/projects/test_project.py +++ b/tests/projects/test_project.py @@ -1596,3 +1596,47 @@ def test_project_build_image( # If no base image was used, then mlrun/mlrun is expected assert build_config.base_image == base_image or "mlrun/mlrun" assert project.default_image == image_name + + +@pytest.mark.parametrize( + "project_name, valid", + [ + ("project", True), + ("project-name", True), + ("project-name-1", True), + ("1project", True), + ("project_name", False), + ("project@", False), + ("project/a", False), + ], +) +def test_project_name_validation(project_name, valid): + assert valid == mlrun.projects.ProjectMetadata.validate_project_name( + project_name, raise_on_failure=False + ) + + +@pytest.mark.parametrize( + "project_labels, valid", + [ + ({}, True), + ({"key": "value"}, True), + ({"some.key": "value"}, True), + ({"key.some/a": "value"}, True), + # too many subcomponents + ({"key/a/b": "value"}, False), + # must start with alphanumeric + ({".key": "value"}, False), + ({"/key": "value"}, False), + # no key + ({"": "value"}, False), + # long value + ({"key": "a" * 64}, False), + # long key + ({"a" * 64: "a"}, False), + ], +) +def test_project_labels_validation(project_labels, valid): + assert valid == 
mlrun.projects.ProjectMetadata.validate_project_labels( + project_labels, raise_on_failure=False + ) From 092a991da5ef704304aab333c7699afe699376fe Mon Sep 17 00:00:00 2001 From: Liran BG Date: Mon, 19 Feb 2024 15:33:39 +0200 Subject: [PATCH 043/119] [API] Bump uvicorn [1.6.x] (#5165) --- dockerfiles/mlrun-api/requirements.txt | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/dockerfiles/mlrun-api/requirements.txt b/dockerfiles/mlrun-api/requirements.txt index ca1f2ffcafd..132efe7de37 100644 --- a/dockerfiles/mlrun-api/requirements.txt +++ b/dockerfiles/mlrun-api/requirements.txt @@ -1,4 +1,4 @@ -uvicorn~=0.23.2 +uvicorn~=0.27.1 dask-kubernetes~=0.11.0 apscheduler~=3.6, !=3.10.2 sqlite3-to-mysql~=1.4 From b8188f94dc3e0bfc4bb74ab4296bec213b019e67 Mon Sep 17 00:00:00 2001 From: Liran BG Date: Mon, 19 Feb 2024 17:35:53 +0200 Subject: [PATCH 044/119] [Projects] Fix verification blocking call [1.6.x] (#5166) --- server/api/api/endpoints/projects_v2.py | 9 +++++++-- 1 file changed, 7 insertions(+), 2 deletions(-) diff --git a/server/api/api/endpoints/projects_v2.py b/server/api/api/endpoints/projects_v2.py index c8a4243a2a3..9488f05cb28 100644 --- a/server/api/api/endpoints/projects_v2.py +++ b/server/api/api/endpoints/projects_v2.py @@ -93,9 +93,14 @@ async def delete_project( # we need to implement the verify_project_is_empty, since we don't want # to spawn a background task for this, only to return a response if deletion_strategy.strategy_to_check(): - server.api.crud.Projects().verify_project_is_empty(db_session, name, auth_info) + await run_in_threadpool( + server.api.crud.Projects().verify_project_is_empty, + db_session, + name, + auth_info, + ) if deletion_strategy == mlrun.common.schemas.DeletionStrategy.check: - # if the strategy is check, we don't want to delete the project, only to check if it is empty + # if the strategy is checked, we don't want to delete the project, only to check if it is empty return fastapi.Response(status_code=http.HTTPStatus.NO_CONTENT.value) task, task_name = await run_in_threadpool( From 4b8f4cf5dcda35756a010b2d0922382d94aabed6 Mon Sep 17 00:00:00 2001 From: Assaf Ben-Amitai Date: Tue, 20 Feb 2024 08:26:02 +0200 Subject: [PATCH 045/119] [Docs] Improve feature store documentation; sources and targets; partitioning [1.6.x] (#5169) --- docs/change-log/index.md | 2 +- docs/cheat-sheet.md | 4 +- docs/feature-store/feature-sets.md | 4 +- docs/feature-store/feature-store-overview.md | 2 +- docs/feature-store/feature-store.md | 1 + docs/feature-store/sources-targets.md | 76 ++++++++++++++++++++ docs/feature-store/transformations.md | 16 +++++ docs/serving/available-steps.md | 43 +---------- 8 files changed, 101 insertions(+), 47 deletions(-) create mode 100644 docs/feature-store/sources-targets.md diff --git a/docs/change-log/index.md b/docs/change-log/index.md index f967203308d..76f22c75926 100644 --- a/docs/change-log/index.md +++ b/docs/change-log/index.md @@ -403,7 +403,7 @@ Starting with v1.3.0, and continuing in subsequent releases, obsolete functions |ML-3378 |Aggregation over a fixed-window that starts at or near the epoch now functions as expected. [View in Git](https://github.com/mlrun/storey/pull/418). | |ML-3380 |Documentation: added details on [aggregation in windows](../feature-store/transformations.html#aggregations). | |ML-3389 |Hyperparams run does not present artifacts iteration when selector is not defined. [View in Git](https://github.com/mlrun/ui/pull/1635). 
| -|ML-3424 |Documentation: new matrix of which engines support which [sources](../serving/available-steps.html#sources)/[targets](../serving/available-steps.html#targets). [View in Git](https://github.com/mlrun/mlrun/pull/3279). | +|ML-3424 |Documentation: new matrix of which engines support which [sources](../feature-store/sources-targets.html#sources)/[targets](../feature-store/sources-targets.html#targets). [View in Git](https://github.com/mlrun/mlrun/pull/3279). | |ML-3505 |Removed the upperbound on the `google-cloud-bigquery` requirement. | |ML-3575 |`project.run_function()` now uses the argument `artifact_path` (previously used the project's configured `artifact_path` instead). [View in Git](https://github.com/mlrun/mlrun/pull/3246). | |ML-3403 |Error on Spark ingestion with offline target without defined path (error: `NoneType` object has no attribute `startswith`). Fix: default path defined. [View in Git](https://github.com/mlrun/mlrun/pull/3118). | diff --git a/docs/cheat-sheet.md b/docs/cheat-sheet.md index c870c9ac325..a671582a6fe 100644 --- a/docs/cheat-sheet.md +++ b/docs/cheat-sheet.md @@ -620,7 +620,7 @@ Docs: [Ingest data using the feature store](./data-prep/ingest-data-fs.html) ### Sources -Docs: [Sources](./serving/available-steps.html#sources) +Docs: [Sources](./feature-store/sources-targets.html#sources) ```python from mlrun.datastore.sources import CSVSource, ParquetSource, BigQuerySource, KafkaSource @@ -666,7 +666,7 @@ snowflake_df = snowflake_source.to_dataframe() ### Targets -Docs: [Targets](./serving/available-steps.html#targets) +Docs: [Targets](./feature-store/sources-targets.html#targets), [Partitioning on Parquet target](./feature-store/sources-targets.html#partitioning-on-parquet-target) ```python from mlrun.datastore.targets import CSVTarget, ParquetTarget diff --git a/docs/feature-store/feature-sets.md b/docs/feature-store/feature-sets.md index e6d8e1bd5c5..1aafe1c8ca4 100644 --- a/docs/feature-store/feature-sets.md +++ b/docs/feature-store/feature-sets.md @@ -11,9 +11,9 @@ The feature set object contains the following information: - **Metadata** — General information which is helpful for search and organization. Examples are project, name, owner, last update, description, labels, etc. - **Key attributes** — Entity, timestamp key (optional), label column. - **Features** — The list of features along with their schema, metadata, validation policies and statistics. -- **Source** — The online or offline data source definitions and ingestion policy (file, database, stream, http endpoint, etc.). See the [source descriptions](../serving/available-steps.html#sources). +- **Source** — The online or offline data source definitions and ingestion policy (file, database, stream, http endpoint, etc.). See the [source descriptions](./sources-targets.html#sources). - **Transformation** — The data transformation pipeline (e.g. aggregation, enrichment etc.). -- **Target stores** — The type (i.e. parquet/csv or key value), location and status for the feature set materialized data. See the [target descriptions](../serving/available-steps.html#targets). +- **Target stores** — The type (i.e. parquet/csv or key value), location and status for the feature set materialized data. See the [target descriptions](./sources-targets.html#targets). - **Function** — The type (storey, pandas, spark) and attributes of the data pipeline serverless functions. 
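To make the components above concrete, the following minimal sketch wires them together with the `mlrun.feature_store` API (the feature set name, entity, and source path are illustrative placeholders; the module-level `fstore.ingest` call follows the convention used elsewhere in these docs):

```python
import mlrun.feature_store as fstore
from mlrun.datastore.sources import ParquetSource
from mlrun.datastore.targets import NoSqlTarget, ParquetTarget

# Metadata and key attributes: name, entity (key column), timestamp key
quotes_set = fstore.FeatureSet(
    "stock-quotes",
    entities=[fstore.Entity("ticker")],
    timestamp_key="time",
    description="stock quotes feature set",
)

# Target stores: offline parquet files plus an online key-value table
quotes_set.set_targets([ParquetTarget(), NoSqlTarget()], with_defaults=False)

# Source definition and materialization of the feature set into the targets
df = fstore.ingest(quotes_set, source=ParquetSource(path="./quotes.parquet"))
```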
**In this section** diff --git a/docs/feature-store/feature-store-overview.md b/docs/feature-store/feature-store-overview.md index 2b5e7d5b144..020e7d30bbd 100644 --- a/docs/feature-store/feature-store-overview.md +++ b/docs/feature-store/feature-store-overview.md @@ -38,7 +38,7 @@ training and serves as the input to the model training process. During model ser ![How feature store works](../_static/images/feature-store-flow.png) -The common flow when working with the feature store is to first define the feature set with its source, transformation graph, and targets. +The common flow when working with the feature store is to first define the feature set with its source, transformation graph, and targets. (See the supported {ref}`sources-targets`.) MLRun's robust transformation engine performs complex operations with just a few lines of Python code. To test the execution process, call the `infer` method with a sample DataFrame. This runs all operations in memory without storing the results. diff --git a/docs/feature-store/feature-store.md b/docs/feature-store/feature-store.md index 9fd70c05bc6..613c2585090 100644 --- a/docs/feature-store/feature-store.md +++ b/docs/feature-store/feature-store.md @@ -26,5 +26,6 @@ feature-store-overview feature-sets transformations feature-vectors +sources-targets ./end-to-end-demo/index ``` \ No newline at end of file diff --git a/docs/feature-store/sources-targets.md b/docs/feature-store/sources-targets.md new file mode 100644 index 00000000000..37b0ece6aef --- /dev/null +++ b/docs/feature-store/sources-targets.md @@ -0,0 +1,76 @@ +(sources-targets)= +# Sources and targets + + +- [Sources](#sources) +- [Targets](#targets) +- [ParquetTarget](#parquettarget) +- [NoSql target](#nosql-target) + + + +# Sources +| Class name | Description | storey | spark | pandas | +| -------------------------------------------------- | --------------------------------- | --- | --- | --- | +| [mlrun.datastore.BigQuerySource](../api/mlrun.datastore.html#mlrun.datastore.BigQuerySource) | Reads Google BigQuery query results as input source for a flow ("batch" source). | N | Y | Y | +| mlrun.datastore.SnowFlakeSource | Reads Snowflake query results as input source for a flow ("batch" source). | N | Y | N | +| mlrun.datastore.SQLSource | Reads SQL query results as input source for a flow ("batch" source). | Y | N | Y | +| [mlrun.datastore.CSVSource](https://storey.readthedocs.io/en/latest/api.html#storey.sources.CSVSource) | Reads a CSV file as input source for a flow ("batch" source). | Y | Y | Y | +| [storey.sources.DataframeSource](https://storey.readthedocs.io/en/latest/api.html#storey.sources.DataframeSource) | Reads data frame as input source for a flow ("batch" source). | Y | N | N | +| [mlrun.datastore.HttpSource](../api/mlrun.datastore.html#mlrun.datastore.HttpSource) | Sets the HTTP-endpoint source for the flow (event-based source). | Y | N | N | +| [mlrun.datastore.KafkaSource](../api/mlrun.datastore.html#mlrun.datastore.KafkaSource) | Sets the kafka source for the flow (event-based source). | Y | N | N | +| [mlrun.datastore.ParquetSource](https://storey.readthedocs.io/en/latest/api.html#storey.sources.ParquetSource) | Reads the Parquet file/dir as the input source for a flow ("batch" source). | Y | Y | Y | +| [mlrun.datastore.StreamSource](../api/mlrun.datastore.html#mlrun.datastore.StreamSource) | Sets the stream source for the flow. If the stream doesn’t exist it creates it (event-based source). 
| Y | N | N | + +# Targets +| Class name | Description | storey | spark | pandas | +| -------------------------------------------------- | ------------------------------------------------------- | --- | --- | --- | +| [mlrun.datastore.CSVTarget](https://storey.readthedocs.io/en/latest/api.html#storey.targets.CSVTarget) | Writes events to a CSV file (offline target). | Y | Y | Y | +| [mlrun.datastore.KafkaTarget](https://storey.readthedocs.io/en/latest/api.html#storey.targets.KafkaTarget) | Writes all incoming events into a Kafka stream (online target). | Y | N | N | +| [mlrun.datastore.NoSqlTarget](https://storey.readthedocs.io/en/latest/api.html#storey.targets.NoSqlTarget) | The default online target. Persists the data in V3IO table to its associated storage by key (online target). | Y | Y | Y | +| mlrun.datastore.RedisNoSqlTarget | Persists the data in Redis table to its associated storage by key (online target). | Y | Y | N | +| mlrun.datastore.SqlTarget | The default offline target. Persists the data in SQL table to its associated storage by key (offline target). | Y | N | Y | +| [mlrun.datastore.ParquetTarget](https://storey.readthedocs.io/en/latest/api.html#storey.targets.ParquetTarget)| The Parquet target storage driver, used to materialize feature set/vector data into parquet files (online target). | Y | Y | Y | +| [mlrun.datastore.StreamTarget](https://storey.readthedocs.io/en/latest/api.html#storey.targets.StreamTarget) | Writes all incoming events into a V3IO stream (offline target). | Y | N | N | + +## ParquetTarget + +### Partitioning + +When writing data to a {py:meth}`~mlrun.datastore.ParquetTarget`, you can use partitioning. Partitioning organizes data +in Parquet files by dividing large data sets into smaller and more manageable pieces. The data is divided +into separate files according to specific criteria, for example: date, time, or specific values in a column. +Partitioning, when configured correctly, improves read performance by reducing the amount of data that needs to be +processed for any given function, for example, when reading back a limited time range with `get_offline_features()`. + +When using the pandas engine for ingestion, pandas incurs a maximum limit of 1024 partitions on each ingestion. +If the data being ingested spans over more than 1024 partitions, the ingestion fails. +Decrease the number of partitions by filtering the time (for example, using start_filter/end_filter of the +{py:meth}`~mlrun.datastore.ParquetSource`), and/or increasing the `time_partitioning_granularity`. + +storey processes the data row by row (as a streaming engine, it doesn't get all the data up front, so it needs to process row by row). +These rows are batched together according to the partitions defined, and they are +written to each partition separately. (Therefore, storey does not have the 1024 limitation.) + +Configure partitioning with: +- `partitioned` — Optional. Whether to partition the file. False by default. If True without passing any other partition fields, the data is partitioned by /year/month/day/hour. +- `key_bucketing_number` — Optional. None by default: does not partition by key. 0 partitions by the key as is. Any other number "X" creates X partitions and hashes the keys to one of them. +- `partition_cols` — Optional. Name of columns from the data to partition by. +- `time_partitioning_granularity` — Optional. The smallest time unit to partition the data by, in the format /year/month/day/hour (default). 
For example “hour” yields the smallest possible partitions.
+
+For example:
+- `ParquetTarget()` partitions by year/month/day/hour/
+- `ParquetTarget(partition_cols=[])` writes to a directory without partitioning
+- `ParquetTarget(partition_cols=["col1", "col2"])` partitions by col1/col2/
+- `ParquetTarget(time_partitioning_granularity="day")` partitions by year/month/day/
+- `ParquetTarget(partition_cols=["col1", "col2"], time_partitioning_granularity="day")` partitions by col1/col2/year/month/day/
+
+Disable partitioning with:
+- `ParquetTarget(partitioned=False)`
+
+## NoSql target
+
+The {py:meth}`~mlrun.datastore.NoSqlTarget` is a V3IO key-value based target. It is the default target for real-time data.
+It supports low latency data retrieval based on key access, making it ideal for online applications.
+
+The combination of a NoSQL target with the storey engine does not support features of type string with a value containing both quote (') and double-quote (").
diff --git a/docs/feature-store/transformations.md b/docs/feature-store/transformations.md
index ef938697716..0f1f40c91be 100644
--- a/docs/feature-store/transformations.md
+++ b/docs/feature-store/transformations.md
@@ -39,6 +39,7 @@ to the [feature store example](./basic-demo.html).
 - [Aggregations](#aggregations)
 - [Built-in transformations](#built-in-transformations)
 - [Custom transformations](#custom-transformations)
+- [Data transformation steps](#data-transformation-steps)

 ## Aggregations

@@ -283,5 +284,20 @@ feature_set.graph.to(MultiplyFeature(feature="number1", value=4))
 df_pandas = feature_set.ingest(data)
 ```
+
+## Data transformation steps
+
+The following table lists the available data-transformation steps, and details which ingestion engines support each step.
+
+| Class name | Description | Storey | Spark | Pandas |
+|----------------------------|----------------------------------| ---- | ---- | ---- |
+| {py:meth}`mlrun.feature_store.FeatureSet.add_aggregation` | Aggregates the data into the table object provided for later persistence, and outputs an event enriched with the requested aggregation features. | Y&#13;
    Not supported with online target SQLTarget | Y | N | +| {py:meth}`mlrun.feature_store.steps.DateExtractor` | Extract a date-time component. | Y | N
    Supports part extract (ex. day_of_week) but does not support boolean (ex. is_leap_year) | Y | +| {py:meth}`mlrun.feature_store.steps.DropFeatures` | Drop features from feature list. | Y | Y | Y | +| {py:meth}`mlrun.feature_store.steps.Imputer` | Replace None values with default values. | Y | Y | Y | +| {py:meth}`mlrun.feature_store.steps.MapValues` | Map column values to new values. | Y | Y | Y | +| {py:meth}`mlrun.feature_store.steps.OneHotEncoder` | Create new binary fields, one per category (one hot encoded). | Y | Y | Y | +| {py:meth}`mlrun.feature_store.steps.SetEventMetadata` | Set the event metadata (id, key, timestamp) from the event body. | Y | N | N | +| {py:meth}`mlrun.feature_store.steps.FeaturesetValidator` | Validate feature values according to the feature set validation policy | Y | N | Y | + diff --git a/docs/serving/available-steps.md b/docs/serving/available-steps.md index ea8d7a883ac..b637af46846 100644 --- a/docs/serving/available-steps.md +++ b/docs/serving/available-steps.md @@ -6,14 +6,13 @@ MlRun provides you with many built-in steps that you can use when building your Click on the step names in the following sections to see the full usage. - [Base Operators](#base-operators) -- [Data Transformations](#data-transformations) - [External IO and data enrichment](#external-io-and-data-enrichment) -- [Sources](#sources) -- [Targets](#targets) - [Models](#models) - [Routers](#routers) - [Other](#other) +See also [Data transformations](../feature-store/transformations.html#data-transformation-steps). + ## Base Operators @@ -33,21 +32,6 @@ Click on the step names in the following sections to see the full usage. [storey.transformations.SampleWindow](https://storey.readthedocs.io/en/latest/api.html#storey.transformations.SampleWindow) | Emits a single event in a window of `window_size` events, in accordance with `emit_period` and `emit_before_termination`. | -## Data Transformations - -The following table lists the available data-transformation steps. The next table details the ingestion engines support of these steps. - -| Class name | Description | Storey | Spark | Pandas | -|----------------------------|----------------------------------| ---- | ---- | ---- | -| [mlrun.feature_store.add_aggregation](../api/mlrun.feature_store.html#mlrun.feature_store.FeatureSet.add_aggregation) | Aggregates the data into the table object provided for later persistence, and outputs an event enriched with the requested aggregation features. | Y
    Not supported with online target SQLTarget | Y | N | -| [mlrun.feature_store.DateExtractor](../api/mlrun.feature_store.html#mlrun.feature_store.steps.DateExtractor) | Extract a date-time component. | Y | N
    Supports part extract (ex. day_of_week) but does not support boolean (ex. is_leap_year) | Y | -| [mlrun.feature_store.DropFeatures](../api/mlrun.feature_store.html#mlrun.feature_store.steps.DropFeatures) | Drop features from feature list. | Y | Y | Y | -| [mlrun.feature_store.Imputer](../api/mlrun.feature_store.html#mlrun.feature_store.steps.Imputer) | Replace None values with default values. | Y | Y | Y | -| [mlrun.feature_store.MapValues](../api/mlrun.feature_store.html#mlrun.feature_store.steps.MapValues) | Map column values to new values. | Y | Y | Y | -| [mlrun.feature_store.OneHotEncoder](../api/mlrun.feature_store.html#mlrun.feature_store.steps.OneHotEncoder) | Create new binary fields, one per category (one hot encoded). | Y | Y | Y | -| [mlrun.feature_store.SetEventMetadata](../api/mlrun.feature_store.html#mlrun.feature_store.steps.SetEventMetadata) | Set the event metadata (id, key, timestamp) from the event body. | Y | N | N | -| [mlrun.feature_store.steps.FeaturesetValidator](../../api/mlrun.feature_store.html#mlrun.feature_store.steps.FeaturesetValidator) | Validate feature values according to the feature set validation policy | Y | N | Y | - ## External IO and data enrichment @@ -62,29 +46,6 @@ The following table lists the available data-transformation steps. The next tabl | [storey.transformations.SendToHttp](https://storey.readthedocs.io/en/latest/api.html#storey.transformations.SendToHttp) | Joins each event with data from any HTTP source. Used for event augmentation. | -## Sources -| Class name | Description | storey | spark | pandas | -| -------------------------------------------------- | --------------------------------- | --- | --- | --- | -| [mlrun.datastore.BigQuerySource](../api/mlrun.datastore.html#mlrun.datastore.BigQuerySource) | Reads Google BigQuery query results as input source for a flow. | N | Y | Y | -| mlrun.datastore.SnowFlakeSource | Reads Snowflake query results as input source for a flow. | N | Y | N | -| mlrun.datastore.SQLSource | Reads SQL query results as input source for a flow. | Y | N | Y | -| [mlrun.datastore.CSVSource](https://storey.readthedocs.io/en/latest/api.html#storey.sources.CSVSource) | Reads a CSV file as input source for a flow. | Y | Y | Y | -| [storey.sources.DataframeSource](https://storey.readthedocs.io/en/latest/api.html#storey.sources.DataframeSource) | Reads data frame as input source for a flow. | Y | N | N | -| [mlrun.datastore.HttpSource](../api/mlrun.datastore.html#mlrun.datastore.HttpSource) | Sets the HTTP-endpoint source for the flow. | Y | N | N | -| [mlrun.datastore.KafkaSource](../api/mlrun.datastore.html#mlrun.datastore.KafkaSource) | Sets the kafka source for the flow. | Y | N | N | -| [mlrun.datastore.ParquetSource](https://storey.readthedocs.io/en/latest/api.html#storey.sources.ParquetSource) | Reads the Parquet file/dir as the input source for a flow. | Y | Y | Y | -| [mlrun.datastore.StreamSource](../api/mlrun.datastore.html#mlrun.datastore.StreamSource) | Sets the stream source for the flow. If the stream doesn’t exist it creates it. | Y | N | N | - -## Targets -| Class name | Description | storey | spark | pandas | -| -------------------------------------------------- | ------------------------------------------------------- | --- | --- | --- | -| [mlrun.datastore.CSVTarget](https://storey.readthedocs.io/en/latest/api.html#storey.targets.CSVTarget) | Writes events to a CSV file. 
| Y | Y | Y | -| [mlrun.datastore.NoSqlTarget](https://storey.readthedocs.io/en/latest/api.html#storey.targets.NoSqlTarget) | Persists the data in V3IO table to its associated storage by key. | Y | Y | Y | -| mlrun.datastore.RedisNoSqlTarget | Persists the data in Redis table to its associated storage by key. | Y | Y | N | -| mlrun.datastore.SqlTarget | Persists the data in SQL table to its associated storage by key. | Y | N | Y | -| [mlrun.datastore.ParquetTarget](https://storey.readthedocs.io/en/latest/api.html#storey.targets.ParquetTarget) | The Parquet target storage driver, used to materialize feature set/vector data into parquet files. | Y | Y | Y | -| [mlrun.datastore.StreamTarget](https://storey.readthedocs.io/en/latest/api.html#storey.targets.StreamTarget) | Writes all incoming events into a V3IO stream. | Y | N | N | - ## Models | Class name | Description | |--------------------------------------------------|----------------------------------------------------------| From 3e3b60cef618ba5d3fbf9baa9430b333dd823f74 Mon Sep 17 00:00:00 2001 From: Assaf Ben-Amitai Date: Tue, 20 Feb 2024 17:15:46 +0200 Subject: [PATCH 046/119] [Docs] Update Sources and targets (#5173) [1.6.x] (#5175) --- docs/feature-store/sources-targets.md | 6 ++++-- 1 file changed, 4 insertions(+), 2 deletions(-) diff --git a/docs/feature-store/sources-targets.md b/docs/feature-store/sources-targets.md index 37b0ece6aef..a13d467c90b 100644 --- a/docs/feature-store/sources-targets.md +++ b/docs/feature-store/sources-targets.md @@ -48,9 +48,11 @@ If the data being ingested spans over more than 1024 partitions, the ingestion f Decrease the number of partitions by filtering the time (for example, using start_filter/end_filter of the {py:meth}`~mlrun.datastore.ParquetSource`), and/or increasing the `time_partitioning_granularity`. -storey processes the data row by row (as a streaming engine, it doesn't get all the data up front, so it needs to process row by row). +Storey processes the data row by row (as a streaming engine, it doesn't get all the data up front, so it needs to process row by row). These rows are batched together according to the partitions defined, and they are -written to each partition separately. (Therefore, storey does not have the 1024 limitation.) +written to each partition separately. (Therefore, storey does not have the 1024 partitions limitation.) + +Spark does not have the partitions limitation, either. Configure partitioning with: - `partitioned` — Optional. Whether to partition the file. False by default. If True without passing any other partition fields, the data is partitioned by /year/month/day/hour. From 3cdf5a676f74acde2bcd5207cd6b75f104f10ce2 Mon Sep 17 00:00:00 2001 From: Assaf Ben-Amitai Date: Thu, 22 Feb 2024 22:06:16 +0200 Subject: [PATCH 047/119] [Docs] Update sources targets [1.6.x] (#5188) --- docs/data-prep/ingest-data-fs.md | 173 +-------------------- docs/feature-store/sources-targets.md | 216 ++++++++++++++++++++++---- 2 files changed, 191 insertions(+), 198 deletions(-) diff --git a/docs/data-prep/ingest-data-fs.md b/docs/data-prep/ingest-data-fs.md index 8bced6fe53d..b7471c576f1 100644 --- a/docs/data-prep/ingest-data-fs.md +++ b/docs/data-prep/ingest-data-fs.md @@ -27,11 +27,10 @@ one of: count, sum, sqr, max, min, first, last, avg, stdvar, stddev. E.g. 
x_coun - [Ingest data using an MLRun job](#ingest-data-using-an-mlrun-job) - [Real-time ingestion](#real-time-ingestion) - [Incremental ingestion](#incremental-ingestion) -- [Data sources](#data-sources) -- [Target stores](#target-stores) **See also**: - {ref}`feature-sets` +- {ref}`sources-targets` ## Verify a feature set with a small dataset by inferring data @@ -159,174 +158,4 @@ target from the previous ingest is not deleted. For the storey and pandas ingestion engines, the feature is currently implemented for ParquetSource only (CsvSource will be supported in a future release). For Spark engine both ParquetSource and CsvSource are supported. -## Data sources -For batch ingestion the feature store supports dataframes and files (i.e. csv & parquet).
    -The files can reside on S3, NFS, SQL (for example, MYSQL), Azure blob storage, or the Iguazio platform. MLRun also supports Google BigQuery as a data source. - -For real time ingestion the source can be http, Kafka, MySQL, or V3IO stream, etc. -When defining a source, it maps to nuclio event triggers.
    - -You can also create a custom `source` to access various databases or data sources. - -### S3/Azure data source - -When working with S3/Azure, there are additional requirements. Use: pip install mlrun[s3]; pip install mlrun[azure-blob-storage]; -or pip install mlrun[google-cloud-storage] to install them. -- Azure: define the environment variable `AZURE_STORAGE_CONNECTION_STRING` -- S3: define `AWS_ACCESS_KEY_ID`, `AWS_SECRET_ACCESS_KEY` and `AWS_BUCKET` - -### SQL data source - -```{admonition} Note -Tech Preview -``` -```{admonition} Limitation -Do not use SQL reserved words as entity names. See more details in [Keywords and Reserved Words](https://dev.mysql.com/doc/refman/8.0/en/keywords.html). -``` -`SQLSource` can be used for both batch ingestion and real time ingestion. It supports storey but does not support Spark. To configure -either, pass the `db_uri` or overwrite the `MLRUN_SQL__URL` env var, in this format:
    -`mysql+pymysql://:@:/`, for example: - -``` -source = SQLSource(table_name='my_table', - db_path="mysql+pymysql://abc:abc@localhost:3306/my_db", - key_field='key', - parse_dates=['timestamp']) - - feature_set = fs.FeatureSet("my_fs", entities=[fs.Entity('key')],) - feature_set.set_targets([]) - df = fs.ingest(feature_set, source=source) -``` - -### Apache Kafka data source - -Example: - -``` -from mlrun.datastore.sources import KafkaSource - - -with open('/v3io/bigdata/name.crt') as x: - caCert = x.read() -caCert - -kafka_source = KafkaSource( - brokers=['default-tenant.app.vmdev76.lab.iguazeng.com:9092'], - topics="stocks-topic", - initial_offset="earliest", - group="my_group", - ) - -run_config = fstore.RunConfig(local=False).apply(mlrun.auto_mount()) - -stocks_set_endpoint = stocks_set.deploy_ingestion_service(source=kafka_source,run_config=run_config) -``` - - -### Confluent Kafka data source - -```{admonition} Note -Tech Preview -``` - -Example: - -``` -from mlrun.datastore.sources import KafkaSource - - -with open('/v3io/bigdata/name.crt') as x: - caCert = x.read() -caCert - - -kafka_source = KafkaSource( - brokers=['server-1:9092', - 'server-2:9092', - 'server-3:9092', - 'server-4:9092', - 'server-5:9092'], - topics=["topic-name"], - initial_offset="earliest", - group="test", - attributes={"sasl" : { - "enable": True, - "password" : "pword", - "user" : "user", - "handshake" : True, - "mechanism" : "SCRAM-SHA-256"}, - "tls" : { - "enable": True, - "insecureSkipVerify" : False - }, - "caCert" : caCert} - ) - -run_config = fstore.RunConfig(local=False).apply(mlrun.auto_mount()) - -stocks_set_endpoint = stocks_set.deploy_ingestion_service(source=kafka_source,run_config=run_config) -``` - - -## Target stores - -By default, the feature sets are saved in parquet and the Iguazio NoSQL DB ({py:class}`~mlrun.datastore.NoSqlTarget`).
    -The Parquet file is ideal for fetching large set of data for training while the key value is ideal for an online application -since it supports low latency data retrieval based on key access. - -```{admonition} Note -When working with the Iguazio MLOps platform the default feature set storage location is under the "Projects" container: `/fs/..` folder. -The default location can be modified in mlrun config or specified per ingest operation. The parquet/csv files can be stored in -NFS, S3, Azure blob storage, Redis, SQL, and on Iguazio DB/FS. -``` - -### Redis target store - -```{admonition} Note -Tech Preview -``` - -The Redis online target is called, in MLRun, `RedisNoSqlTarget`. The functionality of the `RedisNoSqlTarget` is identical to the `NoSqlTarget` except for: -- The RedisNoSqlTarget accepts the path parameter in the form: `://[:port]` -For example: `rediss://localhost:6379` creates a redis target, where: - - The client/server protocol (rediss) is TLS protected (vs. "redis" if no TLS is established) - - The server location is localhost port 6379. -- If the path parameter is not set, it tries to fetch it from the MLRUN_REDIS__URL environment variable. -- You cannot pass the username/password as part of the URL. If you want to provide the username/password, use secrets as: -`REDIS_USER REDIS_PASSWORD` where \ is the optional RedisNoSqlTarget `credentials_prefix` parameter. -- Two types of Redis servers are supported: StandAlone and Cluster (no need to specify the server type in the config). -- A feature set supports one online target only. Therefore `RedisNoSqlTarget` and `NoSqlTarget` cannot be used as two targets of the same feature set. - -The K8s secrets are not available when executing locally (from the sdk). Therefore, if RedisNoSqlTarget with secret is used, -You must add the secret as an env-var. - -To use the Redis online target store, you can either change the default to be parquet and Redis, or you can specify the Redis target -explicitly each time with the path parameter, for example:
    -`RedisNoSqlTarget(path ="redis://1.2.3.4:6379")` - -### SQL target store - -```{admonition} Note -Tech Preview -``` -```{admonition} Limitation -Do not use SQL reserved words as entity names. See more details in [Keywords and Reserved Words](https://dev.mysql.com/doc/refman/8.0/en/keywords.html). -``` -The `SQLTarget` online target supports storey but does not support Spark. Aggregations are not supported.
    -To configure, pass the `db_uri` or overwrite the `MLRUN_SQL__URL` env var, in this format:
    -`mysql+pymysql://:@:/` - -You can pass the schema and the name of the table you want to create or the name of an existing table, for example: - -``` - target = SQLTarget( - table_name='my_table', - schema= {'id': string, 'age': int, 'time': pd.Timestamp, ...} - create_table=True, - primary_key_column='id', - parse_dates=["time"], - ) -feature_set = fs.FeatureSet("my_fs", entities=[fs.Entity('id')],) -fs.ingest(feature_set, source=df, targets=[target]) -``` \ No newline at end of file diff --git a/docs/feature-store/sources-targets.md b/docs/feature-store/sources-targets.md index a13d467c90b..3ee290c2cd3 100644 --- a/docs/feature-store/sources-targets.md +++ b/docs/feature-store/sources-targets.md @@ -1,37 +1,151 @@ (sources-targets)= # Sources and targets - - [Sources](#sources) - [Targets](#targets) -- [ParquetTarget](#parquettarget) -- [NoSql target](#nosql-target) - - # Sources -| Class name | Description | storey | spark | pandas | -| -------------------------------------------------- | --------------------------------- | --- | --- | --- | -| [mlrun.datastore.BigQuerySource](../api/mlrun.datastore.html#mlrun.datastore.BigQuerySource) | Reads Google BigQuery query results as input source for a flow ("batch" source). | N | Y | Y | -| mlrun.datastore.SnowFlakeSource | Reads Snowflake query results as input source for a flow ("batch" source). | N | Y | N | -| mlrun.datastore.SQLSource | Reads SQL query results as input source for a flow ("batch" source). | Y | N | Y | -| [mlrun.datastore.CSVSource](https://storey.readthedocs.io/en/latest/api.html#storey.sources.CSVSource) | Reads a CSV file as input source for a flow ("batch" source). | Y | Y | Y | -| [storey.sources.DataframeSource](https://storey.readthedocs.io/en/latest/api.html#storey.sources.DataframeSource) | Reads data frame as input source for a flow ("batch" source). | Y | N | N | -| [mlrun.datastore.HttpSource](../api/mlrun.datastore.html#mlrun.datastore.HttpSource) | Sets the HTTP-endpoint source for the flow (event-based source). | Y | N | N | -| [mlrun.datastore.KafkaSource](../api/mlrun.datastore.html#mlrun.datastore.KafkaSource) | Sets the kafka source for the flow (event-based source). | Y | N | N | -| [mlrun.datastore.ParquetSource](https://storey.readthedocs.io/en/latest/api.html#storey.sources.ParquetSource) | Reads the Parquet file/dir as the input source for a flow ("batch" source). | Y | Y | Y | -| [mlrun.datastore.StreamSource](../api/mlrun.datastore.html#mlrun.datastore.StreamSource) | Sets the stream source for the flow. If the stream doesn’t exist it creates it (event-based source). | Y | N | N | + +For batch ingestion the feature store supports dataframes and files (i.e. csv & parquet).
    +The files can reside on S3, NFS, SQL (for example, MYSQL), Azure blob storage, or the Iguazio platform. MLRun also supports Google BigQuery as a data source. + +For real time ingestion the source can be http, Kafka, MySQL, or V3IO stream, etc. +When defining a source, it maps to nuclio event triggers.
    + +You can also create a custom `source` to access various databases or data sources. + +| Class name | Description | storey | spark | pandas | +| -------------------------------------------------- | --------------------------------- | --- | --- | --- | +| [BigQuerySource](../api/mlrun.datastore.html#mlrun.datastore.BigQuerySource) | Batch. Reads Google BigQuery query results as input source for a flow.| N | Y | Y | +| SnowFlakeSource | Batch. Reads Snowflake query results as input source for a flow | N | Y | N | +| [SQLSource](#sql-data-source) | Batch. Reads SQL query results as input source for a flow | Y | N | Y | +| [CSVSource](https://storey.readthedocs.io/en/latest/api.html#storey.sources.CSVSource) | Batch. Reads a CSV file as input source for a flow. | Y | Y | Y | +| [DataframeSource](https://storey.readthedocs.io/en/latest/api.html#storey.sources.DataframeSource) | Batch. Reads data frame as input source for a flow. | Y | N | N | +| [ParquetSource](https://storey.readthedocs.io/en/latest/api.html#storey.sources.ParquetSource) | Batch. Reads the Parquet file/dir as the input source for a flow. | Y | Y | Y | +| [HttpSource](../api/mlrun.datastore.html#mlrun.datastore.HttpSource) |Event-based. Sets the HTTP-endpoint source for the flow. | Y | N | N | +| [Apache Kafka source](#apache-kafka-source) and [Confluent Kafka source](#confluent-kafka-source)|Event-based. Sets the kafka source for the flow. | Y | N | N | +| [StreamSource](../api/mlrun.datastore.html#mlrun.datastore.StreamSource) |Event-based. Sets the stream source for the flow. If the stream doesn’t exist it creates it. | Y | N | N | + +## S3/Azure source + +When working with S3/Azure, there are additional requirements. Use: pip install mlrun[s3]; pip install mlrun[azure-blob-storage]; +or pip install mlrun[google-cloud-storage] to install them. +- Azure: define the environment variable `AZURE_STORAGE_CONNECTION_STRING` +- S3: define `AWS_ACCESS_KEY_ID`, `AWS_SECRET_ACCESS_KEY` and `AWS_BUCKET` + +## SQL source + +```{admonition} Note +Tech Preview +``` +```{admonition} Limitation +Do not use SQL reserved words as entity names. See more details in [Keywords and Reserved Words](https://dev.mysql.com/doc/refman/8.0/en/keywords.html). +``` +`SQLSource` can be used for both batch ingestion and real time ingestion. It supports storey but does not support Spark. To configure +either, pass the `db_uri` or overwrite the `MLRUN_SQL__URL` env var, in this format:
    +`mysql+pymysql://:@:/`, for example: + +``` +source = SQLSource(table_name='my_table', + db_path="mysql+pymysql://abc:abc@localhost:3306/my_db", + key_field='key', + parse_dates=['timestamp']) + + feature_set = fs.FeatureSet("my_fs", entities=[fs.Entity('key')],) + feature_set.set_targets([]) + df = fs.ingest(feature_set, source=source) +``` + +## Apache Kafka source + +Example: + +``` +from mlrun.datastore.sources import KafkaSource + + +with open('/v3io/bigdata/name.crt') as x: + caCert = x.read() +caCert + +kafka_source = KafkaSource( + brokers=['default-tenant.app.vmdev76.lab.iguazeng.com:9092'], + topics="stocks-topic", + initial_offset="earliest", + group="my_group", + ) + +run_config = fstore.RunConfig(local=False).apply(mlrun.auto_mount()) + +stocks_set_endpoint = stocks_set.deploy_ingestion_service(source=kafka_source,run_config=run_config) +``` + +## Confluent Kafka source + +```{admonition} Note +Tech Preview +``` +Example: + +``` +from mlrun.datastore.sources import KafkaSource + + +with open('/v3io/bigdata/name.crt') as x: + caCert = x.read() +caCert + + +kafka_source = KafkaSource( + brokers=['server-1:9092', + 'server-2:9092', + 'server-3:9092', + 'server-4:9092', + 'server-5:9092'], + topics=["topic-name"], + initial_offset="earliest", + group="test", + attributes={"sasl" : { + "enable": True, + "password" : "pword", + "user" : "user", + "handshake" : True, + "mechanism" : "SCRAM-SHA-256"}, + "tls" : { + "enable": True, + "insecureSkipVerify" : False + }, + "caCert" : caCert} + ) + +run_config = fstore.RunConfig(local=False).apply(mlrun.auto_mount()) + +stocks_set_endpoint = stocks_set.deploy_ingestion_service(source=kafka_source,run_config=run_config) +``` # Targets -| Class name | Description | storey | spark | pandas | -| -------------------------------------------------- | ------------------------------------------------------- | --- | --- | --- | -| [mlrun.datastore.CSVTarget](https://storey.readthedocs.io/en/latest/api.html#storey.targets.CSVTarget) | Writes events to a CSV file (offline target). | Y | Y | Y | -| [mlrun.datastore.KafkaTarget](https://storey.readthedocs.io/en/latest/api.html#storey.targets.KafkaTarget) | Writes all incoming events into a Kafka stream (online target). | Y | N | N | -| [mlrun.datastore.NoSqlTarget](https://storey.readthedocs.io/en/latest/api.html#storey.targets.NoSqlTarget) | The default online target. Persists the data in V3IO table to its associated storage by key (online target). | Y | Y | Y | -| mlrun.datastore.RedisNoSqlTarget | Persists the data in Redis table to its associated storage by key (online target). | Y | Y | N | -| mlrun.datastore.SqlTarget | The default offline target. Persists the data in SQL table to its associated storage by key (offline target). | Y | N | Y | -| [mlrun.datastore.ParquetTarget](https://storey.readthedocs.io/en/latest/api.html#storey.targets.ParquetTarget)| The Parquet target storage driver, used to materialize feature set/vector data into parquet files (online target). | Y | Y | Y | -| [mlrun.datastore.StreamTarget](https://storey.readthedocs.io/en/latest/api.html#storey.targets.StreamTarget) | Writes all incoming events into a V3IO stream (offline target). | Y | N | N | + +By default, the feature sets are saved in parquet and the Iguazio NoSQL DB ({py:class}`~mlrun.datastore.NoSqlTarget`).
    +The Parquet file is ideal for fetching large set of data for training while the key value is ideal for an online application +since it supports low latency data retrieval based on key access. + +```{admonition} Note +When working with the Iguazio MLOps platform the default feature set storage location is under the "Projects" container: `/fs/..` folder. +The default location can be modified in mlrun config or specified per ingest operation. The parquet/csv files can be stored in +NFS, S3, Azure blob storage, Redis, SQL, and on Iguazio DB/FS. +``` + + +| Class name | Description | storey | spark | pandas | +| -------------------------------------------------- | -------------------------------------------------------| --- | --- | --- | +| [CSVTarget](https://storey.readthedocs.io/en/latest/api.html#storey.targets.CSVTarget) |Offline. Writes events to a CSV file. | Y | Y | Y | +| [KafkaTarget](https://storey.readthedocs.io/en/latest/api.html#storey.targets.KafkaTarget) |Offline. Writes all incoming events into a Kafka stream. | Y | N | N | +| [ParquetTarget](#parquettarget) |Offline. The Parquet target storage driver, used to materialize feature set/vector data into parquet files. | Y | Y | Y | +| [StreamTarget](https://storey.readthedocs.io/en/latest/api.html#storey.targets.StreamTarget) |Offline. Writes all incoming events into a V3IO stream. | Y | N | N | +| [NoSqlTarget](#nosql-target) |Online. Persists the data in V3IO table to its associated storage by key . | Y | Y | Y | +| [RedisNoSqlTarget](#redis-target) |Online. Persists the data in Redis table to its associated storage by key. | Y | Y | N | +| [SqlTarget](#sql-target) |Online. Persists the data in SQL table to its associated storage by key. | Y | N | Y | + ## ParquetTarget @@ -72,7 +186,57 @@ Disable partitioning with: ## NoSql target -The {py:meth}`~mlrun.datastore.NoSqlTarget` is a V3IO key-value based target. It is the default target for real-time data. +The {py:meth}`~mlrun.datastore.NoSqlTarget` is a V3IO key-value based target. It is the default target for online (real-time) data. It supports low latency data retrieval based on key access, making it ideal for online applications. The combination of a NoSQL target with the storey engine does not support features of type string with a value containing both quote (') and double-quote ("). + +## Redis target + +```{admonition} Note +Tech Preview +``` + +The Redis online target is called, in MLRun, `RedisNoSqlTarget`. The functionality of the `RedisNoSqlTarget` is identical to the `NoSqlTarget` except for: +- The RedisNoSqlTarget accepts the path parameter in the form: `://[:port]` +For example: `rediss://localhost:6379` creates a redis target, where: + - The client/server protocol (rediss) is TLS protected (vs. "redis" if no TLS is established) + - The server location is localhost port 6379. +- If the path parameter is not set, it tries to fetch it from the MLRUN_REDIS__URL environment variable. +- You cannot pass the username/password as part of the URL. If you want to provide the username/password, use secrets as: +`REDIS_USER REDIS_PASSWORD` where \ is the optional RedisNoSqlTarget `credentials_prefix` parameter. +- Two types of Redis servers are supported: StandAlone and Cluster (no need to specify the server type in the config). +- A feature set supports one online target only. Therefore `RedisNoSqlTarget` and `NoSqlTarget` cannot be used as two targets of the same feature set. + +The K8s secrets are not available when executing locally (from the sdk). 
Therefore, if RedisNoSqlTarget with secret is used,
+you must add the secret as an env-var.
+
+To use the Redis online target store, you can either change the default to be parquet and Redis, or you can specify the Redis target
+explicitly each time with the path parameter, for example:&#13;
    +`RedisNoSqlTarget(path ="redis://1.2.3.4:6379")` + +## SQL target + +```{admonition} Note +Tech Preview +``` +```{admonition} Limitation +Do not use SQL reserved words as entity names. See more details in [Keywords and Reserved Words](https://dev.mysql.com/doc/refman/8.0/en/keywords.html). +``` +The `SQLTarget` online target supports storey but does not support Spark. Aggregations are not supported.
    +To configure, pass the `db_uri` or overwrite the `MLRUN_SQL__URL` env var, in this format:
    +`mysql+pymysql://:@:/` + +You can pass the schema and the name of the table you want to create or the name of an existing table, for example: + +``` + target = SQLTarget( + table_name='my_table', + schema= {'id': string, 'age': int, 'time': pd.Timestamp, ...} + create_table=True, + primary_key_column='id', + parse_dates=["time"], + ) +feature_set = fs.FeatureSet("my_fs", entities=[fs.Entity('id')],) +fs.ingest(feature_set, source=df, targets=[target]) +``` \ No newline at end of file From 0b696d7b3f0f18f0a759ecd6565309dc36085e89 Mon Sep 17 00:00:00 2001 From: Alon Maor <48641682+alonmr@users.noreply.github.com> Date: Sun, 25 Feb 2024 09:04:27 +0200 Subject: [PATCH 048/119] [Docs] Backport documentation updates [1.6.x] (#5189) --- README.md | 10 +- docs/cheat-sheet.md | 8 +- docs/cli.md | 261 +++++++++--------- docs/conf.py | 2 +- docs/data-prep/index.md | 1 - docs/data-prep/ingest-data-fs.md | 7 + docs/data-prep/logging_datasets.md | 19 +- .../01-ingest-datasources.ipynb | 2 +- .../03-deploy-serving-model.ipynb | 14 +- docs/feature-store/feature-sets.md | 7 +- docs/feature-store/feature-store-overview.md | 10 + docs/feature-store/feature-vectors.md | 4 +- docs/feature-store/sources-targets.md | 5 +- docs/feature-store/transformations.md | 4 +- docs/glossary.md | 12 +- docs/index.md | 36 +-- docs/install.md | 2 + docs/install/remote.md | 4 +- .../initial-setup-configuration.ipynb | 16 +- .../automate-project-git-source.ipynb | 6 +- docs/projects/project-setup.md | 1 + docs/runtimes/configuring-job-resources.md | 3 +- docs/runtimes/create-and-use-functions.ipynb | 8 +- docs/runtimes/dask-mlrun.ipynb | 2 +- docs/runtimes/databricks.ipynb | 130 ++++++++- docs/runtimes/image-build.md | 6 + docs/runtimes/images.md | 11 +- docs/runtimes/load-from-hub.md | 8 +- docs/runtimes/serving-function.md | 61 +++- docs/serving/custom-model-serving-class.md | 6 +- docs/serving/graph-ha-cfg.md | 2 +- docs/store/artifacts.md | 2 + docs/store/datastore.md | 2 +- mlrun/runtimes/serving.py | 3 +- 34 files changed, 454 insertions(+), 221 deletions(-) diff --git a/README.md b/README.md index 43bcafd7991..3a52230ba4b 100644 --- a/README.md +++ b/README.md @@ -33,7 +33,7 @@ In MLRun the assets, metadata, and services (data, functions, jobs, artifacts, m Projects can be imported/exported as a whole, mapped to git repositories or IDE projects (in PyCharm, VSCode, etc.), which enables versioning, collaboration, and CI/CD. Project access can be restricted to a set of users and roles. -See: **Docs:** [Projects and Automation](https://docs.mlrun.org/en/latest/projects/project.html), [CI/CD Integration](https://docs.mlrun.org/en/latest/projects/ci-integration.html), **Tutorials:** [Quick start](https://docs.mlrun.org/en/latest/tutorials/01-mlrun-basics.html), [Automated ML Pipeline](https://docs.mlrun.org/en/latest/tutorials/04-pipeline.html), **Video:** [quick start](https://youtu.be/xI8KVGLlj7Q). +See: **Docs:** [Projects and Automation](https://docs.mlrun.org/en/latest/projects/project.html), [CI/CD Integration](https://docs.mlrun.org/en/latest/projects/ci-integration.html), **Tutorials:** [Quick start](https://docs.mlrun.org/en/latest/tutorials/01-mlrun-basics.html), [Automated ML Pipeline](https://docs.mlrun.org/en/latest/tutorials/04-pipeline.html), **Video:** [Quick start](https://youtu.be/xI8KVGLlj7Q). 
### Ingest and process data @@ -46,13 +46,13 @@ See: **Docs:** [Ingest and process data](https://docs.mlrun.org/en/latest/data-p MLRun allows you to easily build ML pipelines that take data from various sources or the Feature Store and process it, train models at scale with multiple parameters, test models, tracks each experiments, register, version and deploy models, etc. MLRun provides scalable built-in or custom model training services, integrate with any framework and can work with 3rd party training/auto-ML services. You can also bring your own pre-trained model and use it in the pipeline. -See: **Docs:** [Develop and train models](https://docs.mlrun.org/en/latest/development/index.html), [Model Training and Tracking](https://docs.mlrun.org/en/latest/development/model-training-tracking.html), [Batch Runs and Workflows](https://docs.mlrun.org/en/latest/concepts/runs-workflows.html); **Tutorials:** [Train & Eval Models](https://docs.mlrun.org/en/latest/tutorials/02-model-training.html), [Automated ML Pipeline](https://docs.mlrun.org/en/latest/tutorials/04-pipeline.html); **Video:** [Training models](https://youtu.be/bZgBsmLMdQo). +See: **Docs:** [Develop and train models](https://docs.mlrun.org/en/latest/development/index.html), [Model Training and Tracking](https://docs.mlrun.org/en/latest/development/model-training-tracking.html), [Batch Runs and Workflows](https://docs.mlrun.org/en/latest/concepts/runs-workflows.html); **Tutorials:** [Train, compare, and register models](https://docs.mlrun.org/en/latest/tutorials/02-model-training.html), [Automated ML Pipeline](https://docs.mlrun.org/en/latest/tutorials/04-pipeline.html); **Video:** [Train and compare models](https://youtu.be/bZgBsmLMdQo). ### Deploy models and applications MLRun rapidly deploys and manages production-grade real-time or batch application pipelines using elastic and resilient serverless functions. MLRun addresses the entire ML application: intercepting application/user requests, running data processing tasks, inferencing using one or more models, driving actions, and integrating with the application logic. -See: **Docs:** [Deploy models and applications](https://docs.mlrun.org/en/latest/deployment/index.html), [Realtime Pipelines](https://docs.mlrun.org/en/latest/serving/serving-graph.html), [Batch Inference](https://docs.mlrun.org/en/latest/concepts/TBD.html), **Tutorials:** [Realtime Serving](https://docs.mlrun.org/en/latest/tutorials/03-model-serving.html), [Batch Inference](https://docs.mlrun.org/en/latest/tutorials/07-batch-infer.html), [Advanced Pipeline](https://docs.mlrun.org/en/latest/tutorials/07-batch-infer.html); **Video:** [Serving models](https://youtu.be/OUjOus4dZfw). +See: **Docs:** [Deploy models and applications](https://docs.mlrun.org/en/latest/deployment/index.html), [Realtime Pipelines](https://docs.mlrun.org/en/latest/serving/serving-graph.html), [Batch Inference](https://docs.mlrun.org/en/latest/deployment/batch_inference.html), **Tutorials:** [Realtime Serving](https://docs.mlrun.org/en/latest/tutorials/03-model-serving.html), [Batch Inference](https://docs.mlrun.org/en/latest/tutorials/07-batch-infer.html), [Advanced Pipeline](https://docs.mlrun.org/en/latest/tutorials/07-batch-infer.html); **Video:** [Serving pre-trained models](https://youtu.be/OUjOus4dZfw). 
### Monitor and alert @@ -70,9 +70,9 @@ MLRun includes the following major components: [**Project Management:**](https://docs.mlrun.org/en/latest/projects/project.html) A service (API, SDK, DB, UI) that manages the different project assets (data, functions, jobs, workflows, secrets, etc.) and provides central control and metadata layer. -[**Serverless Functions:**](https://docs.mlrun.org/en/latest/runtimes/functions.html) automatically deployed software package with one or more methods and runtime-specific attributes (such as image, libraries, command, arguments, resources, etc.). +[**Functions:**](https://docs.mlrun.org/en/latest/runtimes/functions.html) automatically deployed software package with one or more methods and runtime-specific attributes (such as image, libraries, command, arguments, resources, etc.). -[**Data & Artifacts:**](https://docs.mlrun.org/en/latest/concepts/data-feature-store.html) Glueless connectivity to various data sources, metadata management, catalog, and versioning for structures/unstructured artifacts. +[**Data & Artifacts:**](https://docs.mlrun.org/en/latest/concepts/data.html) Glueless connectivity to various data sources, metadata management, catalog, and versioning for structures/unstructured artifacts. [**Feature Store:**](https://docs.mlrun.org/en/latest/feature-store/feature-store.html) automatically collects, prepares, catalogs, and serves production data features for development (offline) and real-time (online) deployment using minimal engineering effort. diff --git a/docs/cheat-sheet.md b/docs/cheat-sheet.md index a671582a6fe..873a00f1ff7 100644 --- a/docs/cheat-sheet.md +++ b/docs/cheat-sheet.md @@ -711,11 +711,11 @@ Docs: [Feature store overview](./feature-store/feature-store-overview.html) ### Engines -Docs: [Ingest data using the feature store](./data-prep/ingest-data-fs.html), [Ingest features with Spark](./feature-store/using-spark-engine.html) +Docs: {ref}`feature-store-overview`, [Ingest features with Spark](./feature-store/using-spark-engine.html) -- `storey` engine (default) is designed for real-time data (e.g. individual records) that will be transformed using Python functions and classes -- `pandas` engine is designed for batch data that can fit into memory that will be transformed using Pandas dataframes -- `spark` engine is designed for batch data that cannot fit into memory that will be transformed using Spark dataframes +- `storey` engine (default) is designed for real-time data (e.g. individual records) that will be transformed using Python functions and classes. +- `pandas` engine is designed for batch data that can fit into memory that will be transformed using Pandas dataframes. Pandas is used for testing, and is not recommended for production deployments +- `spark` engine is designed for batch data. ### Feature sets diff --git a/docs/cli.md b/docs/cli.md index 53082cdc7d7..b98e034d6fc 100644 --- a/docs/cli.md +++ b/docs/cli.md @@ -31,25 +31,25 @@ Usage: mlrun build [OPTIONS] FUNC_URL Example: `mlrun build myfunc.yaml` -| Flag | Description | -|----------------------------------------|----------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------| -| −−name TEXT | Function name | -| −−project TEXT | Project name | -| −−tag TEXT | Function tag | -| &minusi, −−image TEXT | Target image path | -| &minuss, −−source TEXT | Path/URL of the function source code. 
A PY file, or if `-a/--archive` is specified, a directory to archive. (Default: './') | -| &minusb, −−base-image TEXT | Base Docker image | -| &minusc, −−command TEXT | Build commands; for example, '-c pip install pandas' | -| −−secret−name TEXT | Name of a container-registry secret | -| &minusa, −−archive TEXT | Path/URL of a target function-sources archive directory: as part of the build, the function sources (see `-s/--source`) are archived into a TAR file and then extracted into the archive directory | -| −−silent | Do not show build logs | -| −−with−mlrun | Add the MLRun package ("mlrun") | -| −−db TEXT | Save the run results to path or DB url | -| &minusr, −−runtime TEXT | Function spec dict, for pipeline usage | -| −−kfp | Running inside Kubeflow Piplines, do not use | -| −−skip | Skip if already deployed | -| −−env−file TEXT | Path to .env file to load config/variables from | -| −−ensure−project | Ensure the project exists, if not, create project | +| Flag | Description | +|-----------------------|----------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------| +| \--name TEXT | Function name | +| \--project TEXT | Project name | +| \--tag TEXT | Function tag | +| -i, --image TEXT | Target image path | +| -s, --source TEXT | Path/URL of the function source code. A PY file, or if `-a/--archive` is specified, a directory to archive. (Default: './') | +| -b, --base-image TEXT | Base Docker image | +| -c, --command TEXT | Build commands; for example, '-c pip install pandas' | +| \--secret-name TEXT | Name of a container-registry secret | +| -a, --archive TEXT | Path/URL of a target function-sources archive directory: as part of the build, the function sources (see `-s/--source`) are archived into a TAR file and then extracted into the archive directory | +| \--silent | Do not show build logs | +| \--with-mlrun | Add the MLRun package ("mlrun") | +| \--db TEXT | Save the run results to path or DB url | +| -r, --runtime TEXT | Function spec dict, for pipeline usage | +| \--kfp | Running inside Kubeflow Piplines, do not use | +| \--skip | Skip if already deployed | +| \--env-file TEXT | Path to .env file to load config/variables from | +| \--ensure-project | Ensure the project exists, if not, create project | > **Note:** For information about using the `-a|--archive` option to create a function-sources archive, see [Using a Sources Archive](#sources-archive) later in this tutorial. @@ -68,18 +68,18 @@ Examples: - Clean resources for specific job (by uid): `mlrun clean mpijob 15d04c19c2194c0a8efb26ea3017254b` -| Flag | Description | -|--------------------|---------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------| -| −−kind | Clean resources for all runs of a specific kind (e.g. job). | -| −−id | Delete the resources of the mlrun object with this identifier. For most function runtimes, runtime resources are per run, and the identifier is the run’s UID. For DASK runtime, the runtime resources are per function, and the identifier is the function’s name. 
|
+| Flag | Description |
|---------|---------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------|
| \--kind | Clean resources for all runs of a specific kind (e.g. job). |
| \--id | Delete the resources of the mlrun object with this identifier. For most function runtimes, runtime resources are per run, and the identifier is the run’s UID. For DASK runtime, the runtime resources are per function, and the identifier is the function’s name. |

-| Options | Description |
|-----------------------------|-------------------------------------------------------------------------------------------------------------------------------------------------------|
| −−api TEXT | URL of the mlrun-api service. |
| −ls, −−label−selector TEXT | Delete only the runtime resources matching the label selector. |
| −f, −−force | Delete the runtime resource even if they're not in terminal state or if the grace period didn’t pass. |
| −gp, −−grace−period INTEGER | Grace period, in seconds, given to the runtime resource before they are actually removed, counted from the moment they moved to the terminal state. |

+| Options | Description |
|-----------------------------|-------------------------------------------------------------------------------------------------------------------------------------------------------|
| \--api TEXT | URL of the mlrun-api service. |
| -ls, --label-selector TEXT | Delete only the runtime resources matching the label selector. |
| -f, --force | Delete the runtime resources even if they're not in terminal state or if the grace period didn’t pass. |
| -gp, --grace-period INTEGER | Grace period, in seconds, given to the runtime resources before they are actually removed, counted from the moment they moved to the terminal state. |
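
The `clean` command maps to a run-DB call in the SDK as well. A rough sketch, assuming the `HTTPRunDB.delete_runtime_resources` API; the UID and grace period are illustrative:

```python
import mlrun

# approximate SDK equivalent of `mlrun clean job <uid> -gp 30`
# (assumes HTTPRunDB.delete_runtime_resources; all values are illustrative)
db = mlrun.get_run_db()
db.delete_runtime_resources(
    kind="job",
    object_id="15d04c19c2194c0a8efb26ea3017254b",  # run UID, as in the example above
    grace_period=30,
)
```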
    ### `config` @@ -88,15 +88,15 @@ Use the `config` CLI command to show the mlrun client environment configuration, Example: `mlrun config` -| Flag | Description | -|--------------------------------------------|----------------------------------------------------------| -| −−command TEXT | get (default), set or clear | -| −−env−file TEXT | Path to the mlrun .env file (defaults to '~/.mlrun.env') | -| -a, −−api TEXT | API service url | -| -p, −−artifact−path TEXT | Default artifacts path | -| -u, −−username TEXT | Username (for remote access) | -| -k, −−access-key TEXT | Access key (for remote access) | -| -e, −−env−vars TEXT | Additional env vars, e.g. -e AWS_ACCESS_KEY_ID= | +| Flag | Description | +|--------------------------|----------------------------------------------------------| +| \--command TEXT | get (default), set or clear | +| \--env-file TEXT | Path to the mlrun .env file (defaults to '~/.mlrun.env') | +| -a, --api TEXT | API service url | +| -p, --artifact-path TEXT | Default artifacts path | +| -u, --username TEXT | Username (for remote access) | +| -k, --access-key TEXT | Access key (for remote access) | +| -e, --env-vars TEXT | Additional env vars, e.g. -e AWS_ACCESS_KEY_ID= | @@ -113,16 +113,16 @@ Examples: - `mlrun get artifacts --project getting-started-admin` - `mlrun get func prep-data --project getting-started-admin` -| Flag | Description | -|----------------------------------|-----------------------------------------------------------------------| -| −−kind TEXT | resource type to list/get: run, runtime, workflow, artifact, function | -| −−name TEXT | Name of object to return | -| -s, −−selector TEXT | Label selector | -| -n, −−namespace TEXT | Kubernetes namespace | -| −−uid TEXT | Object ID | -| −−project TEXT | Project name to return | -| -t, −−tag TEXT | Artifact/function tag of object to return | -| −−db TEXT | db path/url of object to return | +| Flag | Description | +|----------------------|-----------------------------------------------------------------------| +| \--kind TEXT | resource type to list/get: run, runtime, workflow, artifact, function | +| \--name TEXT | Name of object to return | +| -s, --selector TEXT | Label selector | +| -n, --namespace TEXT | Kubernetes namespace | +| \--uid TEXT | Object ID | +| \--project TEXT | Project name to return | +| -t, --tag TEXT | Artifact/function tag of object to return | +| \--db TEXT | db path/url of object to return | @@ -134,13 +134,13 @@ Usage: logs [OPTIONS] uid Example: `mlrun logs ba409c0cb4904d60aa8f8d1c05b40a75 --project getting-started-admin` -| Flag | Description | -|--------------------------------|-------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------| -| −−uid TEXT | UID of the run to get logs for | -| -p, −−project TEXT | Project name | -| −−offset TEXT | Retrieve partial log, get up to size bytes starting at the offset from beginning of log | -| −−db TEXT | API service url | -| -w, −−watch | Retrieve logs of a running process, and watch the progress of the execution until it completes. Prints out the logs and continues to periodically poll for, and print, new logs as long as the state of the runtime that generates this log is either `pending` or `running`. 
| +| Flag | Description | +|--------------------|-------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------| +| \--uid TEXT | UID of the run to get logs for | +| -p, --project TEXT | Project name | +| \--offset TEXT | Retrieve partial log, get up to size bytes starting at the offset from beginning of log | +| \--db TEXT | API service url | +| -w, --watch | Retrieve logs of a running process, and watch the progress of the execution until it completes. Prints out the logs and continues to periodically poll for, and print, new logs as long as the state of the runtime that generates this log is either `pending` or `running`. | @@ -153,33 +153,34 @@ Usage: mlrun project [OPTIONS] [CONTEXT] Example: `mlrun project -r workflow.py .` -| Flag | Description | -|------------------------------------------------|--------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------| -| -n, −−name TEXT | Project name | -| -u, −−url TEXT | Remote git or archive url of the project | -| -r, −−run TEXT | Run workflow name of .py file | -| -a, −−arguments TEXT | Kubeflow pipeline arguments name and value tuples (with -r flag), e.g. -a x=6 | -| -p, −−artifact−path TEXT | Target path/url for workflow artifacts. The string `{{workflow.uid}}` is replaced by workflow id | -| -x, −−param TEXT | mlrun project parameter name and value tuples, e.g. -p x=37 -p y='text' | -| -s, −−secrets TEXT | Secrets file= or env=ENV_KEY1,.. | -| −−namespace TEXT | k8s namespace | -| −−db TEXT | API service url | -| −−init−git | For new projects init git the context dir | -| -c, −−clone | Force override/clone into the context dir | -| −−sync | Sync functions into db | -| -w, −−watch | Wait for pipeline completion (with -r flag) | -| -d, −−dirty | Allow run with uncommitted git changes | -| −−git−repo TEXT | git repo (org/repo) for git comments | -| −−git−issue INTEGER | git issue number for git comments | -| −−handler TEXT | Workflow function handler name | -| −−engine TEXT | Workflow engine (kfp/local) | -| −−local | Try to run workflow functions locally | -| −−timeout INTEGER | Timeout in seconds to wait for pipeline completion (used when watch=True) | -| −−env−file TEXT | Path to .env file to load config/variables from | -| −−save/−−no−save | Create and save the project if not exist (default to save) | -| −−schedule TEXT | To create a schedule, define a standard crontab expression string. To use the pre-defined workflow's schedule: `set --schedule 'true'`. [See cron details](https://apscheduler.readthedocs.io/en/3.x/modules/triggers/cron.html#module-apscheduler.triggers.cron). 
| −−save−secrets TEXT | Store the project secrets as k8s secrets |
| -nt, −−notifications TEXT | To have a notification for the run set notification file destination define: file=notification.json or a 'dictionary configuration e.g \'{"slack":{"webhook":""}}\'' |
+| Flag | Description |
|---------------------------|--------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------|
| [CONTEXT] | Project context path (positional argument) |
| -n, --name TEXT | Project name |
| -u, --url TEXT | Remote git or archive url of the project |
| -r, --run TEXT | Run workflow name of .py file |
| -a, --arguments TEXT | Kubeflow pipeline arguments name and value tuples (with -r flag), e.g. -a x=6 |
| -p, --artifact-path TEXT | Target path/url for workflow artifacts. The string `{{workflow.uid}}` is replaced by workflow id |
| -x, --param TEXT | mlrun project parameter name and value tuples, e.g. -p x=37 -p y='text' |
| -s, --secrets TEXT | Secrets file= or env=ENV_KEY1,.. |
| \--namespace TEXT | k8s namespace |
| \--db TEXT | API service url |
| \--init-git | For new projects init git the context dir |
| -c, --clone | Force override/clone into the context dir |
| \--sync | Sync functions into db |
| -w, --watch | Wait for pipeline completion (with -r flag) |
| -d, --dirty | Allow run with uncommitted git changes |
| \--git-repo TEXT | git repo (org/repo) for git comments |
| \--git-issue INTEGER | git issue number for git comments |
| \--handler TEXT | Workflow function handler name |
| \--engine TEXT | Workflow engine (kfp/local) |
| \--local | Try to run workflow functions locally |
| \--timeout INTEGER | Timeout in seconds to wait for pipeline completion (used when watch=True) |
| \--env-file TEXT | Path to .env file to load config/variables from |
| \--save/--no-save | Create and save the project if it does not exist (default: save) |
| \--schedule TEXT | To create a schedule, define a standard crontab expression string. To use the pre-defined workflow's schedule: `set --schedule 'true'`. [See cron details](https://apscheduler.readthedocs.io/en/3.x/modules/triggers/cron.html#module-apscheduler.triggers.cron). 
| \--save-secrets TEXT | Store the project secrets as k8s secrets |
| -nt, --notifications TEXT | To have a notification for the run set notification file destination define: file=notification.json or a 'dictionary configuration e.g \'{"slack":{"webhook":""}}\'' |



### `run`

Execute a task and inject parameters, using a local or remote function.

Usage: mlrun [OPTIONS] URL [ARGS]...

Examples:

- `mlrun run -f db://getting-started-admin/prep-data --project getting-started-admin`

- `mlrun run -f myfunc.yaml -w -p p1=3`


-| Flag | Description |
|-----------------------------------------------------|-----------------------------------------------------------------------------------------------------------|
| -p, −−param TEXT | Parameter name and value tuples; for example, `-p x=37 -p y='text'` |
| -i, −−inputs TEXT | Input artifact; for example, `-i infile.txt=s3://mybucket/infile.txt` |
| -o, −−outputs TEXT | Output artifact/result for kfp" |
| −−in−path TEXT | Base directory path/URL for storing input artifacts |
| −−out−path TEXT | Base directory path/URL for storing output artifacts |
| -s, −−secrets TEXT | Secrets, either as `file=` or `env=,...`; for example, `-s file=secrets.txt` |
| −−uid TEXT | Unique run ID |
| −−name TEXT | Run name |
| −−workflow TEXT | Workflow name/id |
| −−project TEXT | Project name or ID |
| −−db TEXT | Save run results to DB url |
| −−runtime TEXT | Function spec dict, for pipeline usage |
| −−kfp | Running inside Kubeflow Piplines, do not use |
| -h, −−hyperparam TEXT | Hyper parameters (will expand to multiple tasks) e.g. --hyperparam p2=[1,2,3] |
| −−param−file TEXT | Path to csv table of execution (hyper) params |
| −−selector TEXT | How to select the best result from a list, e.g. max.accuracy |
| −−hyper−param−strategy TEXT | Hyperparam tuning strategy list, grid, random |
| −−hyper−param−options TEXT | Hyperparam options json string |
| -f, −−func−url TEXT | Path/URL of a YAML function-configuration file, or db:///[:tag] for a DB function object |
| −−task TEXT | Path/URL of a YAML task-configuration file |
| −−handler TEXT | Invoke the function handler inside the code file |
| −−mode TEXT | Special run mode ('pass' for using the command as is) |
| −−schedule TEXT | Cron schedule |
| −−from−env | Read the spec from the env var |
| −−dump | Dump run results as YAML |
| −−image TEXT | Container image |
| −−kind TEXT | Serverless runtime kind |
| −−source TEXT | Source code archive/git |
| −−local | Run the task locally (ignore runtime) |
| −−auto−mount | Add volume mount to job using auto mount option |
| −−workdir TEXT | Run working directory |
| −−origin−file TEXT | For internal use |
| −−label TEXT | Run labels (key=val) |
| -w, −−watch | Watch/tail run log |
| −−verbose | Verbose log |
| −−scrape−metrics | Whether to add the `mlrun/scrape-metrics` label to this run's resources |
| −−env−file TEXT | Path to .env file to load config/variables from |
| −−auto−build | When set, the function image will be built prior to run if needed |
| −−ensure−project | Ensure the project exists, if not, create project |
| −−returns TEXT | Logging configurations for the handler's returning values |
+| Flag | Description |
|------------------------------|-----------------------------------------------------------------------------------------------------------|
| -p, --param TEXT | Parameter name and value tuples; for example, `-p x=37 -p y='text'` |
| -i, --inputs TEXT | Input artifact; for example, `-i infile.txt=s3://mybucket/infile.txt` |
| -o, --outputs TEXT | Output artifact/result for kfp |
| \--in-path TEXT | Base directory path/URL for storing input artifacts |
| \--out-path TEXT | Base directory 
path/URL for storing output artifacts |
| -s, --secrets TEXT | Secrets, either as `file=` or `env=,...`; for example, `-s file=secrets.txt` |
| \--uid TEXT | Unique run ID |
| \--name TEXT | Run name |
| \--workflow TEXT | Workflow name/id |
| \--project TEXT | Project name or ID |
| \--db TEXT | Save run results to DB url |
| \--runtime TEXT | Function spec dict, for pipeline usage |
| \--kfp | Running inside Kubeflow Pipelines, do not use |
| -h, --hyperparam TEXT | Hyper parameters (will expand to multiple tasks) e.g. --hyperparam p2=[1,2,3] |
| \--param-file TEXT | Path to csv table of execution (hyper) params |
| \--selector TEXT | How to select the best result from a list, e.g. max.accuracy |
| \--hyper-param-strategy TEXT | Hyperparam tuning strategy list, grid, random |
| \--hyper-param-options TEXT | Hyperparam options json string |
| -f, --func-url TEXT | Path/URL of a YAML function-configuration file, or db:///[:tag] for a DB function object |
| \--task TEXT | Path/URL of a YAML task-configuration file |
| \--handler TEXT | Invoke the function handler inside the code file |
| \--mode TEXT | Special run mode ('pass' for using the command as is) |
| \--schedule TEXT | Cron schedule |
| \--from-env | Read the spec from the env var |
| \--dump | Dump run results as YAML |
| \--image TEXT | Container image |
| \--kind TEXT | Serverless runtime kind |
| \--source TEXT | Source code archive/git |
| \--local | Run the task locally (ignore runtime) |
| \--auto-mount | Add volume mount to job using auto mount option |
| \--workdir TEXT | Run working directory |
| \--origin-file TEXT | For internal use |
| \--label TEXT | Run labels (key=val) |
| -w, --watch | Watch/tail run log |
| \--verbose | Verbose log |
| \--scrape-metrics | Whether to add the `mlrun/scrape-metrics` label to this run's resources |
| \--env-file TEXT | Path to .env file to load config/variables from |
| \--auto-build | When set, the function image will be built prior to run if needed |
| \--ensure-project | Ensure the project exists, if not, create project |
| \--returns TEXT | Logging configurations for the handler's returning values |


### `version`

Use the `version` CLI command to display the current MLRun version.

Usage: mlrun version [OPTIONS]

### `watch-stream`

Use the `watch-stream` CLI command to watch a v3io stream and print data at a recurring interval.

Usage: mlrun watch-stream [OPTIONS] URL

Examples:

- `mlrun watch-stream v3io:///users/my-test-stream`
- `mlrun watch-stream v3io:///users/my-test-stream -s 1`
- `mlrun watch-stream v3io:///users/my-test-stream -s 1 -s 2`
- `mlrun watch-stream v3io:///users/my-test-stream -s 1 -s 2 --seek EARLIEST`

-| Flag | Description |
|-------------------------------------|------------------------------------------------------------|
| -s, −−shard-ids INTEGER | Shard id to listen on (can be multiple). |
| −−seek TEXT | Where to start/seek (EARLIEST or LATEST) |
| -i, −−interval INTEGER | Interval in seconds. Default = 3 |
| -j, −−is-json | Indicates that the payload is json (will be deserialized). |

+| Flag | Description |
|-------------------------|------------------------------------------------------------|
| -s, --shard-ids INTEGER | Shard id to listen on (can be multiple). |
| \--seek TEXT | Where to start/seek (EARLIEST or LATEST) |
| -i, --interval INTEGER | Interval in seconds. Default = 3 |
| -j, --is-json | Indicates that the payload is json (will be deserialized). |
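
The hyper-parameter flags in the `run` table above have direct SDK counterparts. A minimal sketch; `trainer.py` and its `train` handler are hypothetical:

```python
import mlrun
from mlrun.model import HyperParamOptions

# sketch of `mlrun run -h p2=[1,2,3] --hyper-param-strategy grid --selector max.accuracy`
fn = mlrun.code_to_function(
    "trainer", filename="trainer.py", kind="job", image="mlrun/mlrun"
)
run = fn.run(
    handler="train",
    hyperparams={"p2": [1, 2, 3]},  # expands into one task per value
    hyper_param_options=HyperParamOptions(strategy="grid", selector="max.accuracy"),
    local=True,
)
```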

## Building and running a function from a Git repository

diff --git a/docs/conf.py b/docs/conf.py
index 6ec45dc253c..e2bbb10cd51 100644
--- a/docs/conf.py
+++ b/docs/conf.py
@@ -139,7 +139,6 @@ def current_version():
    "deflist",
    "html_image",
    "html_admonition",
-    "smartquotes",
    "replacements",
    "linkify",
    "substitution",
@@ -171,6 +170,7 @@ def current_version():
]

redirects = {"functions-architecture": "functions.html"}
+smartquotes = False

# -- Autosummary -------------------------------------------------------------

diff --git a/docs/data-prep/index.md b/docs/data-prep/index.md
index 1f559a5ead2..3e57710d5c3 100644
--- a/docs/data-prep/index.md
+++ b/docs/data-prep/index.md
@@ -13,7 +13,6 @@ not needed, in which cases MLRun provides a set of utilities to facilitate data

ingesting_data
logging_datasets
-../runtimes/spark-operator
ingest-data-fs
../feature-store/using-spark-engine
```

diff --git a/docs/data-prep/ingest-data-fs.md b/docs/data-prep/ingest-data-fs.md
index b7471c576f1..977b8319c72 100644
--- a/docs/data-prep/ingest-data-fs.md
+++ b/docs/data-prep/ingest-data-fs.md
@@ -12,6 +12,13 @@ the ingestion process runs the graph transformations, infers metadata and stats,
When targets are not specified, data is stored in the configured default targets (i.e. NoSQL for real-time and Parquet for offline).

+### Ingestion engines
+
+MLRun supports several ingestion engines:
+- `storey` engine (default) is designed for real-time data (e.g. individual records) that will be transformed using Python functions and classes
+- `pandas` engine is designed for batch data that can fit into memory that will be transformed using Pandas dataframes. Pandas is used for testing, and is not recommended for production deployments
+- `spark` engine is designed for batch data.
+
```{admonition} Limitations
- Do not name columns starting with either `_` or `aggr_`. They are reserved for internal use. See
diff --git a/docs/data-prep/logging_datasets.md b/docs/data-prep/logging_datasets.md
index 7310ad1c146..fa107a157d9 100644
--- a/docs/data-prep/logging_datasets.md
+++ b/docs/data-prep/logging_datasets.md
@@ -40,10 +40,11 @@ def get_data(context: MLClientCtx, source_url: DataItem, format: str = 'csv'):
                       index=False, artifact_path=target_path)
```

+This code can be placed in a Python file, or as a cell in a Python notebook.
You can run this function locally or as a job. 
For example, to run it locally:
``` python
from os import path
-from mlrun import new_project, run_local, mlconf
+from mlrun import new_project, mlconf

project_name = 'my-project'
project_path = path.abspath('conf')
@@ -55,12 +56,18 @@ artifact_path = path.abspath('jobs')
mlconf.dbpath = mlconf.dbpath or 'http://mlrun-api:8080'

source_url = 'https://s3.wasabisys.com/iguazio/data/iris/iris_dataset.csv'
+
+# Create a function from a py or notebook (ipynb) file
+get_data_func = project.set_function('./get_data.py',
+                                     name='get_data',
+                                     kind='job',
+                                     image='mlrun/mlrun')
+
# Run get-data function locally
-get_data_run = run_local(name='get_data',
-                         handler=get_data,
-                         inputs={'source_url': source_url},
-                         project=project_name,
-                         artifact_path=artifact_path)
+get_data_run = get_data_func.run(handler="get_data",
+                                 inputs={'source_url': source_url},
+                                 artifact_path=artifact_path,
+                                 local=True)
```

The dataset location is returned in the `outputs` field, therefore you can get the location by calling
diff --git a/docs/feature-store/end-to-end-demo/01-ingest-datasources.ipynb b/docs/feature-store/end-to-end-demo/01-ingest-datasources.ipynb
index ba1fa3e0334..7772c6ed9cc 100644
--- a/docs/feature-store/end-to-end-demo/01-ingest-datasources.ipynb
+++ b/docs/feature-store/end-to-end-demo/01-ingest-datasources.ipynb
@@ -765,7 +765,7 @@
   "cell_type": "markdown",
   "metadata": {},
   "source": [
-    "After performing the ingestion process, you can see all of the different features that were created with the help of the UI, asshown in the image below."
+    "After performing the ingestion process, you can see all of the different features that were created with the help of the UI, as shown in the image below."
   ]
  },
  {
diff --git a/docs/feature-store/end-to-end-demo/03-deploy-serving-model.ipynb b/docs/feature-store/end-to-end-demo/03-deploy-serving-model.ipynb
index 8f3112b6081..0ab0d601028 100644
--- a/docs/feature-store/end-to-end-demo/03-deploy-serving-model.ipynb
+++ b/docs/feature-store/end-to-end-demo/03-deploy-serving-model.ipynb
@@ -370,7 +370,11 @@
   "source": [
    "### Accessing the real-time feature vector directly\n",
    "\n",
-    "You can also directly query the feature store values using the `get_online_feature_service` method. This method is used internally in the EnrichmentVotingEnsemble router class."
+    "You can also directly query the feature store values using the `get_online_feature_service` method. This method is used internally in the EnrichmentVotingEnsemble router class.\n",
+    "\n",
+    "```{admonition} Note\n",
+    "The timestamp of the last event is not returned with `get_online_feature_service` / `svc.get`.\n",
+    "```"
   ]
  },
@@ -422,7 +426,11 @@
   "source": [
    "## Deploying the function on the Kubernetes cluster\n",
    "\n",
-    "You can now deploy the function. Once deployed, you get a function with http trigger that can be called from other locations."
+    "You can now deploy the function. Once deployed, you get a function with http trigger that can be called from other locations.\n",
+    "\n",
+    "Model activities can be tracked into a real-time stream and time-series DB. The monitoring data\n",
+    "is used to create real-time dashboards, detect drift, and analyze performance.
    \n", + "To monitor a deployed model, apply `set_tracking()`.\n" ] }, { @@ -624,7 +632,7 @@ "name": "python", "nbconvert_exporter": "python", "pygments_lexer": "ipython3", - "version": "3.7.7" + "version": "3.9.13" } }, "nbformat": 4, diff --git a/docs/feature-store/feature-sets.md b/docs/feature-store/feature-sets.md index 1aafe1c8ca4..f13db6e6b32 100644 --- a/docs/feature-store/feature-sets.md +++ b/docs/feature-store/feature-sets.md @@ -32,8 +32,13 @@ The feature set object contains the following information: Create a {py:class}`~mlrun.feature_store.FeatureSet` with the base definitions: * **name** — The feature set name is a unique name within a project. -* **entities** — Each feature set must be associated with one or more index column. When joining feature sets, the key columns +* **entities** — Each feature set must be associated with one or more index columns. When joining feature sets, the key columns are determined by the relations field if it exists, and otherwise by the entities. + +```{admonition} Caution +Avoid using timestamps or bool as entities. +``` + * **timestamp_key** — (optional) Used for specifying the time field when joining by time. * **engine** — The processing engine type: - spark — Good for simple batch transformations diff --git a/docs/feature-store/feature-store-overview.md b/docs/feature-store/feature-store-overview.md index 020e7d30bbd..00ccd6220a3 100644 --- a/docs/feature-store/feature-store-overview.md +++ b/docs/feature-store/feature-store-overview.md @@ -54,6 +54,16 @@ serving process. The next step is to define the [feature vector](feature-vectors.html). Call the {py:meth}`~mlrun.feature_store.get_offline_features` function to join together features across different feature sets. +### Ingestion engines + +MLRun supports several ingestion engines: + +- `storey` engine (default) is designed for real-time data (e.g. individual records) that will be transformed using Python functions and classes +- `pandas` engine is designed for batch data that can fit into memory that will be transformed using Pandas dataframes. Pandas is used for testing, and is not recommended for production deployments +- `spark` engine is designed for batch data. + +See also [transformation — engine support](./transformations.html#supporting-multiple-engines). + ## Training and serving using the feature store
    feature-store-training
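
To illustrate the `engine` choice introduced above, here is a minimal ingestion sketch, assuming the module-level `mlrun.feature_store.ingest` helper and illustrative data and names:

```python
import pandas as pd

import mlrun.feature_store as fstore

# illustrative batch data that fits in memory
df = pd.DataFrame({"patient_id": ["p1", "p2"], "temperature": [36.6, 38.2]})

# the engine is set per feature set; omitting it selects the default "storey"
measurements = fstore.FeatureSet(
    "measurements",
    entities=[fstore.Entity("patient_id")],
    engine="pandas",
)
fstore.ingest(measurements, df)
```

The same code runs unchanged with `engine="storey"`; only the `spark` engine additionally requires a Spark session or service.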
diff --git a/docs/feature-store/feature-vectors.md b/docs/feature-store/feature-vectors.md
index c4ecdc37c0f..ec969ba8fbb 100644
--- a/docs/feature-store/feature-vectors.md
+++ b/docs/feature-store/feature-vectors.md
@@ -85,11 +85,13 @@ You can define relations within a feature set in two ways:

You can define a graph using the `join_graph` parameter ({py:meth}`~mlrun.feature_store.FeatureVector`), which defines the join type. You can use the graph to define complex joins and pass on the relations to the vector. Currently, only
one branch (DAG) is supported. This means that operations involving brackets are not available.
+
+You can merge two feature sets where the left one has more entities only if all of the right feature set's entities also exist in the left feature set.

When using a left join, you must explicitly specify whether you want to perform an `as_of` join or not. The left join type is the only one that implements the "as_of" join.

- Example, assuming three feature sets: [fs1, fs2. fs3]:
+An example, assuming three feature sets: [fs1, fs2, fs3]:
```
 join_graph = JoinGraph(first_feature_set=fs_1).inner(fs_2).outer(fs_3)
 vector = FeatureVector("myvector", features,
diff --git a/docs/feature-store/sources-targets.md b/docs/feature-store/sources-targets.md
index 3ee290c2cd3..08c0725432a 100644
--- a/docs/feature-store/sources-targets.md
+++ b/docs/feature-store/sources-targets.md
@@ -149,9 +149,12 @@ NFS, S3, Azure blob storage, Redis, SQL, and on Iguazio DB/FS.

## ParquetTarget

+{py:meth}`~mlrun.datastore.ParquetTarget` is the default target for offline data.
+The Parquet file is ideal for fetching large sets of data for training.
+
### Partitioning

-When writing data to a {py:meth}`~mlrun.datastore.ParquetTarget`, you can use partitioning. Partitioning organizes data
+When writing data to a ParquetTarget, you can use partitioning. Partitioning organizes data
in Parquet files by dividing large data sets into smaller and more manageable pieces. The data is divided into separate files according to specific criteria, for example: date, time, or specific values in a column.
Partitioning, when configured correctly, improves read performance by reducing the amount of data that needs to be
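
To make the partitioning options above concrete, a minimal sketch, assuming the `ParquetTarget` keyword arguments shown (the target path is a placeholder):

```python
from mlrun.datastore.targets import ParquetTarget

# partition the offline parquet output by time, down to daily granularity;
# this produces year=/month=/day= subdirectories under the target path
target = ParquetTarget(
    name="offline",
    path="v3io:///projects/my-proj/measurements.parquet",  # placeholder path
    partitioned=True,
    time_partitioning_granularity="day",
)
```

The target would then be passed to ingestion, for example `fstore.ingest(fset, df, targets=[target])`.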
diff --git a/docs/feature-store/transformations.md b/docs/feature-store/transformations.md
index 0f1f40c91be..f8dd6989751 100644
--- a/docs/feature-store/transformations.md
+++ b/docs/feature-store/transformations.md
@@ -57,7 +57,7 @@ where `[a-z]+` is the name of an aggregation.

```{admonition} Warning
You must ensure that your features will not conflict with the automatically generated feature names. For example,
-when using `add_aggregation()` on a feature X, you may get a genegated feature name of `X_count_1h`.
+when using `add_aggregation()` on a feature X, you may get a generated feature name of `X_count_1h`.
But if your dataset already contains `X_count_1h`, this would result in either unreliable aggregations or errors.
```

@@ -146,7 +146,7 @@ All time windows are aligned to the epoch (1970-01-01T00:00:00Z).

## Built-in transformations

MLRun, and the associated `storey` package, have a built-in library of [transformation functions](../serving/available-steps.html) that can be
-applied as steps in the feature-set's internal execution graph. To add steps to the graph,
+applied as steps in the feature-set's internal execution graph.
To add steps to the graph,
reference them from the {py:class}`~mlrun.feature_store.FeatureSet` object by using the
{py:attr}`~mlrun.feature_store.FeatureSet.graph` property. Then, new steps can be added to the graph using the functions in
{py:mod}`storey.transformations` (follow the link to browse the documentation and the
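
The generated-name convention warned about above (`<column>_<operation>_<window>`) comes from `add_aggregation`. A short sketch with illustrative names:

```python
import mlrun.feature_store as fstore

ticks = fstore.FeatureSet(
    "stock-ticks", entities=[fstore.Entity("symbol")], timestamp_key="time"
)

# generates price_min_1h, price_max_1h, price_min_2h, and price_max_2h,
# evaluated every 10 minutes; no source column may already use these names
ticks.add_aggregation(
    column="price",
    operations=["min", "max"],
    windows=["1h", "2h"],
    period="10m",
)
```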
diff --git a/docs/glossary.md b/docs/glossary.md
index aba5ef97238..d0e931820a4 100644
--- a/docs/glossary.md
+++ b/docs/glossary.md
@@ -9,12 +9,12 @@
| Feature vector | A combination of multiple features originating from different feature sets. See [Creating and using feature vectors](./feature-store/feature-vectors.html). |
| HTTPRunDB | API for wrapper to the internal DB in MLRun. See [HTTPRunDB](./api/mlrun.db.html#mlrun.db.httpdb.HTTPRunDB). |
| hub | Used in code to reference the [MLRun function hub](./runtimes/load-from-hub.html). |
-| MLRun function | An abstraction over the code, extra packages, runtime configuration and desired resources which allow execution in a local environment and on various serverless engines on top of K8s. See [Functions](./runtimes/functions.html). |
-| MLRun Function hub | A collection of pre-built MLRun functions avilable for usage. See [MLRun function hub](./runtimes/load-from-hub.html). |
-| MLRun project | A logical container for all the work on a particular activity/application that includes functions, workflow, artifacts, secrets, and more, and can be assigned to a specific group of users. See [Projects](./projects/project.html). |
-| mpijob | One of the MLRun batch runtimes that runs distributed jobs and Horovod over the MPI job operator, used mainly for deep learning jobs. See [MPIJob and Horovod runtime](./runtimes/horovod.html). |
-| Nuclio function | Subtype of MLRun function that uses the Nuclio runtime for any generic real-time function. See [Nuclio real-time functions](./concepts/nuclio-real-time-functions.html) and [Nuclio documentation](https://nuclio.io/docs/stable/). |
-| Serving function | Subtype of MLRun function that uses the Nuclio runtime specifically for serving ML models or real-time pipelines. See [Real-time serving pipelines (graphs)](./serving/serving-graph.html). |
+| MLRun function | An abstraction over the code, extra packages, runtime configuration and desired resources which allow execution in a local environment and on various serverless engines on top of K8s. See [MLRun serverless functions](./concepts/functions-concepts.html#mlrun-serverless-functions) and [Creating and using functions](./runtimes/functions.html). |
+| MLRun Function Hub | A collection of pre-built MLRun functions available for usage. See [MLRun function hub](./runtimes/load-from-hub.html). |
+| MLRun project | A logical container for all the work on a particular activity/application that includes functions, workflow, artifacts, secrets, and more, and can be assigned to a specific group of users. See [Projects](./projects/project.html). |
+| mpijob | One of the MLRun batch runtimes that runs distributed jobs and Horovod over the MPI job operator, used mainly for deep learning jobs. See [MLRun MPIJob and Horovod runtime](./runtimes/horovod.html). |
+| Nuclio function | Subtype of MLRun function that uses the Nuclio runtime for any generic real-time function. See [Nuclio real-time functions](./concepts/nuclio-real-time-functions.html) and [Nuclio documentation](https://docs.nuclio.io/en/stable/index.html). |
+| Serving function | Subtype of MLRun function that uses the Nuclio runtime specifically for serving ML models or real-time pipelines. See [Real-time serving pipelines (graphs)](./serving/serving-graph.html) and [Model serving pipelines](./serving/build-graph-model-serving.html). |
| storey | Asynchronous streaming library for real time event processing and feature extraction. Used in Iguazio's feature store and real-time pipelines. See [storey.transformations - Graph transformations](./api/storey.transformations.html). |
|  |  |

diff --git a/docs/index.md b/docs/index.md
index 4f7f7e710b1..9d708728262 100644
--- a/docs/index.md
+++ b/docs/index.md
@@ -67,10 +67,10 @@ Project access can be restricted to a set of users and roles.

{octicon}`mortar-board` **Docs:**
{bdg-link-info}`Projects and automation <./projects/project.html>`
{bdg-link-info}`CI/CD integration <./projects/ci-integration.html>`
-, {octicon}`code-square` **Tutorials:**
-{bdg-link-primary}`quick start <./tutorials/01-mlrun-basics.html>`
+
    {octicon}`code-square` **Tutorials:** +{bdg-link-primary}`Quick start <./tutorials/01-mlrun-basics.html>` {bdg-link-primary}`Automated ML pipeline <./tutorials/04-pipeline.html>` -, {octicon}`video` **Videos:** +
    {octicon}`video` **Videos:** {bdg-link-warning}`Quick start ` ````` @@ -82,10 +82,11 @@ In addition, the MLRun [**Feature store**](./feature-store/feature-store.html) a `````{div} full-width {octicon}`mortar-board` **Docs:** +{bdg-link-info}`Ingest and process data ` {bdg-link-info}`Feature store <./feature-store/feature-store.html>` -{bdg-link-info}`Data & artifacts <./concepts/data.html>` -, {octicon}`code-square` **Tutorials:** -{bdg-link-primary}`quick start <./tutorials/01-mlrun-basics.html>` +{bdg-link-info}`Data and artifacts <./concepts/data.html>` +
    {octicon}`code-square` **Tutorials:** +{bdg-link-primary}`Quick start <./tutorials/01-mlrun-basics.html>` {bdg-link-primary}`Feature store <./feature-store/basic-demo.html>` ````` @@ -96,13 +97,14 @@ MLRun allows you to easily build ML pipelines that take data from various source `````{div} full-width {octicon}`mortar-board` **Docs:** +{bdg-link-info}`Develop and train models ` {bdg-link-info}`Model training and tracking <./development/model-training-tracking.html>` {bdg-link-info}`Batch runs and workflows <./concepts/runs-workflows.html>` -, {octicon}`code-square` **Tutorials:** -{bdg-link-primary}`Train & eval models <./tutorials/02-model-training.html>` +
    {octicon}`code-square` **Tutorials:** +{bdg-link-primary}`Train, compare, and register models <./tutorials/02-model-training.html>` {bdg-link-primary}`Automated ML pipeline <./tutorials/04-pipeline.html>` -, {octicon}`video` **Videos:** -{bdg-link-warning}`Train & compare models ` +
    {octicon}`video` **Videos:** +{bdg-link-warning}`Train and compare models ` ````` ### Deploy models and applications @@ -112,13 +114,14 @@ MLRun rapidly deploys and manages production-grade real-time or batch applicatio `````{div} full-width {octicon}`mortar-board` **Docs:** +{bdg-link-info}`Deploy models and applications ` {bdg-link-info}`Realtime pipelines <./serving/serving-graph.html>` {bdg-link-info}`Batch inference <./deployment/batch_inference.html>` -, {octicon}`code-square` **Tutorials:** +
    {octicon}`code-square` **Tutorials:** {bdg-link-primary}`Realtime serving <./tutorials/03-model-serving.html>` {bdg-link-primary}`Batch inference <./tutorials/07-batch-infer.html>` {bdg-link-primary}`Advanced pipeline <./tutorials/07-batch-infer.html>` -, {octicon}`video` **Videos:** +
    {octicon}`video` **Videos:** {bdg-link-warning}`Serve pre-trained models ` ````` @@ -129,9 +132,10 @@ Observability is built into the different MLRun objects (data, functions, jobs, `````{div} full-width {octicon}`mortar-board` **Docs:** +{bdg-link-info}`Monitor and alert ` {bdg-link-info}`Model monitoring overview <./monitoring/model-monitoring-deployment.html>` -, {octicon}`code-square` **Tutorials:** -{bdg-link-primary}`Model monitoring & drift detection <./tutorials/05-model-monitoring.html>` +
    {octicon}`code-square` **Tutorials:**
{bdg-link-primary}`Model monitoring and drift detection <./tutorials/05-model-monitoring.html>`

`````


@@ -192,11 +196,11 @@ MLRun includes the following major components:

**{ref}`Serverless functions `:** An automatically deployed software package with one or more methods and runtime-specific attributes (such as image, libraries, command, arguments, resources, etc.).

-**{ref}`Data & artifacts `:** Glueless connectivity to various data sources, metadata management, catalog, and versioning for structured/unstructured artifacts.
+**{ref}`Data and artifacts `:** Glueless connectivity to various data sources, metadata management, catalog, and versioning for structured/unstructured artifacts.

**{ref}`Feature store `:** Automatically collects, prepares, catalogs, and serves production data features for development (offline) and real-time (online) deployment using minimal engineering effort.

-**{ref}`Batch Runs & workflows `:** Execute one or more functions with specific parameters and collect, track, and compare all their results and artifacts.
+**{ref}`Batch Runs and workflows `:** Execute one or more functions with specific parameters and collect, track, and compare all their results and artifacts.

**{ref}`Real-time serving pipeline `:** Rapid deployment of scalable data and ML pipelines using real-time serverless technology, including API handling, data preparation/enrichment, model serving, ensembles, driving and measuring actions, etc.

diff --git a/docs/install.md b/docs/install.md
index 4b488d9f2da..d5e8e7df3c4 100644
--- a/docs/install.md
+++ b/docs/install.md
@@ -8,6 +8,8 @@ MLRun has two main components, the service and the client (SDK and UI):
- MLRun service runs over Kubernetes (can also be deployed using local Docker for demo and test purposes). It can orchestrate and integrate with other open source frameworks, as shown in the following diagram.
- MLRun client SDK is installed in your development environment and interacts with the service using REST API calls.

+This release of MLRun supports only Python 3.9 for both the server and the client.
+
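
A quick client-side check of the version constraint above, plus pointing the SDK at a service; the URL and artifact path are placeholder values:

```python
import sys

import mlrun

# this release targets Python 3.9 only; fail fast elsewhere
assert sys.version_info[:2] == (3, 9), "MLRun 1.6.x requires Python 3.9"

# connect the client SDK to a remote MLRun service (placeholder values)
mlrun.set_environment("http://mlrun-api:8080", artifact_path="./artifacts")
```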