Skip to content

Commit f58c95f

Browse files
authored
Merge branch 'main' into feature/multi_model_deployment
2 parents 941796f + 277cb8e commit f58c95f

File tree

14 files changed

+325
-138
lines changed

14 files changed

+325
-138
lines changed

.github/workflows/run-forecast-unit-tests.yml

Lines changed: 1 addition & 1 deletion
Original file line numberDiff line numberDiff line change
@@ -56,6 +56,6 @@ jobs:
5656
$CONDA/bin/conda init
5757
source /home/runner/.bashrc
5858
pip install -r test-requirements-operators.txt
59-
pip install "oracle-automlx[forecasting]>=24.4.1"
59+
pip install "oracle-automlx[forecasting]>=25.1.1"
6060
pip install pandas>=2.2.0
6161
python -m pytest -v -p no:warnings --durations=5 tests/operators/forecast

.github/workflows/run-unittests-py39-py310.yml renamed to .github/workflows/run-unittests-py310-py311.yml

Lines changed: 3 additions & 4 deletions
Original file line numberDiff line numberDiff line change
@@ -1,4 +1,4 @@
1-
name: "[Py3.9-3.11] - All Unit Tests"
1+
name: "[Py3.10-3.11] - All Unit Tests"
22

33
on:
44
workflow_dispatch:
@@ -33,15 +33,14 @@ jobs:
3333
strategy:
3434
fail-fast: false
3535
matrix:
36-
python-version: ["3.9", "3.10", "3.11"]
36+
python-version: ["3.10", "3.11"]
3737
name: ["unitary", "slow_tests"]
3838
include:
3939
- name: "unitary"
4040
test-path: "tests/unitary"
4141
# `model` tests running in "slow_tests",
4242
# `feature_store` tests has its own test suite
43-
# `forecast` tests not supported in python 3.9,3.10 (automlx dependency). Tests are running in python3.8 test env, see run-unittests-py38-cov-report.yml
44-
# 'pii' tests run only with py3.8, 'datapane' library conflicts with pandas>2.2.0, which used in py3.9/3.10 setup
43+
# `forecast` tests not run in this suite
4544
# 'hpo' tests hangs if run together with all unitary tests. Tests running in separate command before running all unitary
4645
ignore-path: |
4746
--ignore tests/unitary/with_extras/model \

.github/workflows/run-unittests-py38-cov-report.yml renamed to .github/workflows/run-unittests-py39-cov-report.yml

Lines changed: 4 additions & 4 deletions
Original file line numberDiff line numberDiff line change
@@ -1,4 +1,4 @@
1-
name: "[Py3.8][COV REPORT] - All Unit Tests"
1+
name: "[Py3.9][COV REPORT] - All Unit Tests"
22

33
on:
44
workflow_dispatch:
@@ -26,7 +26,7 @@ env:
2626

2727
jobs:
2828
test:
29-
name: python 3.8, ${{ matrix.name }}
29+
name: python 3.9, ${{ matrix.name }}
3030
runs-on: ubuntu-latest
3131
timeout-minutes: 90
3232

@@ -58,7 +58,7 @@ jobs:
5858

5959
- uses: actions/setup-python@v5
6060
with:
61-
python-version: "3.8"
61+
python-version: "3.9"
6262
cache: "pip"
6363
cache-dependency-path: |
6464
pyproject.toml
@@ -71,7 +71,7 @@ jobs:
7171
name: "Test env setup"
7272
timeout-minutes: 30
7373

74-
# Installing pii deps for python3.8 test setup only, it will not work with python3.9/3.10, because
74+
# Installing pii deps for python3.9 test setup only, it will not work with python3.10/3.11, because
7575
# 'datapane' library conflicts with pandas>2.2.0, which used in py3.9/3.10 setup
7676
- name: "Install PII dependencies"
7777
run: |

ads/opctl/anomaly_detection.py

Lines changed: 11 additions & 0 deletions
Original file line numberDiff line numberDiff line change
@@ -0,0 +1,11 @@
1+
#!/usr/bin/env python
2+
3+
# Copyright (c) 2025 Oracle and/or its affiliates.
4+
# Licensed under the Universal Permissive License v 1.0 as shown at https://oss.oracle.com/licenses/upl/
5+
6+
from ads.opctl.operator.lowcode.anomaly.__main__ import operate
7+
from ads.opctl.operator.lowcode.anomaly.operator_config import AnomalyOperatorConfig
8+
9+
if __name__ == "__main__":
10+
config = AnomalyOperatorConfig()
11+
operate(config)

ads/opctl/forecast.py

Lines changed: 11 additions & 0 deletions
Original file line numberDiff line numberDiff line change
@@ -0,0 +1,11 @@
1+
#!/usr/bin/env python
2+
3+
# Copyright (c) 2025 Oracle and/or its affiliates.
4+
# Licensed under the Universal Permissive License v 1.0 as shown at https://oss.oracle.com/licenses/upl/
5+
6+
from ads.opctl.operator.lowcode.forecast.__main__ import operate
7+
from ads.opctl.operator.lowcode.forecast.operator_config import ForecastOperatorConfig
8+
9+
if __name__ == "__main__":
10+
config = ForecastOperatorConfig()
11+
operate(config)

ads/opctl/operator/lowcode/forecast/__main__.py

Lines changed: 5 additions & 5 deletions
Original file line numberDiff line numberDiff line change
@@ -1,7 +1,6 @@
11
#!/usr/bin/env python
2-
# -*- coding: utf-8 -*--
32

4-
# Copyright (c) 2023, 2024 Oracle and/or its affiliates.
3+
# Copyright (c) 2023, 2025 Oracle and/or its affiliates.
54
# Licensed under the Universal Permissive License v 1.0 as shown at https://oss.oracle.com/licenses/upl/
65

76
import json
@@ -15,17 +14,17 @@
1514
from ads.opctl.operator.common.const import ENV_OPERATOR_ARGS
1615
from ads.opctl.operator.common.utils import _parse_input_args
1716

17+
from .model.forecast_datasets import ForecastDatasets, ForecastResults
1818
from .operator_config import ForecastOperatorConfig
19-
from .model.forecast_datasets import ForecastDatasets
2019
from .whatifserve import ModelDeploymentManager
2120

2221

23-
def operate(operator_config: ForecastOperatorConfig) -> None:
22+
def operate(operator_config: ForecastOperatorConfig) -> ForecastResults:
2423
"""Runs the forecasting operator."""
2524
from .model.factory import ForecastOperatorModelFactory
2625

2726
datasets = ForecastDatasets(operator_config)
28-
ForecastOperatorModelFactory.get_model(
27+
results = ForecastOperatorModelFactory.get_model(
2928
operator_config, datasets
3029
).generate_report()
3130
# saving to model catalog
@@ -36,6 +35,7 @@ def operate(operator_config: ForecastOperatorConfig) -> None:
3635
if spec.what_if_analysis.model_deployment:
3736
mdm.create_deployment()
3837
mdm.save_deployment_info()
38+
return results
3939

4040

4141
def verify(spec: Dict, **kwargs: Dict) -> bool:

ads/opctl/operator/lowcode/forecast/model/automlx.py

Lines changed: 41 additions & 22 deletions
Original file line numberDiff line numberDiff line change
@@ -1,5 +1,5 @@
11
#!/usr/bin/env python
2-
# Copyright (c) 2023, 2024 Oracle and/or its affiliates.
2+
# Copyright (c) 2023, 2025 Oracle and/or its affiliates.
33
# Licensed under the Universal Permissive License v 1.0 as shown at https://oss.oracle.com/licenses/upl/
44
import logging
55
import os
@@ -66,8 +66,7 @@ def preprocess(self, data, series_id): # TODO: re-use self.le for explanations
6666
@runtime_dependency(
6767
module="automlx",
6868
err_msg=(
69-
"Please run `pip3 install oracle-automlx>=23.4.1` and "
70-
"`pip3 install oracle-automlx[forecasting]>=23.4.1` "
69+
"Please run `pip3 install oracle-automlx[forecasting]>=25.1.1` "
7170
"to install the required dependencies for automlx."
7271
),
7372
)
@@ -105,7 +104,7 @@ def _build_model(self) -> pd.DataFrame:
105104
engine_opts = (
106105
None
107106
if engine_type == "local"
108-
else ({"ray_setup": {"_temp_dir": "/tmp/ray-temp"}},)
107+
else {"ray_setup": {"_temp_dir": "/tmp/ray-temp"}}
109108
)
110109
init(
111110
engine=engine_type,
@@ -250,17 +249,18 @@ def _generate_report(self):
250249
self.explain_model()
251250

252251
global_explanation_section = None
253-
if self.spec.explanations_accuracy_mode != SpeedAccuracyMode.AUTOMLX:
254-
# Convert the global explanation data to a DataFrame
255-
global_explanation_df = pd.DataFrame(self.global_explanation)
256252

257-
self.formatted_global_explanation = (
258-
global_explanation_df / global_explanation_df.sum(axis=0) * 100
259-
)
260-
self.formatted_global_explanation = self.formatted_global_explanation.rename(
261-
{self.spec.datetime_column.name: ForecastOutputColumns.DATE},
262-
axis=1,
263-
)
253+
# Convert the global explanation data to a DataFrame
254+
global_explanation_df = pd.DataFrame(self.global_explanation)
255+
256+
self.formatted_global_explanation = (
257+
global_explanation_df / global_explanation_df.sum(axis=0) * 100
258+
)
259+
260+
self.formatted_global_explanation.rename(
261+
columns={self.spec.datetime_column.name: ForecastOutputColumns.DATE},
262+
inplace=True,
263+
)
264264

265265
aggregate_local_explanations = pd.DataFrame()
266266
for s_id, local_ex_df in self.local_explanation.items():
@@ -272,11 +272,15 @@ def _generate_report(self):
272272
self.formatted_local_explanation = aggregate_local_explanations
273273

274274
if not self.target_cat_col:
275-
self.formatted_global_explanation = self.formatted_global_explanation.rename(
276-
{"Series 1": self.original_target_column},
277-
axis=1,
275+
self.formatted_global_explanation = (
276+
self.formatted_global_explanation.rename(
277+
{"Series 1": self.original_target_column},
278+
axis=1,
279+
)
280+
)
281+
self.formatted_local_explanation.drop(
282+
"Series", axis=1, inplace=True
278283
)
279-
self.formatted_local_explanation.drop("Series", axis=1, inplace=True)
280284

281285
# Create a markdown section for the global explainability
282286
global_explanation_section = rc.Block(
@@ -425,7 +429,9 @@ def explain_model(self):
425429
# Use the MLExplainer class from AutoMLx to generate explanations
426430
explainer = automlx.MLExplainer(
427431
self.models[s_id]["model"],
428-
self.datasets.additional_data.get_data_for_series(series_id=s_id)
432+
self.datasets.additional_data.get_data_for_series(
433+
series_id=s_id
434+
)
429435
.drop(self.spec.datetime_column.name, axis=1)
430436
.head(-self.spec.horizon)
431437
if self.spec.additional_data
@@ -436,7 +442,9 @@ def explain_model(self):
436442

437443
# Generate explanations for the forecast
438444
explanations = explainer.explain_prediction(
439-
X=self.datasets.additional_data.get_data_for_series(series_id=s_id)
445+
X=self.datasets.additional_data.get_data_for_series(
446+
series_id=s_id
447+
)
440448
.drop(self.spec.datetime_column.name, axis=1)
441449
.tail(self.spec.horizon)
442450
if self.spec.additional_data
@@ -448,17 +456,28 @@ def explain_model(self):
448456
explanations_df = pd.concat(
449457
[exp.to_dataframe() for exp in explanations]
450458
)
451-
explanations_df["row"] = explanations_df.groupby("Feature").cumcount()
459+
explanations_df["row"] = explanations_df.groupby(
460+
"Feature"
461+
).cumcount()
452462
explanations_df = explanations_df.pivot(
453463
index="row", columns="Feature", values="Attribution"
454464
)
455465
explanations_df = explanations_df.reset_index(drop=True)
456466

457467
# Store the explanations in the local_explanation dictionary
458468
self.local_explanation[s_id] = explanations_df
469+
470+
self.global_explanation[s_id] = dict(
471+
zip(
472+
self.local_explanation[s_id].columns,
473+
np.nanmean((self.local_explanation[s_id]), axis=0),
474+
)
475+
)
459476
else:
460477
# Fall back to the default explanation generation method
461478
super().explain_model()
462479
except Exception as e:
463-
logger.warning(f"Failed to generate explanations for series {s_id} with error: {e}.")
480+
logger.warning(
481+
f"Failed to generate explanations for series {s_id} with error: {e}."
482+
)
464483
logger.debug(f"Full Traceback: {traceback.format_exc()}")

0 commit comments

Comments
 (0)