Skip to content

Commit 3436c2d

Browse files
strickvl and claude committed
Add risk assessment HTML visualization as second artifact
- Add generate_risk_visualization() function to create styled HTML report
- Update risk_assessment step to return tuple: (risk_scores_dict, risk_visualization_html)
- Add RISK_VISUALIZATION constant to annotations
- Update training pipeline and run.py to handle new return signature
- Visualization includes overall risk score, component risks, and detailed hazard breakdown
- Color-coded risk levels (LOW/MEDIUM/HIGH) with severity badges for hazards
- Professional styling with gradient header and responsive card layout

🤖 Generated with [Claude Code](https://claude.ai/code)

Co-Authored-By: Claude <[email protected]>
1 parent 409131e commit 3436c2d

File tree

4 files changed

+249
-8
lines changed

4 files changed

+249
-8
lines changed

credit-scorer/run.py

Lines changed: 8 additions & 3 deletions
Original file line numberDiff line numberDiff line change
@@ -170,9 +170,14 @@ def main(
170170
# from artifact store via Client.get_artifact_version() as designed
171171

172172
training_pipeline = training.with_options(**pipeline_args)
173-
model, eval_results, eval_visualization, risk_scores, *_ = (
174-
training_pipeline(**train_args)
175-
)
173+
(
174+
model,
175+
eval_results,
176+
eval_visualization,
177+
risk_scores,
178+
risk_visualization,
179+
*_,
180+
) = training_pipeline(**train_args)
176181

177182
# Store for potential chaining
178183
outputs["model"] = model

credit-scorer/src/constants/annotations.py

Lines changed: 1 addition & 0 deletions
Original file line numberDiff line numberDiff line change
@@ -58,6 +58,7 @@ class Artifacts(StrEnum):
5858
EVALUATION_RESULTS = "evaluation_results"
5959
EVAL_VISUALIZATION = "evaluation_visualization"
6060
RISK_SCORES = "risk_scores"
61+
RISK_VISUALIZATION = "risk_visualization"
6162
FAIRNESS_REPORT = "fairness_report"
6263
RISK_REGISTER = "risk_register"
6364

credit-scorer/src/pipelines/training.py

Lines changed: 8 additions & 2 deletions
Original file line numberDiff line numberDiff line change
@@ -91,11 +91,17 @@ def training(
9191
)
9292

9393
# Perform risk assessment based on evaluation results
94-
risk_scores = risk_assessment(
94+
risk_scores, risk_visualization = risk_assessment(
9595
evaluation_results=eval_results,
9696
risk_register_path=risk_register_path,
9797
approval_thresholds=approval_thresholds,
9898
)
9999

100100
# Return artifacts to be used by deployment pipeline
101-
return model, eval_results, eval_visualization, risk_scores
101+
return (
102+
model,
103+
eval_results,
104+
eval_visualization,
105+
risk_scores,
106+
risk_visualization,
107+
)

credit-scorer/src/steps/training/risk_assessment.py

Lines changed: 232 additions & 3 deletions
Original file line numberDiff line numberDiff line change
@@ -17,11 +17,12 @@
1717

1818
from datetime import datetime
1919
from pathlib import Path
20-
from typing import Annotated, Dict, List
20+
from typing import Annotated, Dict, List, Tuple
2121

2222
from openpyxl import Workbook, load_workbook
2323
from zenml import get_step_context, log_metadata, step
2424
from zenml.logger import get_logger
25+
from zenml.types import HTMLString
2526

2627
from src.constants import Artifacts as A
2728
from src.constants import Hazards
@@ -136,12 +137,235 @@ def get_article_for_hazard(hazard_id: str) -> str:
136137
) # Default to Risk Management
137138

138139

140+
def generate_risk_visualization(risk_scores: Dict, run_id: str) -> HTMLString:
    """Render risk assessment results as a self-contained HTML report.

    Args:
        risk_scores: Assessment output. Keys read here: "overall",
            "risk_auc", "risk_bias" (floats, default 0.0) and "hazards"
            (list of hazard dicts, default empty).
        run_id: Pipeline run identifier shown in the report header.

    Returns:
        An ``HTMLString`` so the ZenML dashboard renders the artifact
        as HTML instead of plain text.
    """
    # Local import: the module-level import only brings in `datetime`.
    from datetime import timezone

    overall_risk = risk_scores.get("overall", 0.0)
    auc_risk = risk_scores.get("risk_auc", 0.0)
    bias_risk = risk_scores.get("risk_bias", 0.0)
    hazards = risk_scores.get("hazards", [])

    # Map the overall score onto a traffic-light band (LOW/MEDIUM/HIGH).
    # Thresholds 0.3 / 0.7 are presentation-only cutoffs for this report.
    if overall_risk < 0.3:
        risk_level, risk_color, risk_bg = "LOW", "#28a745", "#d4edda"
    elif overall_risk < 0.7:
        risk_level, risk_color, risk_bg = "MEDIUM", "#ffc107", "#fff3cd"
    else:
        risk_level, risk_color, risk_bg = "HIGH", "#dc3545", "#f8d7da"

    # (The previously defined `severity_colors` mapping was unused here;
    # per-hazard badge colors are resolved inside generate_hazards_html.)

    html_content = f"""
<!DOCTYPE html>
<html>
<head>
    <title>Risk Assessment Report - {run_id}</title>
    <style>
        body {{
            font-family: 'Segoe UI', Tahoma, Geneva, Verdana, sans-serif;
            line-height: 1.6;
            color: #333;
            max-width: 1000px;
            margin: 0 auto;
            padding: 20px;
            background-color: #f8f9fa;
        }}
        .header {{
            text-align: center;
            margin-bottom: 30px;
            padding: 20px;
            background: linear-gradient(135deg, #667eea 0%, #764ba2 100%);
            color: white;
            border-radius: 10px;
            box-shadow: 0 4px 6px rgba(0,0,0,0.1);
        }}
        .risk-overview {{
            display: grid;
            grid-template-columns: repeat(auto-fit, minmax(200px, 1fr));
            gap: 20px;
            margin-bottom: 30px;
        }}
        .risk-card {{
            background: white;
            padding: 20px;
            border-radius: 8px;
            box-shadow: 0 2px 4px rgba(0,0,0,0.1);
            text-align: center;
        }}
        .risk-score {{
            font-size: 2.5em;
            font-weight: bold;
            margin: 10px 0;
        }}
        .overall-risk {{
            background: {risk_bg};
            border-left: 5px solid {risk_color};
        }}
        .overall-risk .risk-score {{
            color: {risk_color};
        }}
        .risk-level {{
            font-size: 1.2em;
            font-weight: bold;
            color: {risk_color};
            margin-top: 10px;
        }}
        .hazards-section {{
            background: white;
            padding: 20px;
            border-radius: 8px;
            box-shadow: 0 2px 4px rgba(0,0,0,0.1);
            margin-bottom: 20px;
        }}
        .hazard-item {{
            border-left: 4px solid #ddd;
            padding: 15px;
            margin: 10px 0;
            background: #f8f9fa;
            border-radius: 0 5px 5px 0;
        }}
        .hazard-high {{
            border-left-color: #dc3545;
            background: #fff5f5;
        }}
        .hazard-medium {{
            border-left-color: #ffc107;
            background: #fffbf0;
        }}
        .hazard-low {{
            border-left-color: #28a745;
            background: #f0fff4;
        }}
        .hazard-id {{
            font-weight: bold;
            color: #495057;
            margin-bottom: 5px;
        }}
        .hazard-description {{
            margin-bottom: 10px;
            color: #6c757d;
        }}
        .hazard-mitigation {{
            font-style: italic;
            color: #495057;
            border-top: 1px solid #dee2e6;
            padding-top: 10px;
        }}
        .severity-badge {{
            display: inline-block;
            padding: 4px 12px;
            border-radius: 20px;
            font-size: 0.8em;
            font-weight: bold;
            text-transform: uppercase;
            color: white;
            margin-bottom: 10px;
        }}
        .no-hazards {{
            text-align: center;
            padding: 40px;
            color: #28a745;
            background: #d4edda;
            border-radius: 8px;
            border: 2px solid #28a745;
        }}
        .timestamp {{
            text-align: center;
            color: #6c757d;
            font-size: 0.9em;
            margin-top: 20px;
        }}
    </style>
</head>
<body>
    <div class="header">
        <h1>🛡️ Risk Assessment Report</h1>
        <p>EU AI Act Article 9 Compliance</p>
        <p><strong>Run ID:</strong> {run_id}</p>
    </div>

    <div class="risk-overview">
        <div class="risk-card overall-risk">
            <h3>Overall Risk</h3>
            <div class="risk-score">{overall_risk:.2f}</div>
            <div class="risk-level">{risk_level}</div>
        </div>
        <div class="risk-card">
            <h3>Model Performance Risk</h3>
            <div class="risk-score" style="color: #6c757d;">{auc_risk:.2f}</div>
            <small>Based on AUC Score</small>
        </div>
        <div class="risk-card">
            <h3>Bias Risk</h3>
            <div class="risk-score" style="color: #6c757d;">{bias_risk:.2f}</div>
            <small>Fairness Assessment</small>
        </div>
    </div>

    <div class="hazards-section">
        <h2>📋 Identified Hazards</h2>
        {generate_hazards_html(hazards) if hazards else '<div class="no-hazards"><h3>✅ No Hazards Identified</h3><p>The model meets all risk thresholds for this assessment.</p></div>'}
    </div>

    <div class="timestamp">
        Generated on {datetime.now(timezone.utc).strftime('%Y-%m-%d %H:%M:%S UTC')}
    </div>
</body>
</html>
"""

    return HTMLString(html_content)
331+
def generate_hazards_html(hazards: List[Dict]) -> str:
    """Build the HTML fragment listing each identified hazard.

    Args:
        hazards: Hazard dicts; keys read are "id", "severity",
            "description" and "mitigation" — all optional, with
            placeholder defaults for missing keys.

    Returns:
        Concatenated ``<div class="hazard-item">`` cards, or "" when
        ``hazards`` is empty.
    """
    # Local import keeps the module import block untouched.
    import html as _html

    # Badge background per severity; unknown severities fall back to grey.
    badge_colors = {
        "low": "#28a745",
        "medium": "#ffc107",
        "high": "#dc3545",
        "critical": "#6f42c1",
    }

    cards = []
    for hazard in hazards:
        severity = _html.escape(str(hazard.get("severity", "low")).lower())
        badge_color = badge_colors.get(severity, "#6c757d")
        # Escape every free-text field: hazard data ends up inside an HTML
        # document, so unescaped values could inject markup into the report.
        hazard_id = _html.escape(str(hazard.get("id", "UNKNOWN")))
        description = _html.escape(
            str(hazard.get("description", "No description available"))
        )
        mitigation = _html.escape(
            str(hazard.get("mitigation", "No mitigation specified"))
        )

        cards.append(f"""
        <div class="hazard-item hazard-{severity}">
            <div class="hazard-id">{hazard_id}</div>
            <div class="severity-badge" style="background-color: {badge_color};">
                {severity.upper()}
            </div>
            <div class="hazard-description">{description}</div>
            <div class="hazard-mitigation">
                <strong>Mitigation:</strong> {mitigation}
            </div>
        </div>
        """)

    return "".join(cards)
359+
360+
139361
@step
140362
def risk_assessment(
141363
evaluation_results: Dict,
142364
approval_thresholds: Dict[str, float],
143365
risk_register_path: str = "docs/risk/risk_register.xlsx",
144-
) -> Annotated[Dict, A.RISK_SCORES]:
366+
) -> Tuple[
367+
Annotated[Dict, A.RISK_SCORES], Annotated[HTMLString, A.RISK_VISUALIZATION]
368+
]:
145369
"""Compute risk scores & update register. Article 9 compliant."""
146370
scores = score_risk(evaluation_results)
147371
hazards = identify_hazards(evaluation_results, scores)
@@ -288,4 +512,9 @@ def risk_assessment(
288512
"risk_register_path": str(risk_register_path),
289513
}
290514
log_metadata(metadata=result)
291-
return result
515+
516+
# Generate visualization
517+
run_id = get_step_context().pipeline_run.id
518+
risk_visualization = generate_risk_visualization(result, str(run_id))
519+
520+
return result, risk_visualization

0 commit comments

Comments
 (0)