|
 generated by collectData.py. The health score is derived from multiple
 signals related to repository activity and efficiency and is aligned
 with the Actionable Steps and Healthy & Efficient Repository documents.
+
+UPDATED VERSION:
+- Supports dynamic metric selection per repository
+- Uses only selected metrics for calculation (as required by Issue #173)
+- Handles edge cases like missing metrics or empty selections
 """

 import json
...
 # Configuration
 DATA_PATH = "Backend/test_data.json"

+# Fake Database (simulates future DB / Okta integration)
+# Each repository selects its own metrics
+REPO_METRIC_SELECTION = {
+    "lrda_mobile": ["issue_resolution", "commit_volume"],
+    "oss_dev_analytics": [
+        "issue_resolution",
+        "issue_responsiveness",
+        "pr_responsiveness",
+        "contributor_activity",
+        "commit_volume"
+    ]
+}
+
 # Metric weights derived from Actionable Steps research
 # Weights are kept configurable so they can evolve as research changes
 WEIGHTS = {
...
     "commit_volume": 0.20
 }

-# Scoring helper functions
+# Scoring helper functions (convert raw values -> 0-100 scale)
 def score_issue_resolution(rate):
     """
     Convert issue resolution rate into a normalized score
@@ -225,23 +243,49 @@ def calculate_health_scores(data): |
     # Computing raw metrics
     issue_rate, issue_time = calculate_issue_metrics(issues)
     pr_time = calculate_pr_metrics(prs)
-    total_commmits, contributors = calculate_commit_metrics(commits)
+    total_commits, contributors = calculate_commit_metrics(commits)

     # Converting raw metrics into normalized scores
     metric_scores = {
         "issue_resolution": score_issue_resolution(issue_rate),
         "issue_responsiveness": score_time_hours(issue_time),
         "pr_responsiveness": score_time_hours(pr_time),
         "contributor_activity": score_contributors(contributors),
         "commit_volume": score_commits(total_commits),
     }

-    # Final weighted health score
-    final_score = round(
-        sum(metric_scores[m] * WEIGHTS[m] for m in WEIGHTS), 2
-    )
+    # Get selected metrics for this repo (dynamic behavior)
+    selected_metrics = REPO_METRIC_SELECTION.get(repo_name, [])
+
+    # Edge Case 1: No metrics selected
+    if len(selected_metrics) == 0:
+        raise ValueError(f"No metrics selected for {repo_name}")
+
+    total_weighted = 0
+    total_weights = 0
+
+    for metric in selected_metrics:
+        value = metric_scores.get(metric)
+
+        # Skip missing values safely
+        if value is None:
+            continue
+
+        weight = WEIGHTS.get(metric, 1)
+
+        total_weighted += value * weight
+        total_weights += weight
+
+    # Edge Case 2: All selected metrics had no valid data
+    if total_weights == 0:
+        raise ValueError(f"No valid metric data for {repo_name}")
+
+    # Final dynamic score
+    final_score = round(total_weighted / total_weights, 2)

+    # Storing results
     results[repo_name] = {
+        "selected_metrics": selected_metrics,
         "metrics": metric_scores,
         "final_score": final_score,
         "status": health_label(final_score)
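
To make the new scoring path concrete, here is a rough standalone sketch of the weighted average it computes, separate from the commit itself. The metric scores and the 0.25 weight are made-up illustrative values (only commit_volume = 0.20 is visible in the diff above), and the selection mirrors the two metrics chosen for lrda_mobile.

# Minimal sketch of the dynamic weighted average, with placeholder numbers.
metric_scores = {"issue_resolution": 80.0, "commit_volume": 55.0}   # illustrative scores
weights = {"issue_resolution": 0.25, "commit_volume": 0.20}         # 0.25 is an assumed weight
selected = ["issue_resolution", "commit_volume"]                    # e.g. the lrda_mobile selection

# Only selected metrics with a computed score contribute, and the divisor
# shrinks to the sum of their weights, so a smaller selection is not penalized.
total_weighted = sum(metric_scores[m] * weights.get(m, 1) for m in selected if m in metric_scores)
total_weights = sum(weights.get(m, 1) for m in selected if m in metric_scores)

final_score = round(total_weighted / total_weights, 2)
print(final_score)  # (80*0.25 + 55*0.20) / (0.25 + 0.20) = 31.0 / 0.45 -> 68.89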
|