From b34c3db435011d3c9b16d0ae7f3602c921ec83b0 Mon Sep 17 00:00:00 2001 From: Jeremi Joslin Date: Tue, 17 Feb 2026 11:32:13 +0700 Subject: [PATCH 1/3] feat(spp_simulation): add simulation engine for program scenario modeling New module providing: - Scenario templates and configurable simulation scenarios - Simulation run execution with metric tracking - Scenario-to-program conversion for promoting simulations to real programs - Comparison wizard for side-by-side scenario analysis - Targeting efficiency and coverage metrics --- spp_simulation/README.rst | 143 +++++ spp_simulation/__init__.py | 5 + spp_simulation/__manifest__.py | 56 ++ spp_simulation/data/scenario_templates.xml | 76 +++ spp_simulation/docs/methodology.md | 219 +++++++ spp_simulation/models/__init__.py | 8 + .../models/simulation_comparison.py | 307 +++++++++ .../models/simulation_entitlement_rule.py | 74 +++ spp_simulation/models/simulation_metric.py | 83 +++ spp_simulation/models/simulation_run.py | 553 ++++++++++++++++ spp_simulation/models/simulation_scenario.py | 285 ++++++++ .../models/simulation_scenario_template.py | 79 +++ spp_simulation/pyproject.toml | 3 + spp_simulation/readme/DESCRIPTION.md | 51 ++ spp_simulation/report/simulation_report.xml | 92 +++ .../report/simulation_report_views.xml | 12 + spp_simulation/security/ir.model.access.csv | 21 + .../security/simulation_security.xml | 63 ++ spp_simulation/services/__init__.py | 4 + spp_simulation/services/simulation_service.py | 607 ++++++++++++++++++ .../services/targeting_efficiency_service.py | 75 +++ spp_simulation/static/description/index.html | 512 +++++++++++++++ .../src/comparison_table/comparison_table.js | 99 +++ .../src/comparison_table/comparison_table.xml | 36 ++ .../src/fairness_table/fairness_table.js | 106 +++ .../src/fairness_table/fairness_table.xml | 59 ++ .../static/src/overlap_table/overlap_table.js | 51 ++ .../src/overlap_table/overlap_table.xml | 38 ++ .../src/results_summary/results_summary.js | 92 +++ 
.../src/results_summary/results_summary.xml | 40 ++ spp_simulation/tests/__init__.py | 16 + spp_simulation/tests/common.py | 113 ++++ spp_simulation/tests/test_comparison.py | 151 +++++ spp_simulation/tests/test_custom_metrics.py | 90 +++ .../tests/test_distribution_service.py | 111 ++++ spp_simulation/tests/test_entitlement_rule.py | 96 +++ spp_simulation/tests/test_fairness.py | 166 +++++ .../tests/test_metric_constraints.py | 121 ++++ spp_simulation/tests/test_privacy.py | 129 ++++ spp_simulation/tests/test_scenario.py | 110 ++++ .../tests/test_scenario_convert_to_program.py | 339 ++++++++++ .../tests/test_scenario_template.py | 62 ++ spp_simulation/tests/test_security.py | 145 +++++ spp_simulation/tests/test_simulation_run.py | 98 +++ .../tests/test_simulation_service.py | 398 ++++++++++++ .../tests/test_targeting_efficiency.py | 48 ++ spp_simulation/views/menu.xml | 70 ++ .../views/simulation_comparison_views.xml | 77 +++ .../views/simulation_metric_views.xml | 63 ++ spp_simulation/views/simulation_run_views.xml | 242 +++++++ .../simulation_scenario_template_views.xml | 95 +++ .../views/simulation_scenario_views.xml | 228 +++++++ spp_simulation/wizard/__init__.py | 3 + spp_simulation/wizard/compare_wizard.py | 53 ++ .../wizard/compare_wizard_views.xml | 19 + 55 files changed, 6892 insertions(+) create mode 100644 spp_simulation/README.rst create mode 100644 spp_simulation/__init__.py create mode 100644 spp_simulation/__manifest__.py create mode 100644 spp_simulation/data/scenario_templates.xml create mode 100644 spp_simulation/docs/methodology.md create mode 100644 spp_simulation/models/__init__.py create mode 100644 spp_simulation/models/simulation_comparison.py create mode 100644 spp_simulation/models/simulation_entitlement_rule.py create mode 100644 spp_simulation/models/simulation_metric.py create mode 100644 spp_simulation/models/simulation_run.py create mode 100644 spp_simulation/models/simulation_scenario.py create mode 100644 
spp_simulation/models/simulation_scenario_template.py create mode 100644 spp_simulation/pyproject.toml create mode 100644 spp_simulation/readme/DESCRIPTION.md create mode 100644 spp_simulation/report/simulation_report.xml create mode 100644 spp_simulation/report/simulation_report_views.xml create mode 100644 spp_simulation/security/ir.model.access.csv create mode 100644 spp_simulation/security/simulation_security.xml create mode 100644 spp_simulation/services/__init__.py create mode 100644 spp_simulation/services/simulation_service.py create mode 100644 spp_simulation/services/targeting_efficiency_service.py create mode 100644 spp_simulation/static/description/index.html create mode 100644 spp_simulation/static/src/comparison_table/comparison_table.js create mode 100644 spp_simulation/static/src/comparison_table/comparison_table.xml create mode 100644 spp_simulation/static/src/fairness_table/fairness_table.js create mode 100644 spp_simulation/static/src/fairness_table/fairness_table.xml create mode 100644 spp_simulation/static/src/overlap_table/overlap_table.js create mode 100644 spp_simulation/static/src/overlap_table/overlap_table.xml create mode 100644 spp_simulation/static/src/results_summary/results_summary.js create mode 100644 spp_simulation/static/src/results_summary/results_summary.xml create mode 100644 spp_simulation/tests/__init__.py create mode 100644 spp_simulation/tests/common.py create mode 100644 spp_simulation/tests/test_comparison.py create mode 100644 spp_simulation/tests/test_custom_metrics.py create mode 100644 spp_simulation/tests/test_distribution_service.py create mode 100644 spp_simulation/tests/test_entitlement_rule.py create mode 100644 spp_simulation/tests/test_fairness.py create mode 100644 spp_simulation/tests/test_metric_constraints.py create mode 100644 spp_simulation/tests/test_privacy.py create mode 100644 spp_simulation/tests/test_scenario.py create mode 100644 spp_simulation/tests/test_scenario_convert_to_program.py create mode 
100644 spp_simulation/tests/test_scenario_template.py create mode 100644 spp_simulation/tests/test_security.py create mode 100644 spp_simulation/tests/test_simulation_run.py create mode 100644 spp_simulation/tests/test_simulation_service.py create mode 100644 spp_simulation/tests/test_targeting_efficiency.py create mode 100644 spp_simulation/views/menu.xml create mode 100644 spp_simulation/views/simulation_comparison_views.xml create mode 100644 spp_simulation/views/simulation_metric_views.xml create mode 100644 spp_simulation/views/simulation_run_views.xml create mode 100644 spp_simulation/views/simulation_scenario_template_views.xml create mode 100644 spp_simulation/views/simulation_scenario_views.xml create mode 100644 spp_simulation/wizard/__init__.py create mode 100644 spp_simulation/wizard/compare_wizard.py create mode 100644 spp_simulation/wizard/compare_wizard_views.xml diff --git a/spp_simulation/README.rst b/spp_simulation/README.rst new file mode 100644 index 00000000..41b74bbd --- /dev/null +++ b/spp_simulation/README.rst @@ -0,0 +1,143 @@ +.. image:: https://odoo-community.org/readme-banner-image + :target: https://odoo-community.org/get-involved?utm_source=readme + :alt: Odoo Community Association + +============================ +OpenSPP Targeting Simulation +============================ + +.. + !!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!! + !! This file is generated by oca-gen-addon-readme !! + !! changes will be overwritten. !! + !!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!! + !! source digest: sha256:fd3e34b59c516e27a1d09522eca711429d907a328efb7a60b408f4edb71d552f + !!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!! + +.. |badge1| image:: https://img.shields.io/badge/maturity-Alpha-red.png + :target: https://odoo-community.org/page/development-status + :alt: Alpha +.. |badge2| image:: https://img.shields.io/badge/license-LGPL--3-blue.png + :target: http://www.gnu.org/licenses/lgpl-3.0-standalone.html + :alt: License: LGPL-3 +.. 
|badge3| image:: https://img.shields.io/badge/github-OpenSPP%2Fopenspp--modules-lightgray.png?logo=github + :target: https://github.com/OpenSPP/openspp-modules/tree/19.0/spp_simulation + :alt: OpenSPP/openspp-modules + +|badge1| |badge2| |badge3| + +Targeting Simulation & Fairness Analysis +======================================== + +Simulate targeting scenarios, analyze fairness and distribution, and +compare different targeting strategies before committing to criteria. + +Key Features +------------ + +- **Scenario Builder**: Define targeting criteria using CEL expressions + with live preview counts +- **Template Library**: Pre-built templates for common targeting + patterns (elderly pension, female-headed households, etc.) +- **Distribution Analysis**: Gini coefficient, Lorenz curve, percentile + breakdown +- **Fairness Analysis**: Disparity ratios across gender, disability, + location with traffic-light status indicators +- **Targeting Efficiency**: Confusion matrix, leakage rate, + undercoverage against ideal populations +- **Budget Simulation**: Fixed cap and proportional reduction + strategies +- **Scenario Comparison**: Side-by-side comparison of multiple + targeting approaches with overlap analysis +- **Custom Metrics**: Define CEL-based aggregate, coverage, and ratio + metrics + +Privacy +------- + +Only aggregated counts, percentages, and metrics are stored. No +individual beneficiary records are persisted in simulation results. 
+ +Models +------ + ++----------------------------------+----------------------------------+ +| Model | Description | ++==================================+==================================+ +| ``sp | Pre-built targeting scenario | +| p.simulation.scenario.template`` | templates | ++----------------------------------+----------------------------------+ +| ``spp.simulation.scenario`` | Targeting scenario definitions | ++----------------------------------+----------------------------------+ +| ``s | Amount calculation rules | +| pp.simulation.entitlement.rule`` | | ++----------------------------------+----------------------------------+ +| ``spp.simulation.run`` | Aggregated simulation results | +| | (non-deletable) | ++----------------------------------+----------------------------------+ +| ``spp.simulation.comparison`` | Side-by-side run comparisons | ++----------------------------------+----------------------------------+ +| ``spp.simulation.metric`` | Custom evaluation metrics | ++----------------------------------+----------------------------------+ + +Security Groups +--------------- + +================== =============================================== +Group Access +================== =============================================== +Simulation Viewer Read-only access to all simulation data +Simulation Officer Create/edit scenarios, run simulations +Simulation Manager Full access including comparisons and archiving +================== =============================================== + +Menu Path +--------- + +Social Protection > Simulation > Scenarios / Results / Comparisons + +Configuration: Social Protection > Simulation > Configuration > +Templates / Custom Metrics + +.. IMPORTANT:: + This is an alpha version, the data model and design can change at any time without warning. + Only for development or testing purpose, do not use in production. + `More details on development status `_ + +**Table of contents** + +.. 
contents:: + :local: + +Bug Tracker +=========== + +Bugs are tracked on `GitHub Issues `_. +In case of trouble, please check there if your issue has already been reported. +If you spotted it first, help us to smash it by providing a detailed and welcomed +`feedback `_. + +Do not contact contributors directly about support or help with technical issues. + +Credits +======= + +Authors +------- + +* OpenSPP.org + +Maintainers +----------- + +.. |maintainer-jeremi| image:: https://github.com/jeremi.png?size=40px + :target: https://github.com/jeremi + :alt: jeremi + +Current maintainer: + +|maintainer-jeremi| + +This module is part of the `OpenSPP/openspp-modules `_ project on GitHub. + +You are welcome to contribute. diff --git a/spp_simulation/__init__.py b/spp_simulation/__init__.py new file mode 100644 index 00000000..bcccf4bf --- /dev/null +++ b/spp_simulation/__init__.py @@ -0,0 +1,5 @@ +# Part of OpenSPP. See LICENSE file for full copyright and licensing details. + +from . import models +from . import services +from . import wizard diff --git a/spp_simulation/__manifest__.py b/spp_simulation/__manifest__.py new file mode 100644 index 00000000..c86977f9 --- /dev/null +++ b/spp_simulation/__manifest__.py @@ -0,0 +1,56 @@ +# pylint: disable=pointless-statement +# Part of OpenSPP. See LICENSE file for full copyright and licensing details. 
+{ + "name": "OpenSPP Targeting Simulation", + "summary": "Simulate targeting scenarios, analyze fairness and distribution, " + "and compare different targeting strategies before committing to criteria.", + "category": "OpenSPP/Targeting", + "version": "19.0.2.0.0", + "author": "OpenSPP.org", + "website": "https://github.com/OpenSPP/OpenSPP2", + "license": "LGPL-3", + "development_status": "Alpha", + "maintainers": ["jeremi"], + "depends": [ + "base", + "mail", + "spp_programs", + "spp_cel_domain", + "spp_cel_widget", + "spp_security", + "spp_aggregation", + "spp_metrics_core", + ], + "data": [ + # Security + "security/simulation_security.xml", + "security/ir.model.access.csv", + # Data + "data/scenario_templates.xml", + # Views + "views/simulation_scenario_template_views.xml", + "views/simulation_scenario_views.xml", + "views/simulation_run_views.xml", + "views/simulation_comparison_views.xml", + "views/simulation_metric_views.xml", + "views/menu.xml", + # Wizard + "wizard/compare_wizard_views.xml", + # Report + "report/simulation_report_views.xml", + "report/simulation_report.xml", + ], + "assets": { + "web.assets_backend": [ + "spp_simulation/static/src/results_summary/*", + "spp_simulation/static/src/fairness_table/*", + "spp_simulation/static/src/comparison_table/*", + "spp_simulation/static/src/overlap_table/*", + ], + }, + "demo": [], + "images": [], + "application": True, + "installable": True, + "auto_install": False, +} diff --git a/spp_simulation/data/scenario_templates.xml b/spp_simulation/data/scenario_templates.xml new file mode 100644 index 00000000..ac414f86 --- /dev/null +++ b/spp_simulation/data/scenario_templates.xml @@ -0,0 +1,76 @@ + + + + + Elderly Pension (Age 60+) + Target elderly individuals aged 60 and above for pension benefits. + age + individual + age_years(r.birthdate) >= 60 + 1000 + fixed + fa-hourglass-half + 10 + + + + Households with Children Under 5 + Target households that have at least one child under 5 years of age. 
+ age + group + members.any(m, age_years(m.birthdate) < 5) + 500 + fixed + fa-child + 20 + + + + Female-Headed Households + Target households where the head of household is female. + vulnerability + group + is_female(head.gender_id) + 750 + fixed + fa-female + 30 + + + + Rural Households + Target households located in rural areas. + geographic + group + has_area_tag('RURAL') + 600 + fixed + fa-tree + 40 + + + + Disability-Targeted + Target households with at least one member with a disability. + vulnerability + group + members.any(m, m.disability_id != false) + 800 + fixed + fa-wheelchair + 50 + + + + Large Households (5+ Members) + Target households with 5 or more members. Amount scales with household size. + categorical + group + size(members) >= 5 + 200 + multiplier + fa-users + 60 + + + diff --git a/spp_simulation/docs/methodology.md b/spp_simulation/docs/methodology.md new file mode 100644 index 00000000..d739e091 --- /dev/null +++ b/spp_simulation/docs/methodology.md @@ -0,0 +1,219 @@ +# Simulation Methodology + +This document explains the metrics, formulas, and methodology used by the OpenSPP Simulation module. + +## Overview + +The Simulation module allows you to test targeting scenarios before committing to actual program enrollment. It helps answer questions like: + +- How many beneficiaries would this targeting criteria reach? +- What would be the total cost? +- Are benefits distributed equitably across demographic groups? +- How much of my budget would be used? + +**When to use simulation:** Before finalizing targeting criteria for a new program cycle, or when considering changes to existing targeting rules. 
+ +## Simulation Pipeline + +```mermaid +flowchart TD + A[Scenario Configuration] --> B[Targeting] + B --> C[Entitlement Calculation] + C --> D[Budget Adjustment] + D --> E[Distribution Analysis] + E --> F[Fairness Analysis] + F --> G[Targeting Efficiency] + G --> H[Results] + + B -->|CEL Expression| B1[Match registrants] + C -->|Fixed/Multiplier/CEL| C1[Compute amounts] + D -->|Cap or Proportional| D1[Apply budget constraints] +``` + +## Metrics Reference + +### Coverage Metrics + +| Metric | Description | Formula | +|--------|-------------|---------| +| **Beneficiary Count** | Number of registrants who would receive benefits | Count of registrants matching targeting expression | +| **Coverage Rate** | Percentage of registry targeted | `beneficiary_count / total_registry_count × 100` | + +### Distribution Metrics + +| Metric | Description | Interpretation | +|--------|-------------|----------------| +| **Gini Coefficient** | Measures inequality in benefit distribution | **0** = perfectly equal (everyone gets the same). **1** = maximum inequality. Lower is better. 
| +| **Standard Deviation** | Spread of benefit amounts around the mean | Higher values indicate more variation in amounts | +| **Mean** | Average benefit amount | `total_cost / beneficiary_count` | +| **Median** | Middle value when amounts are sorted | Less sensitive to outliers than mean | + +#### Gini Coefficient Calculation + +The Gini coefficient is computed using the standard formula: + +``` +G = (2 × Σ(i × yᵢ) - (n+1) × Σyᵢ) / (n × Σyᵢ) +``` + +Where: +- `yᵢ` = benefit amount for person i (sorted ascending) +- `n` = number of beneficiaries +- `i` = rank position (1 to n) + +**Interpretation scale:** +- **0.00 - 0.20**: Nearly equal distribution +- **0.20 - 0.40**: Moderate inequality +- **0.40+**: High inequality + +### Coverage Parity Metrics + +| Metric | Description | Interpretation | +|--------|-------------|----------------| +| **Parity Score** | Aggregate score measuring demographic coverage parity (0-100) | **100** = all groups covered proportionally. Points deducted for under-represented groups. Most meaningful for universal programs. | +| **Coverage Ratio** | Group coverage rate / Overall coverage rate | **1.0** = group matches overall. **< 0.80** = low coverage. **< 0.70** = under-represented | +| **Has Under-representation** | Boolean flag | True if any group has coverage ratio < 0.70 | + +### Targeting Efficiency Metrics + +These metrics require setting an **Ideal Population Expression** in the scenario configuration. + +| Metric | Description | Formula | +|--------|-------------|---------| +| **Leakage Rate** | Recipients who are NOT in the ideal population | `false_positives / total_simulated × 100` | +| **Undercoverage Rate** | Ideal population members who were NOT targeted | `false_negatives / total_ideal × 100` | + +```mermaid +flowchart LR + subgraph Simulated["Simulated Population"] + TP[True Positives] + FP[False Positives
Leakage] + end + + subgraph Ideal["Ideal Population"] + TP2[True Positives] + FN[False Negatives
Undercoverage] + end + + TP --- TP2 +``` + +**Terminology:** +- **True Positives (TP)**: Correctly targeted (in both simulated and ideal) +- **False Positives (FP)**: Leakage - targeted but shouldn't be (in simulated, not in ideal) +- **False Negatives (FN)**: Undercoverage - should be targeted but weren't (in ideal, not in simulated) + +## Fairness Analysis + +The fairness analysis evaluates whether targeting criteria inadvertently exclude certain demographic groups by comparing each group's coverage rate to the overall coverage rate. + +### How It Works + +``` +disparity_ratio = group_coverage_rate / overall_coverage_rate +``` + +**Example:** +- Overall: 1,000 beneficiaries out of 5,000 population = 20% coverage +- Females: 400 out of 2,200 = 18.2% coverage → ratio = 0.91 (fair) +- PWDs: 20 out of 200 = 10% coverage → ratio = 0.50 (disparity) + +A ratio of 1.0 means the group is covered at the same rate as the overall population. + +### Dimensions Currently Analyzed + +The service dynamically detects available demographic fields: + +- **Gender** - If `gender_id` field exists with ISO 5218 vocabulary codes +- **Disability** - If `disability_id` field exists (PWD vs non-PWD) + +> **Note:** Additional dimensions (age groups, geographic areas, ethnicity) are not currently implemented but could be added by extending `_get_demographic_groups()` in the fairness service. + +### Coverage Ratio Thresholds + +| Status | Ratio Range | Meaning | +|--------|-------------|---------| +| ✅ **Proportional** | ≥ 0.80 | Group coverage is within 20% of overall | +| ⚠️ **Low coverage** | 0.70 - 0.80 | Group may be under-covered relative to overall | +| ❌ **Under-represented** | < 0.70 | Significant under-representation relative to overall | + +> **Important:** For targeted programs (e.g., maternal health, disability support), non-target groups are *expected* to be under-represented. This is by design, not a problem. 
+ +### Limitations + +- **Limited dimensions**: Only gender and disability are checked (if fields exist) +- **No intersectional analysis**: Disparity ratios computed independently (e.g., doesn't check "disabled women") +- **No statistical significance**: Small groups (N < 30) may show spurious disparity from random variation +- **Over-representation ignored**: Ratio > 1.0 is not flagged (group may be over-represented) +- **No baseline comparison**: Compares to overall coverage, not to population share or external benchmarks + +## Budget Strategies + +| Strategy | Description | Use When | +|----------|-------------|----------| +| **No Constraint** | Total cost may exceed budget | Exploring potential costs without limits | +| **Cap at Total** | Include beneficiaries at full amount until budget exhausted; remaining get nothing | Benefit amount must remain fixed (e.g., minimum living standard) | +| **Proportional Reduction** | Reduce all amounts proportionally to fit within budget | Reaching all eligible beneficiaries is prioritized over amount per person | + +```mermaid +flowchart TD + A[Compute Full Entitlements] --> B{Total > Budget?} + B -->|No| C[Use Full Amounts] + B -->|Yes| D{Strategy?} + D -->|Cap at Total| E[Include at full amount until budget reached] + D -->|Proportional| F[Multiply all amounts by budget/total ratio] +``` + +## CEL Expression Examples + +CEL (Common Expression Language) is used for targeting and entitlement expressions. 
+ +### Basic Targeting + +| Goal | Expression | +|------|------------| +| All registrants | `true` | +| Elderly (60+) | `age_years(r.birthdate) >= 60` | +| Children under 5 | `age_years(r.birthdate) < 5` | +| Female-headed households | `is_female(head.gender_id)` | + +### Household-based Targeting + +| Goal | Expression | +|------|------------| +| Large households (5+ members) | `size(members) >= 5` | +| Households with children under 18 | `has_child_under(members, 18)` | +| Households with elderly members | `any(members, m, age_years(m.birthdate) >= 60)` | + +### Geographic Targeting + +| Goal | Expression | +|------|------------| +| Rural areas only | `has_area_tag('RURAL')` | +| Specific administrative area | `in_area('DISTRICT_01')` | + +### Combining Conditions + +| Goal | Expression | +|------|------------| +| Elderly women | `age_years(r.birthdate) >= 60 && is_female(r.gender_id)` | +| Large rural households | `size(members) >= 5 && has_area_tag('RURAL')` | +| Either elderly or disabled | `age_years(r.birthdate) >= 60 \|\| r.disability_level > 0` | + +### Ideal Population Examples + +The ideal population expression defines who **should** receive benefits (ground truth for measuring targeting accuracy): + +| Scenario | Expression | +|----------|------------| +| Below poverty line | `metric('pmt_score') < 2.0` | +| Chronic poverty | `metric('pmt_score') < 1.5 && metric('years_in_poverty') >= 3` | +| Food insecure households | `metric('food_consumption_score') < 28` | + +## Technical Notes + +- **Standard deviation** uses the population formula (÷n not ÷n-1) for consistency with large-scale data +- **Gini coefficient** uses the trapezoid approximation of the Lorenz curve +- Simulations execute against the **current registry state** (no historical simulation) +- Simulation runs are **preserved for audit compliance** and cannot be deleted +- All computations are performed in-memory using batch processing for scalability diff --git 
import logging
from datetime import datetime

from odoo import _, api, fields, models
from odoo.exceptions import ValidationError

_logger = logging.getLogger(__name__)


class SimulationComparison(models.Model):
    """Side-by-side comparison of multiple simulation runs.

    Headline metrics and scenario parameters of each selected run are
    denormalized into JSON fields so the comparison survives later changes
    to the underlying runs/scenarios.  Overlap counts are recomputed from
    the *current* registry, not from historical state (see
    ``overlap_count_json`` help and ``staleness_warning``).
    """

    _name = "spp.simulation.comparison"
    _description = "Simulation Comparison"
    _order = "create_date desc"

    name = fields.Char(string="Name", required=True)
    run_ids = fields.Many2many(
        comodel_name="spp.simulation.run",
        string="Runs to Compare",
        required=True,
    )
    comparison_json = fields.Json(
        string="Comparison Data",
        readonly=True,
    )
    parameters_comparison_json = fields.Json(
        string="Parameters Comparison",
        readonly=True,
        help="Side-by-side comparison of scenario parameters",
    )
    parameters_comparison_html = fields.Html(
        string="Parameters Comparison Table",
        compute="_compute_parameters_comparison_html",
    )
    overlap_count_json = fields.Json(
        string="Overlap Counts",
        readonly=True,
        help="Aggregated overlap counts between scenarios. "
        "Computed from current registry data, not historical state.",
    )
    staleness_warning = fields.Text(
        string="Staleness Warning",
        compute="_compute_staleness_warning",
    )

    @api.constrains("run_ids")
    def _check_minimum_runs(self):
        """A comparison of fewer than two runs is meaningless."""
        for record in self:
            if len(record.run_ids) < 2:
                raise ValidationError(_("A comparison requires at least 2 simulation runs."))

    # ── payload builders ──────────────────────────────────────────────
    # Shared by the onchange handler and action_compute_comparison so the
    # two code paths cannot drift apart (they previously duplicated this
    # dict-building logic verbatim).

    @api.model
    def _build_comparison_payload(self, runs):
        """Return the headline metrics of *runs* as a JSON-serializable dict."""
        payload = {"runs": []}
        for run in runs:
            payload["runs"].append(
                {
                    "run_id": run.id,
                    "scenario_name": run.scenario_id.name,
                    "beneficiary_count": run.beneficiary_count,
                    "total_cost": run.total_cost,
                    "coverage_rate": run.coverage_rate,
                    "equity_score": run.equity_score,
                    "gini_coefficient": run.gini_coefficient,
                    "has_disparity": run.has_disparity,
                    "leakage_rate": run.leakage_rate,
                    "undercoverage_rate": run.undercoverage_rate,
                    "budget_utilization": run.budget_utilization,
                    "executed_at": run.executed_at.isoformat() if run.executed_at else None,
                }
            )
        return payload

    @api.model
    def _build_parameters_payload(self, runs):
        """Return the scenario-snapshot parameters of *runs* as a dict.

        Values come from each run's execution-time snapshot, not from the
        live scenario, so the comparison reflects what was actually run.
        """
        payload = {"runs": []}
        for run in runs:
            snapshot = run.scenario_snapshot_json or {}
            payload["runs"].append(
                {
                    "run_id": run.id,
                    "scenario_name": run.scenario_id.name,
                    "executed_at": run.executed_at.isoformat() if run.executed_at else None,
                    "target_type": snapshot.get("target_type"),
                    "targeting_expression": snapshot.get("targeting_expression"),
                    "budget_amount": snapshot.get("budget_amount"),
                    "budget_strategy": snapshot.get("budget_strategy"),
                    "entitlement_rules": snapshot.get("entitlement_rules") or [],
                    "ideal_population_expression": snapshot.get("ideal_population_expression"),
                }
            )
        return payload

    @api.onchange("run_ids")
    def _onchange_run_ids(self):
        """Auto-recompute comparison when runs are added or removed."""
        if len(self.run_ids) >= 2:
            self.comparison_json = self._build_comparison_payload(self.run_ids)
            self.parameters_comparison_json = self._build_parameters_payload(self.run_ids)

    @api.depends("run_ids", "run_ids.executed_at")
    def _compute_staleness_warning(self):
        """Warn when compared runs were executed more than a day apart.

        Overlap is computed against the live registry, so runs executed at
        very different times may not be directly comparable.
        """
        for record in self:
            executed_dates = [d for d in record.run_ids.mapped("executed_at") if d]
            if len(record.run_ids) < 2 or len(executed_dates) < 2:
                record.staleness_warning = False
                continue
            delta_days = (max(executed_dates) - min(executed_dates)).days
            if delta_days > 1:
                record.staleness_warning = (
                    f"These runs were executed {delta_days} days apart. "
                    "Overlap counts reflect the current registry state, "
                    "not the state at each run's execution time."
                )
            else:
                record.staleness_warning = False

    @api.depends("parameters_comparison_json")
    def _compute_parameters_comparison_html(self):
        """Render the parameters comparison as an HTML table.

        NOTE(review): the HTML tag literals below were reconstructed — the
        original markup was lost in extraction; confirm against the rendered
        comparison view.
        """
        budget_strategy_labels = {
            "none": "No Budget Constraint",
            "cap_total": "Cap at Budget Total",
            "proportional_reduction": "Proportional Reduction",
        }
        target_type_labels = {
            "group": "Group (Household)",
            "individual": "Individual",
        }

        for record in self:
            params = record.parameters_comparison_json
            if not params:
                record.parameters_comparison_html = False
                continue

            runs = params.get("runs", [])
            if not runs:
                record.parameters_comparison_html = "<p>No parameter data available</p>"
                continue

            html_parts = ['<table class="table table-sm table-bordered">']

            # Header row with scenario names and execution dates.
            html_parts.append("<tr><th>Parameter</th>")
            for run_data in runs:
                name = run_data.get("scenario_name", "Unknown")
                executed_at = run_data.get("executed_at")
                if executed_at:
                    # Parse ISO date and format nicely; "Z" is normalized for
                    # datetime.fromisoformat compatibility.
                    try:
                        dt = datetime.fromisoformat(executed_at.replace("Z", "+00:00"))
                        date_str = dt.strftime("%b %d, %Y %H:%M")
                        html_parts.append(f"<th>{name}<br/><small>{date_str}</small></th>")
                    except ValueError:
                        html_parts.append(f"<th>{name}</th>")
                else:
                    html_parts.append(f"<th>{name}</th>")
            html_parts.append("</tr>")

            # Target Type row
            html_parts.append("<tr><td>Target Type</td>")
            for run_data in runs:
                val = target_type_labels.get(run_data.get("target_type"), run_data.get("target_type", "-"))
                html_parts.append(f"<td>{val}</td>")
            html_parts.append("</tr>")

            # Targeting Expression row
            html_parts.append("<tr><td>Targeting Expression</td>")
            for run_data in runs:
                expr = run_data.get("targeting_expression") or "-"
                html_parts.append(f"<td><code>{expr}</code></td>")
            html_parts.append("</tr>")

            # Budget Amount row
            html_parts.append("<tr><td>Budget Amount</td>")
            for run_data in runs:
                amount = run_data.get("budget_amount") or 0
                html_parts.append(f"<td>{amount:,.2f}</td>")
            html_parts.append("</tr>")

            # Budget Strategy row
            html_parts.append("<tr><td>Budget Strategy</td>")
            for run_data in runs:
                strategy = budget_strategy_labels.get(
                    run_data.get("budget_strategy"), run_data.get("budget_strategy", "-")
                )
                html_parts.append(f"<td>{strategy}</td>")
            html_parts.append("</tr>")

            # Entitlement Rules row: one bullet per rule, formatted by mode.
            html_parts.append("<tr><td>Entitlement Rules</td>")
            for run_data in runs:
                rules = run_data.get("entitlement_rules") or []
                if rules:
                    rules_html = ""
                    for rule in rules:
                        mode = rule.get("amount_mode", "fixed")
                        amount = rule.get("amount", 0)
                        if mode == "fixed":
                            rules_html += f"• Fixed: {amount:,.2f}<br/>"
                        elif mode == "multiplier":
                            field = rule.get("multiplier_field", "?")
                            rules_html += f"• Multiplier: {amount:,.2f} × {field}<br/>"
                        elif mode == "cel":
                            cel = rule.get("amount_cel_expression", "?")
                            rules_html += f"• CEL: {cel}<br/>"
                    html_parts.append(f"<td>{rules_html}</td>")
                else:
                    html_parts.append("<td>-</td>")
            html_parts.append("</tr>")

            html_parts.append("</table>")
            record.parameters_comparison_html = "".join(html_parts)

    def action_compute_comparison(self):
        """(Re)compute metric, parameter and overlap data for this comparison."""
        self.ensure_one()
        self.comparison_json = self._build_comparison_payload(self.run_ids)
        self.parameters_comparison_json = self._build_parameters_payload(self.run_ids)
        # Overlap is derived from live registry data (see field help).
        self._compute_overlap()

    def _compute_overlap(self):
        """Re-execute targeting expressions to compute pairwise overlap counts.

        Uses the *current* registry state; see ``staleness_warning``.  A
        scenario whose expression fails contributes an empty set (overlap 0)
        rather than aborting the whole comparison.
        """
        registry = self.env["spp.cel.registry"]
        executor = self.env["spp.cel.executor"]
        run_sets = {}
        for run in self.run_ids:
            scenario = run.scenario_id
            if not scenario.targeting_expression:
                continue
            profile = "registry_groups" if scenario.target_type == "group" else "registry_individuals"
            try:
                cfg = registry.load_profile(profile)
                executor_with_cfg = executor.with_context(cel_cfg=cfg)
                all_ids = []
                # Batched execution keeps memory bounded on large registries.
                for batch_ids in executor_with_cfg.compile_for_batch(
                    "res.partner", scenario.targeting_expression, batch_size=5000
                ):
                    all_ids.extend(batch_ids)
                run_sets[run.id] = set(all_ids)
            except Exception:
                # Best effort: log with traceback and fall back to an empty
                # set so the remaining pairs can still be compared.
                _logger.warning(
                    "Could not compute overlap for run %s (scenario '%s')",
                    run.id,
                    scenario.name,
                    exc_info=True,
                )
                run_sets[run.id] = set()

        overlap_data = {}
        run_list = list(self.run_ids)
        for i, run_a in enumerate(run_list):
            for run_b in run_list[i + 1 :]:
                set_a = run_sets.get(run_a.id, set())
                set_b = run_sets.get(run_b.id, set())
                overlap_count = len(set_a & set_b)
                union_count = len(set_a | set_b)
                key = f"{run_a.id}_{run_b.id}"
                overlap_data[key] = {
                    "run_a_id": run_a.id,
                    "run_a_name": run_a.scenario_id.name,
                    "run_b_id": run_b.id,
                    "run_b_name": run_b.scenario_id.name,
                    "overlap_count": overlap_count,
                    "union_count": union_count,
                    # Jaccard: |A∩B| / |A∪B|; defined as 0 when both sets are empty.
                    "jaccard_index": overlap_count / union_count if union_count else 0,
                }
        self.overlap_count_json = overlap_data
fields.Integer(string="Sequence", default=10) + name = fields.Char(string="Rule Name") + amount_mode = fields.Selection( + selection=[ + ("fixed", "Fixed Amount"), + ("multiplier", "Multiplier"), + ("cel", "CEL Expression"), + ], + string="Amount Mode", + required=True, + default="fixed", + ) + amount = fields.Float( + string="Amount", + help="Fixed amount per beneficiary (used when mode is 'Fixed Amount').", + ) + multiplier_field = fields.Char( + string="Multiplier Field", + help="Field name to multiply by (e.g., 'household_size'). Used when mode is 'Multiplier'.", + ) + max_multiplier = fields.Float( + string="Maximum Multiplier", + default=0, + help="Maximum multiplier value. 0 means no limit.", + ) + amount_cel_expression = fields.Text( + string="Amount CEL Expression", + help="CEL expression returning the amount per beneficiary. Used when mode is 'CEL Expression'.", + ) + condition_cel_expression = fields.Text( + string="Condition Expression (CEL)", + help="Optional sub-filter within the targeted population. 
" + "Only beneficiaries matching this condition receive this entitlement.", + ) + + @api.constrains("amount_mode", "amount") + def _check_fixed_amount(self): + for record in self: + if record.amount_mode == "fixed" and record.amount < 0: + raise ValidationError(_("Fixed amount cannot be negative.")) + + @api.constrains("amount_mode", "multiplier_field") + def _check_multiplier_field(self): + for record in self: + if record.amount_mode == "multiplier" and not record.multiplier_field: + raise ValidationError(_("Multiplier field is required when amount mode is 'Multiplier'.")) + + @api.constrains("amount_mode", "amount_cel_expression") + def _check_cel_expression(self): + for record in self: + if record.amount_mode == "cel" and not record.amount_cel_expression: + raise ValidationError(_("CEL expression is required when amount mode is 'CEL Expression'.")) diff --git a/spp_simulation/models/simulation_metric.py b/spp_simulation/models/simulation_metric.py new file mode 100644 index 00000000..e3713b65 --- /dev/null +++ b/spp_simulation/models/simulation_metric.py @@ -0,0 +1,83 @@ +import logging + +from odoo import _, api, fields, models +from odoo.exceptions import ValidationError + +_logger = logging.getLogger(__name__) + + +class SimulationMetric(models.Model): + """Custom evaluation metrics for simulation scenarios. + + Inherits from spp.metric.base for common metric fields + (name, label, description, unit, decimal_places, category_id, active, sequence). 
+ + Defines simulation-specific computation fields: + - metric_type (aggregate/coverage/ratio) + - cel_expression (for aggregate and coverage metrics) + - aggregation (how to aggregate across groups) + - numerator/denominator expressions (for ratio metrics) + """ + + _name = "spp.simulation.metric" + _description = "Simulation Metric" + _inherit = ["spp.metric.base"] + _order = "sequence, name" + + # name, label, description, unit, decimal_places, category_id, active, sequence + # inherited from spp.metric.base + + # ─── Computation ──────────────────────────────────────────────────── + metric_type = fields.Selection( + selection=[ + ("aggregate", "Aggregate"), + ("coverage", "Coverage"), + ("ratio", "Ratio"), + ], + string="Metric Type", + required=True, + default="aggregate", + help="Type of simulation metric computation", + ) + + cel_expression = fields.Text( + string="CEL Expression", + help="CEL expression for computation (required for aggregate and coverage metrics)", + ) + + aggregation = fields.Selection( + selection=[ + ("sum", "Sum"), + ("avg", "Average"), + ("min", "Minimum"), + ("max", "Maximum"), + ("count", "Count"), + ], + string="Aggregation", + default="sum", + help="How to aggregate across groups", + ) + + numerator_expression = fields.Text( + string="Numerator Expression", + help="CEL expression for the numerator of ratio metrics.", + ) + denominator_expression = fields.Text( + string="Denominator Expression", + help="CEL expression for the denominator of ratio metrics.", + ) + + @api.constrains("metric_type", "cel_expression") + def _check_aggregate_expression(self): + for record in self: + if record.metric_type in ("aggregate", "coverage") and not record.cel_expression: + raise ValidationError(_("CEL expression is required for aggregate and coverage metrics.")) + + @api.constrains("metric_type", "numerator_expression", "denominator_expression") + def _check_ratio_expressions(self): + for record in self: + if record.metric_type == "ratio": + if 
import logging

from odoo import Command, _, api, fields, models
from odoo.exceptions import UserError

_logger = logging.getLogger(__name__)


class SimulationRun(models.Model):
    """Aggregated simulation results. Non-deletable for audit compliance."""

    _name = "spp.simulation.run"
    _description = "Simulation Run"
    _order = "executed_at desc"
    _rec_name = "display_name"

    @api.depends("scenario_id", "scenario_id.name", "executed_at", "beneficiary_count")
    def _compute_display_name(self):
        """Human-readable label: scenario name, run timestamp and headline count."""
        for run in self:
            if not run.scenario_id:
                run.display_name = f"Run #{run.id}"
            elif not run.executed_at:
                # Scenario known but no execution timestamp recorded yet.
                run.display_name = f"{run.scenario_id.name} - Run #{run.id}"
            else:
                stamp = run.executed_at.strftime("%b %d, %Y %H:%M")
                run.display_name = f"{run.scenario_id.name} - {stamp} ({run.beneficiary_count:,} beneficiaries)"

    display_name = fields.Char(
        compute="_compute_display_name",
        store=True,
    )

    scenario_id = fields.Many2one(
        comodel_name="spp.simulation.scenario",
        string="Scenario",
        required=True,
        ondelete="restrict",
        index=True,
    )
    # Headline scalar fields
    beneficiary_count = fields.Integer(
        string="Beneficiary Count",
        readonly=True,
        index=True,
        help="Number of registrants who would receive benefits.",
    )
    total_registry_count = fields.Integer(
        string="Total Registry Count",
        readonly=True,
        help="Total eligible registrants in the registry.",
    )
    coverage_rate = fields.Float(
        string="Coverage Rate",
        readonly=True,
        help="Percentage of registry targeted (beneficiary_count / total_registry_count).",
    )
    total_cost = fields.Float(
        string="Total Cost",
        readonly=True,
        index=True,
        help="Sum of all entitlement amounts.",
    )
    budget_utilization = fields.Float(
        string="Budget Utilization",
        readonly=True,
        help="Percentage of budget used.",
    )
    # Equity / targeting-quality scalars, written once by the simulation
    # service at execution time.
    gini_coefficient = fields.Float(
        string="Gini Coefficient",
        readonly=True,
        help="Measures benefit inequality. 0 = perfectly equal distribution, "
        "1 = maximum inequality. Lower values indicate more equal distribution.",
    )
    equity_score = fields.Float(
        string="Parity Score",
        readonly=True,
        help="0-100 score measuring demographic coverage parity. "
        "100 = all groups covered proportionally. Points deducted for under-represented groups. "
        "Most meaningful for universal programs, not targeted interventions.",
    )
    has_disparity = fields.Boolean(
        string="Has Under-representation",
        readonly=True,
        help="True if any demographic group has coverage ratio below 0.70 of overall coverage. "
        "For targeted programs, under-representation of non-target groups is expected.",
    )
    leakage_rate = fields.Float(
        string="Leakage Rate",
        readonly=True,
        help="Percentage of recipients who are not part of the ideal population. "
        "Requires ideal_population_expression to be set.",
    )
    undercoverage_rate = fields.Float(
        string="Undercoverage Rate",
        readonly=True,
        help="Percentage of the ideal population that was not targeted. "
        "Requires ideal_population_expression to be set.",
    )
    # Detailed JSON fields (raw payloads produced by the simulation service;
    # the *_html computes below render them for display)
    distribution_json = fields.Json(
        string="Distribution Data",
        readonly=True,
    )
    fairness_json = fields.Json(
        string="Fairness Data",
        readonly=True,
    )
    targeting_efficiency_json = fields.Json(
        string="Targeting Efficiency Data",
        readonly=True,
    )
    geographic_json = fields.Json(
        string="Geographic Data",
        readonly=True,
    )
    metric_results_json = fields.Json(
        string="Custom Metric Results",
        readonly=True,
    )
    # Natural language summaries — stored computes so the rendered HTML is
    # frozen with the run and does not drift if rendering code changes.
    summary_html = fields.Html(
        string="Executive Summary",
        compute="_compute_summary_html",
        store=True,
        readonly=True,
    )
    distribution_summary_html = fields.Html(
        string="Distribution Summary",
        compute="_compute_distribution_summary_html",
        store=True,
        readonly=True,
    )
    distribution_details_html = fields.Html(
        string="Distribution Details",
        compute="_compute_distribution_details_html",
        store=True,
        readonly=True,
    )
    geographic_html = fields.Html(
        string="Geographic Breakdown",
        compute="_compute_geographic_html",
        store=True,
        readonly=True,
    )
    metric_results_html = fields.Html(
        string="Custom Metrics Results",
        compute="_compute_metric_results_html",
        store=True,
        readonly=True,
    )
    targeting_efficiency_html = fields.Html(
        string="Targeting Efficiency Details",
        compute="_compute_targeting_efficiency_html",
        store=True,
        readonly=True,
    )
    # Metadata
    executed_at = fields.Datetime(
        string="Executed At",
        readonly=True,
        default=fields.Datetime.now,
        index=True,
    )
    execution_duration_seconds = fields.Float(
        string="Execution Duration (seconds)",
        readonly=True,
    )
    scenario_snapshot_json = fields.Json(
        string="Scenario Snapshot",
        readonly=True,
        help="Snapshot of scenario configuration at time of execution.",
    )
    # Computed fields for displaying snapshot values in UI (read from
    # scenario_snapshot_json, never from the live scenario)
    scenario_snapshot_target_type = fields.Char(
        string="Target Type (Snapshot)",
        compute="_compute_scenario_snapshot_fields",
    )
    scenario_snapshot_targeting_expression = fields.Text(
        string="Targeting Expression (Snapshot)",
        compute="_compute_scenario_snapshot_fields",
    )
    scenario_snapshot_budget_amount = fields.Float(
        string="Budget Amount (Snapshot)",
        compute="_compute_scenario_snapshot_fields",
    )
    scenario_snapshot_budget_strategy = fields.Char(
        string="Budget Strategy (Snapshot)",
        compute="_compute_scenario_snapshot_fields",
    )
    scenario_snapshot_ideal_population_expression = fields.Text(
        string="Ideal Population Expression (Snapshot)",
        compute="_compute_scenario_snapshot_fields",
    )
    scenario_snapshot_entitlement_rules_html = fields.Html(
        string="Entitlement Rules (Snapshot)",
        compute="_compute_scenario_snapshot_fields",
    )
    error_message = fields.Text(
        string="Error Message",
        readonly=True,
    )
    # Lifecycle: running → completed | failed (set by the simulation service)
    state = fields.Selection(
        selection=[
            ("running", "Running"),
            ("completed", "Completed"),
            ("failed", "Failed"),
        ],
        string="State",
        default="running",
        required=True,
        readonly=True,
        index=True,
    )
utilization: {record.budget_utilization:.1f}%.") + if record.equity_score: + parity_label = ( + "proportional" + if record.equity_score >= 80 + else ("some variation" if record.equity_score >= 60 else "significant variation") + ) + parts.append(f" Parity score: {record.equity_score:.0f}/100 ({parity_label}).") + record.summary_html = "

" + "".join(parts) + "

" + + @api.depends("gini_coefficient", "distribution_json") + def _compute_distribution_summary_html(self): + for record in self: + if record.state != "completed" or not record.distribution_json: + record.distribution_summary_html = False + continue + distribution = record.distribution_json or {} + gini = record.gini_coefficient + gini_label = ( + "nearly equal" if gini < 0.2 else ("moderately distributed" if gini < 0.4 else "unequally distributed") + ) + parts = [f"Benefits are {gini_label} " f"(Gini coefficient: {gini:.2f})."] + minimum = distribution.get("minimum", 0) + maximum = distribution.get("maximum", 0) + mean = distribution.get("mean", 0) + median = distribution.get("median", 0) + if mean: + parts.append(f" Average amount: {mean:,.2f} (range: {minimum:,.2f} to {maximum:,.2f}).") + if median and mean: + skew = ( + "right-skewed" if mean > median * 1.1 else ("left-skewed" if mean < median * 0.9 else "symmetric") + ) + parts.append(f" Distribution is {skew} (median: {median:,.2f}).") + record.distribution_summary_html = "

" + "".join(parts) + "

" + + @api.depends("distribution_json") + def _compute_distribution_details_html(self): + """Render distribution statistics and percentiles as HTML tables.""" + for record in self: + if record.state != "completed" or not record.distribution_json: + record.distribution_details_html = False + continue + dist = record.distribution_json or {} + if not dist: + record.distribution_details_html = "

No distribution data

" + continue + + html_parts = ['
'] + + # Statistics table + html_parts.append('
Statistics
') + html_parts.append('') + html_parts.append("") + stats = [ + ("Count", dist.get("count", 0), "{:,}"), + ("Total", dist.get("total", 0), "{:,.2f}"), + ("Minimum", dist.get("minimum", 0), "{:,.2f}"), + ("Maximum", dist.get("maximum", 0), "{:,.2f}"), + ("Mean", dist.get("mean", 0), "{:,.2f}"), + ("Median", dist.get("median", 0), "{:,.2f}"), + ("Std Dev", dist.get("standard_deviation", 0), "{:,.2f}"), + ] + for label, value, fmt in stats: + formatted = fmt.format(value) if value else "-" + html_parts.append(f"") + html_parts.append("
{label}{formatted}
") + + # Percentiles table + percentiles = dist.get("percentiles", {}) + if percentiles: + html_parts.append('
Percentiles
') + html_parts.append('') + html_parts.append("") + percentile_order = ["p10", "p25", "p50", "p75", "p90"] + percentile_labels = { + "p10": "10th percentile", + "p25": "25th percentile (Q1)", + "p50": "50th percentile (Median)", + "p75": "75th percentile (Q3)", + "p90": "90th percentile", + } + for key in percentile_order: + if key in percentiles: + label = percentile_labels.get(key, key) + value = percentiles[key] + html_parts.append(f"") + html_parts.append("
{label}{value:,.2f}
") + + html_parts.append("
") + record.distribution_details_html = "".join(html_parts) + + @api.depends("geographic_json") + def _compute_geographic_html(self): + """Render geographic breakdown as an HTML table.""" + for record in self: + if record.state != "completed" or not record.geographic_json: + record.geographic_html = False + continue + geo_data = record.geographic_json or {} + if not geo_data: + record.geographic_html = "

No geographic data available

" + continue + + html_parts = [ + '', + "", + "", + "", + ] + # Sort by beneficiary count descending + sorted_areas = sorted(geo_data.items(), key=lambda x: x[1].get("count", 0), reverse=True) + for _area_id, area_info in sorted_areas: + name = area_info.get("name", "Unknown") + count = area_info.get("count", 0) + amount = area_info.get("amount", 0) + coverage = area_info.get("coverage_rate", 0) + html_parts.append( + f"" f"" + ) + html_parts.append("
AreaBeneficiariesAmountCoverage
{name}{count:,}{amount:,.2f}{coverage:.1f}%
") + record.geographic_html = "".join(html_parts) + + @api.depends("metric_results_json") + def _compute_metric_results_html(self): + """Render custom metric results as an HTML table.""" + for record in self: + if record.state != "completed" or not record.metric_results_json: + record.metric_results_html = False + continue + metrics = record.metric_results_json or {} + if not metrics: + record.metric_results_html = "

No custom metrics configured

" + continue + + html_parts = [ + '', + "", + "", + "", + ] + for metric_name, metric_data in metrics.items(): + value = metric_data.get("value", 0) + metric_type = metric_data.get("type", "unknown") + # Format value based on type + if metric_type == "coverage": + formatted_value = f"{value:.1f}%" + elif metric_type == "ratio": + formatted_value = f"{value:.2f}" + elif isinstance(value, float): + formatted_value = f"{value:,.2f}" + else: + formatted_value = str(value) + html_parts.append(f"") + html_parts.append("
MetricValueType
{metric_name}{formatted_value}{metric_type}
") + record.metric_results_html = "".join(html_parts) + + @api.depends("targeting_efficiency_json") + def _compute_targeting_efficiency_html(self): + """Render targeting efficiency confusion matrix as an HTML table.""" + for record in self: + if record.state != "completed" or not record.targeting_efficiency_json: + record.targeting_efficiency_html = False + continue + data = record.targeting_efficiency_json or {} + if not data or "error" in data: + record.targeting_efficiency_html = ( + "

Requires ideal population expression to be set

" + ) + continue + + tp = data.get("true_positives", 0) + fp = data.get("false_positives", 0) + fn = data.get("false_negatives", 0) + total_sim = data.get("total_simulated", 0) + total_ideal = data.get("total_ideal", 0) + + html_parts = ['
'] + + # Confusion matrix + html_parts.append('
Confusion Matrix
') + html_parts.append('') + html_parts.append("") + html_parts.append( + f"" + ) + html_parts.append( + f"" + ) + html_parts.append( + f"" + ) + html_parts.append("
True Positives
" + f"Correctly included
{tp:,}
False Positives (Leakage)
" + f"Included but shouldn't be
{fp:,}
False Negatives (Undercoverage)
" + f"Should be included but weren't
{fn:,}
") + + # Totals + html_parts.append('
Population Totals
') + html_parts.append('') + html_parts.append("") + html_parts.append(f"") + html_parts.append(f"") + html_parts.append("
Simulated (targeted){total_sim:,}
Ideal (should be targeted){total_ideal:,}
") + + html_parts.append("
") + record.targeting_efficiency_html = "".join(html_parts) + + @api.depends("scenario_snapshot_json") + def _compute_scenario_snapshot_fields(self): + budget_strategy_labels = { + "none": "No Budget Constraint", + "cap_total": "Cap at Budget Total", + "proportional_reduction": "Proportional Reduction", + } + target_type_labels = { + "group": "Group (Household)", + "individual": "Individual", + } + for record in self: + snapshot = record.scenario_snapshot_json or {} + record.scenario_snapshot_target_type = target_type_labels.get( + snapshot.get("target_type"), snapshot.get("target_type", "") + ) + record.scenario_snapshot_targeting_expression = snapshot.get("targeting_expression") or "" + record.scenario_snapshot_budget_amount = snapshot.get("budget_amount") or 0.0 + record.scenario_snapshot_budget_strategy = budget_strategy_labels.get( + snapshot.get("budget_strategy"), snapshot.get("budget_strategy", "") + ) + record.scenario_snapshot_ideal_population_expression = snapshot.get("ideal_population_expression") or "" + + # Build HTML table for entitlement rules + rules = snapshot.get("entitlement_rules") or [] + if rules: + html_parts = [ + '', + "", + "", + "", + "", + ] + amount_mode_labels = { + "fixed": "Fixed", + "multiplier": "Multiplier", + "cel": "CEL Expression", + } + for rule in rules: + mode = amount_mode_labels.get(rule.get("amount_mode"), rule.get("amount_mode", "")) + amount = rule.get("amount") or 0 + mult_field = rule.get("multiplier_field") or "-" + max_mult = rule.get("max_multiplier") or "-" + cel_expr = rule.get("amount_cel_expression") or "-" + condition = rule.get("condition_cel_expression") or "-" + html_parts.append( + f"" + f"" + f"" + ) + html_parts.append("
ModeAmountMultiplier FieldMax MultiplierCEL ExpressionCondition
{mode}{amount:,.2f}{mult_field}{max_mult}{cel_expr}{condition}
") + record.scenario_snapshot_entitlement_rules_html = "".join(html_parts) + else: + record.scenario_snapshot_entitlement_rules_html = ( + "

No entitlement rules defined

" + ) + + def unlink(self): + """Prevent deletion of simulation runs for audit compliance.""" + raise UserError(_("Simulation runs cannot be deleted. They are preserved for audit compliance.")) + + def action_open_comparison_wizard(self): + """Open the comparison wizard with this run pre-selected.""" + self.ensure_one() + return { + "type": "ir.actions.act_window", + "name": "Compare Simulation Runs", + "res_model": "spp.simulation.compare.wizard", + "view_mode": "form", + "target": "new", + "context": { + "default_run_ids": [Command.link(self.id)], + }, + } + + def action_compare_with_previous(self): + """Compare this run with the previous completed run of the same scenario.""" + self.ensure_one() + previous_runs = self.search( + [ + ("scenario_id", "=", self.scenario_id.id), + ("state", "=", "completed"), + ("id", "!=", self.id), + ("executed_at", "<", self.executed_at), + ], + order="executed_at desc", + limit=1, + ) + if not previous_runs: + raise UserError(_("No previous completed run found for comparison.")) + comparison = self.env["spp.simulation.comparison"].create( + { + "name": f"Compare: {self.scenario_id.name}", + "run_ids": [Command.set([self.id, previous_runs.id])], + } + ) + comparison.action_compute_comparison() + return { + "type": "ir.actions.act_window", + "res_model": "spp.simulation.comparison", + "res_id": comparison.id, + "view_mode": "form", + "target": "current", + } diff --git a/spp_simulation/models/simulation_scenario.py b/spp_simulation/models/simulation_scenario.py new file mode 100644 index 00000000..0d12d7bf --- /dev/null +++ b/spp_simulation/models/simulation_scenario.py @@ -0,0 +1,285 @@ +import logging + +from odoo import _, api, fields, models +from odoo.exceptions import UserError, ValidationError + +_logger = logging.getLogger(__name__) + + +class SimulationScenario(models.Model): + """A 'what if' targeting scenario definition.""" + + _name = "spp.simulation.scenario" + _description = "Simulation Scenario" + _inherit = 
["mail.thread"] + _order = "write_date desc" + + name = fields.Char( + string="Name", + required=True, + tracking=True, + ) + description = fields.Text(string="Description") + category = fields.Char( + string="Category", + help="Group related scenarios together (e.g., 'Child Grant Variations', 'Budget Testing')", + index=True, + ) + template_id = fields.Many2one( + comodel_name="spp.simulation.scenario.template", + string="Template", + ondelete="set null", + help="Template this scenario was created from.", + ) + target_type = fields.Selection( + selection=[ + ("group", "Group"), + ("individual", "Individual"), + ], + string="Target Type", + required=True, + default="group", + tracking=True, + ) + targeting_expression = fields.Text( + string="Targeting Expression (CEL)", + help="CEL expression that defines who is eligible. Required before running.", + ) + targeting_expression_explanation = fields.Text( + string="Expression Explanation", + help="Plain language explanation of what this expression targets.", + ) + entitlement_rule_ids = fields.One2many( + comodel_name="spp.simulation.entitlement.rule", + inverse_name="scenario_id", + string="Entitlement Rules", + ) + budget_amount = fields.Float( + string="Budget Amount", + tracking=True, + help="Maximum budget for the scenario. Leave at 0 for no limit.", + ) + budget_strategy = fields.Selection( + selection=[ + ("none", "No Budget Constraint"), + ("cap_total", "Cap at Budget Total"), + ("proportional_reduction", "Proportional Reduction"), + ], + string="Budget Strategy", + default="none", + tracking=True, + help="How to handle when total cost exceeds budget. " + "'No Constraint' ignores budget. " + "'Cap at Total' stops adding beneficiaries when budget is reached. 
" + "'Proportional Reduction' reduces all amounts proportionally to fit within budget.", + ) + metric_ids = fields.Many2many( + comodel_name="spp.simulation.metric", + string="Custom Metrics", + ) + program_id = fields.Many2one( + comodel_name="spp.program", + string="Reference Program", + ondelete="set null", + help="Optional program for comparison context.", + ) + converted_program_id = fields.Many2one( + comodel_name="spp.program", + string="Converted Program", + ondelete="set null", + readonly=True, + help="Program created from this scenario via conversion.", + ) + ideal_population_expression = fields.Text( + string="Ideal Population Expression (CEL)", + help="CEL expression defining who SHOULD receive benefits. " + "Used to calculate leakage (included but shouldn't be) and " + "undercoverage (should be included but weren't). " + "Example: metric('pmt_score') < 2.0", + ) + state = fields.Selection( + selection=[ + ("draft", "Draft"), + ("ready", "Ready"), + ("archived", "Archived"), + ], + string="State", + default="draft", + required=True, + tracking=True, + ) + targeting_preview_count = fields.Integer( + string="Preview Count", + compute="_compute_targeting_preview_count", + help="Live count of registrants matching the targeting expression.", + ) + targeting_preview_error = fields.Text( + string="Preview Error", + compute="_compute_targeting_preview_count", + ) + run_ids = fields.One2many( + comodel_name="spp.simulation.run", + inverse_name="scenario_id", + string="Simulation Runs", + ) + run_count = fields.Integer( + string="Run Count", + compute="_compute_run_count", + ) + latest_run_id = fields.Many2one( + comodel_name="spp.simulation.run", + string="Latest Run", + compute="_compute_latest_run", + ) + latest_beneficiary_count = fields.Integer( + string="Latest Beneficiary Count", + compute="_compute_latest_run", + ) + latest_equity_score = fields.Float( + string="Latest Equity Score", + compute="_compute_latest_run", + ) + cel_profile = fields.Char( + 
string="CEL Profile", + compute="_compute_cel_profile", + store=False, + help="CEL profile based on target type (registry_groups or registry_individuals)", + ) + + @api.depends("target_type") + def _compute_cel_profile(self): + """Compute the CEL profile based on target type.""" + for record in self: + record.cel_profile = "registry_groups" if record.target_type == "group" else "registry_individuals" + + @api.depends("targeting_expression", "target_type") + def _compute_targeting_preview_count(self): + cel_service = self.env["spp.cel.service"] + for record in self: + record.targeting_preview_count = 0 + record.targeting_preview_error = False + if not record.targeting_expression: + continue + profile = "registry_groups" if record.target_type == "group" else "registry_individuals" + try: + result = cel_service.compile_expression( + record.targeting_expression, + profile=profile, + base_domain=[("disabled", "=", False)], + limit=0, + ) + record.targeting_preview_count = result.get("count", 0) + except Exception as exc: + record.targeting_preview_error = str(exc) + + @api.depends("run_ids") + def _compute_run_count(self): + for record in self: + record.run_count = len(record.run_ids) + + @api.depends("run_ids", "run_ids.state", "run_ids.executed_at") + def _compute_latest_run(self): + for record in self: + completed_runs = record.run_ids.filtered(lambda r: r.state == "completed").sorted( + "executed_at", reverse=True + ) + if completed_runs: + record.latest_run_id = completed_runs[0] + record.latest_beneficiary_count = completed_runs[0].beneficiary_count + record.latest_equity_score = completed_runs[0].equity_score + else: + record.latest_run_id = False + record.latest_beneficiary_count = 0 + record.latest_equity_score = 0.0 + + def action_set_ready(self): + """Transition scenario from draft to ready.""" + for record in self: + if not record.targeting_expression: + raise ValidationError(_("A targeting expression is required to mark a scenario as ready.")) + 
record.state = "ready" + + def action_set_draft(self): + """Transition scenario back to draft.""" + self.write({"state": "draft"}) + + def action_archive(self): + """Archive the scenario.""" + self.write({"state": "archived"}) + + def action_run_simulation(self): + """Execute the simulation and create a run record.""" + self.ensure_one() + if self.state != "ready": + raise UserError(_("Only scenarios in 'Ready' state can be run.")) + service = self.env["spp.simulation.service"] + run = service.execute_simulation(self) + return { + "type": "ir.actions.act_window", + "res_model": "spp.simulation.run", + "res_id": run.id, + "view_mode": "form", + "target": "current", + } + + def action_duplicate_for_comparison(self): + """Duplicate this scenario for side-by-side comparison.""" + self.ensure_one() + new_scenario = self.copy( + default={ + "name": f"{self.name} (Comparison)", + "state": "draft", + } + ) + return { + "type": "ir.actions.act_window", + "res_model": "spp.simulation.scenario", + "res_id": new_scenario.id, + "view_mode": "form", + "target": "current", + } + + def action_view_runs(self): + """Open the list of simulation runs for this scenario.""" + self.ensure_one() + return { + "type": "ir.actions.act_window", + "name": "Simulation Runs", + "res_model": "spp.simulation.run", + "view_mode": "list,form", + "domain": [("scenario_id", "=", self.id)], + "context": {"default_scenario_id": self.id}, + } + + @api.model + def action_create_from_template(self, template_id): + """Create a new scenario pre-populated from a template.""" + template = self.env["spp.simulation.scenario.template"].browse(template_id) + if not template.exists(): + raise UserError(_("Template not found.")) + vals = { + "name": template.name, + "description": template.description, + "template_id": template.id, + "target_type": template.target_type, + "targeting_expression": template.targeting_expression, + "ideal_population_expression": template.ideal_population_expression or False, + "state": 
"draft", + } + scenario = self.create(vals) + # Create default entitlement rule from template + if template.default_amount: + self.env["spp.simulation.entitlement.rule"].create( + { + "scenario_id": scenario.id, + "amount_mode": template.default_amount_mode or "fixed", + "amount": template.default_amount, + } + ) + return { + "type": "ir.actions.act_window", + "res_model": "spp.simulation.scenario", + "res_id": scenario.id, + "view_mode": "form", + "target": "current", + } diff --git a/spp_simulation/models/simulation_scenario_template.py b/spp_simulation/models/simulation_scenario_template.py new file mode 100644 index 00000000..c6473c10 --- /dev/null +++ b/spp_simulation/models/simulation_scenario_template.py @@ -0,0 +1,79 @@ +import logging + +from odoo import api, fields, models + +_logger = logging.getLogger(__name__) + + +class SimulationScenarioTemplate(models.Model): + """Pre-built targeting scenario templates for non-technical users.""" + + _name = "spp.simulation.scenario.template" + _description = "Simulation Scenario Template" + _order = "category, sequence, name" + + name = fields.Char(string="Name", required=True, translate=True) + description = fields.Text(string="Description", translate=True) + category = fields.Selection( + selection=[ + ("age", "Age-Based"), + ("geographic", "Geographic"), + ("vulnerability", "Vulnerability"), + ("economic", "Economic"), + ("categorical", "Categorical"), + ], + string="Category", + required=True, + index=True, + ) + sequence = fields.Integer(string="Sequence", default=10) + target_type = fields.Selection( + selection=[ + ("group", "Group"), + ("individual", "Individual"), + ], + string="Target Type", + required=True, + default="group", + ) + targeting_expression = fields.Text( + string="Targeting Expression (CEL)", + required=True, + help="CEL expression that defines who is eligible.", + ) + default_amount = fields.Float( + string="Default Amount", + help="Default entitlement amount for this template.", + ) + 
default_amount_mode = fields.Selection( + selection=[ + ("fixed", "Fixed Amount"), + ("multiplier", "Multiplier"), + ], + string="Default Amount Mode", + default="fixed", + ) + ideal_population_expression = fields.Text( + string="Ideal Population Expression (CEL)", + help="CEL expression for the ideal target population (for accuracy measurement).", + ) + icon = fields.Char( + string="Icon", + default="fa-users", + help="Font Awesome icon class for visual identification.", + ) + active = fields.Boolean(string="Active", default=True) + cel_profile = fields.Char( + string="CEL Profile", + compute="_compute_cel_profile", + store=False, + help="CEL profile based on target type (registry_groups or registry_individuals)", + ) + + @api.depends("target_type") + def _compute_cel_profile(self): + """Compute the CEL profile based on target type.""" + for record in self: + record.cel_profile = ( + "registry_groups" if record.target_type == "group" else "registry_individuals" + ) diff --git a/spp_simulation/pyproject.toml b/spp_simulation/pyproject.toml new file mode 100644 index 00000000..4231d0cc --- /dev/null +++ b/spp_simulation/pyproject.toml @@ -0,0 +1,3 @@ +[build-system] +requires = ["whool"] +build-backend = "whool.buildapi" diff --git a/spp_simulation/readme/DESCRIPTION.md b/spp_simulation/readme/DESCRIPTION.md new file mode 100644 index 00000000..6b8cb2a6 --- /dev/null +++ b/spp_simulation/readme/DESCRIPTION.md @@ -0,0 +1,51 @@ +# Targeting Simulation & Fairness Analysis + +Simulate targeting scenarios, analyze fairness and distribution, and compare +different targeting strategies before committing to criteria. + +## Key Features + +- **Scenario Builder**: Define targeting criteria using CEL expressions with + live preview counts +- **Template Library**: Pre-built templates for common targeting patterns + (elderly pension, female-headed households, etc.) 
+- **Distribution Analysis**: Gini coefficient, Lorenz curve, percentile + breakdown +- **Fairness Analysis**: Disparity ratios across gender, disability, location + with traffic-light status indicators +- **Targeting Efficiency**: Confusion matrix, leakage rate, undercoverage + against ideal populations +- **Budget Simulation**: Fixed cap and proportional reduction strategies +- **Scenario Comparison**: Side-by-side comparison of multiple targeting + approaches with overlap analysis +- **Custom Metrics**: Define CEL-based aggregate, coverage, and ratio metrics + +## Privacy + +Only aggregated counts, percentages, and metrics are stored. No individual +beneficiary records are persisted in simulation results. + +## Models + +| Model | Description | +|-------|-------------| +| `spp.simulation.scenario.template` | Pre-built targeting scenario templates | +| `spp.simulation.scenario` | Targeting scenario definitions | +| `spp.simulation.entitlement.rule` | Amount calculation rules | +| `spp.simulation.run` | Aggregated simulation results (non-deletable) | +| `spp.simulation.comparison` | Side-by-side run comparisons | +| `spp.simulation.metric` | Custom evaluation metrics | + +## Security Groups + +| Group | Access | +|-------|--------| +| Simulation Viewer | Read-only access to all simulation data | +| Simulation Officer | Create/edit scenarios, run simulations | +| Simulation Manager | Full access including comparisons and archiving | + +## Menu Path + +Social Protection > Simulation > Scenarios / Results / Comparisons + +Configuration: Social Protection > Simulation > Configuration > Templates / Custom Metrics diff --git a/spp_simulation/report/simulation_report.xml b/spp_simulation/report/simulation_report.xml new file mode 100644 index 00000000..88237817 --- /dev/null +++ b/spp_simulation/report/simulation_report.xml @@ -0,0 +1,92 @@ + + + + diff --git a/spp_simulation/report/simulation_report_views.xml b/spp_simulation/report/simulation_report_views.xml new file 
mode 100644 index 00000000..3d7d2e1c --- /dev/null +++ b/spp_simulation/report/simulation_report_views.xml @@ -0,0 +1,12 @@ + + + + Simulation Report + spp.simulation.run + qweb-pdf + spp_simulation.report_simulation_run + spp_simulation.report_simulation_run + + report + + diff --git a/spp_simulation/security/ir.model.access.csv b/spp_simulation/security/ir.model.access.csv new file mode 100644 index 00000000..78bb7776 --- /dev/null +++ b/spp_simulation/security/ir.model.access.csv @@ -0,0 +1,21 @@ +id,name,model_id:id,group_id:id,perm_read,perm_write,perm_create,perm_unlink +access_spp_simulation_scenario_template_viewer,spp.simulation.scenario.template viewer,model_spp_simulation_scenario_template,group_simulation_viewer,1,0,0,0 +access_spp_simulation_scenario_template_officer,spp.simulation.scenario.template officer,model_spp_simulation_scenario_template,group_simulation_officer,1,1,1,0 +access_spp_simulation_scenario_template_manager,spp.simulation.scenario.template manager,model_spp_simulation_scenario_template,group_simulation_manager,1,1,1,1 +access_spp_simulation_scenario_viewer,spp.simulation.scenario viewer,model_spp_simulation_scenario,group_simulation_viewer,1,0,0,0 +access_spp_simulation_scenario_officer,spp.simulation.scenario officer,model_spp_simulation_scenario,group_simulation_officer,1,1,1,0 +access_spp_simulation_scenario_manager,spp.simulation.scenario manager,model_spp_simulation_scenario,group_simulation_manager,1,1,1,1 +access_spp_simulation_entitlement_rule_viewer,spp.simulation.entitlement.rule viewer,model_spp_simulation_entitlement_rule,group_simulation_viewer,1,0,0,0 +access_spp_simulation_entitlement_rule_officer,spp.simulation.entitlement.rule officer,model_spp_simulation_entitlement_rule,group_simulation_officer,1,1,1,0 +access_spp_simulation_entitlement_rule_manager,spp.simulation.entitlement.rule manager,model_spp_simulation_entitlement_rule,group_simulation_manager,1,1,1,1 +access_spp_simulation_run_viewer,spp.simulation.run 
viewer,model_spp_simulation_run,group_simulation_viewer,1,0,0,0 +access_spp_simulation_run_officer,spp.simulation.run officer,model_spp_simulation_run,group_simulation_officer,1,1,1,0 +access_spp_simulation_run_manager,spp.simulation.run manager,model_spp_simulation_run,group_simulation_manager,1,1,1,0 +access_spp_simulation_comparison_viewer,spp.simulation.comparison viewer,model_spp_simulation_comparison,group_simulation_viewer,1,0,0,0 +access_spp_simulation_comparison_officer,spp.simulation.comparison officer,model_spp_simulation_comparison,group_simulation_officer,1,1,1,0 +access_spp_simulation_comparison_manager,spp.simulation.comparison manager,model_spp_simulation_comparison,group_simulation_manager,1,1,1,1 +access_spp_simulation_metric_viewer,spp.simulation.metric viewer,model_spp_simulation_metric,group_simulation_viewer,1,0,0,0 +access_spp_simulation_metric_officer,spp.simulation.metric officer,model_spp_simulation_metric,group_simulation_officer,1,1,1,0 +access_spp_simulation_metric_manager,spp.simulation.metric manager,model_spp_simulation_metric,group_simulation_manager,1,1,1,1 +access_spp_simulation_compare_wizard_officer,spp.simulation.compare.wizard officer,model_spp_simulation_compare_wizard,group_simulation_officer,1,1,1,0 +access_spp_simulation_compare_wizard_manager,spp.simulation.compare.wizard manager,model_spp_simulation_compare_wizard,group_simulation_manager,1,1,1,1 diff --git a/spp_simulation/security/simulation_security.xml b/spp_simulation/security/simulation_security.xml new file mode 100644 index 00000000..6ec9dc0d --- /dev/null +++ b/spp_simulation/security/simulation_security.xml @@ -0,0 +1,63 @@ + + + + + + Targeting Simulation + Targeting simulation and fairness analysis + + 35 + + + + + Simulation + + Access to targeting simulation and analysis + + + + + Simulation: Read + Technical group for read access to simulation models. + + + + Simulation: Write + Technical group for write access to simulation models. 
+ + + + + Simulation: Validate + Technical group for validation operations on simulation models. + + + + + + Viewer + + Can view simulation scenarios, runs, and comparisons. Cannot modify data. + + + + + Officer + + Can create/edit scenarios, run simulations, and view results. + + + + + Manager + + Full simulation management including comparisons and archiving. + + + + + + + + diff --git a/spp_simulation/services/__init__.py b/spp_simulation/services/__init__.py new file mode 100644 index 00000000..6e8b8219 --- /dev/null +++ b/spp_simulation/services/__init__.py @@ -0,0 +1,4 @@ +# Part of OpenSPP. See LICENSE file for full copyright and licensing details. + +from . import simulation_service +from . import targeting_efficiency_service diff --git a/spp_simulation/services/simulation_service.py b/spp_simulation/services/simulation_service.py new file mode 100644 index 00000000..7e5721e1 --- /dev/null +++ b/spp_simulation/services/simulation_service.py @@ -0,0 +1,607 @@ +import logging +import time + +from odoo import _, api, fields, models +from odoo.exceptions import UserError + +_logger = logging.getLogger(__name__) + + +class SimulationService(models.AbstractModel): + """Orchestration service for running targeting simulations. + + This service orchestrates the simulation workflow but delegates heavy + computation to spp.aggregation.service for statistics, distribution, + and fairness analysis. + """ + + _name = "spp.simulation.service" + _description = "Simulation Service" + + @api.model + def execute_simulation(self, scenario): + """Execute a full simulation for a scenario. + + Pipeline: + 1. Targeting (CEL -> compile_for_batch -> IDs) + 2. Entitlements (fixed/multiplier via SQL, CEL via batch eval) + 3. Budget adjustment + 4. Distribution stats + 5. Fairness analysis + 6. Targeting efficiency + 7. Custom metrics + 8. Geographic breakdown + 9. 
Summary generation + + Returns: + spp.simulation.run record + """ + start_time = time.time() + + # Create run record + snapshot = self._build_scenario_snapshot(scenario) + run = self.env["spp.simulation.run"].create( + { + "scenario_id": scenario.id, + "state": "running", + "scenario_snapshot_json": snapshot, + } + ) + + try: + # Step 1: Targeting + beneficiary_ids = self._execute_targeting(scenario) + total_registry_count = self._get_total_registry_count(scenario) + + beneficiary_count = len(beneficiary_ids) + coverage_rate = (beneficiary_count / total_registry_count * 100) if total_registry_count else 0.0 + + # Step 2: Entitlements + amounts = self._compute_entitlements(scenario, beneficiary_ids) + + # Step 3: Budget adjustment + amounts = self._apply_budget_strategy(scenario, amounts) + + # Step 4: Distribution stats + # Use spp.metrics.distribution for computation + distribution_service = self.env["spp.metrics.distribution"] + distribution_data = distribution_service.compute_distribution(amounts) + gini = distribution_data.get("gini_coefficient", 0.0) + + # Step 5: Fairness analysis + # Use spp.metrics.fairness for computation + fairness_service = self.env["spp.metrics.fairness"] + # Get base domain for population + profile = "registry_groups" if scenario.target_type == "group" else "registry_individuals" + registry = self.env["spp.cel.registry"] + cfg = registry.load_profile(profile) + base_domain = cfg.get("base_domain", []) + fairness_data = fairness_service.compute_fairness(beneficiary_ids, base_domain) + equity_score = fairness_data.get("equity_score", 100.0) + has_disparity = fairness_data.get("has_disparity", False) + + # Step 6: Targeting efficiency + targeting_data = {} + leakage_rate = 0.0 + undercoverage_rate = 0.0 + if scenario.ideal_population_expression: + efficiency_service = self.env["spp.simulation.targeting.efficiency.service"] + targeting_data = efficiency_service.compute_targeting_efficiency(scenario, set(beneficiary_ids)) + leakage_rate = 
targeting_data.get("leakage_rate", 0.0) + undercoverage_rate = targeting_data.get("undercoverage_rate", 0.0) + + # Step 7: Custom metrics + metric_results = self._compute_custom_metrics(scenario, beneficiary_ids) + + # Step 8: Geographic breakdown + geographic_data = self._compute_geographic_breakdown(scenario, beneficiary_ids, amounts) + + # Calculate budget utilization + total_cost = sum(amounts) if amounts else 0.0 + budget_utilization = 0.0 + if scenario.budget_amount and scenario.budget_amount > 0: + budget_utilization = total_cost / scenario.budget_amount * 100 + + duration = time.time() - start_time + + run.write( + { + "state": "completed", + "beneficiary_count": beneficiary_count, + "total_registry_count": total_registry_count, + "coverage_rate": coverage_rate, + "total_cost": total_cost, + "budget_utilization": budget_utilization, + "gini_coefficient": gini, + "equity_score": equity_score, + "has_disparity": has_disparity, + "leakage_rate": leakage_rate, + "undercoverage_rate": undercoverage_rate, + "distribution_json": distribution_data, + "fairness_json": fairness_data, + "targeting_efficiency_json": targeting_data, + "geographic_json": geographic_data, + "metric_results_json": metric_results, + "execution_duration_seconds": duration, + "executed_at": fields.Datetime.now(), + } + ) + _logger.info( + "Simulation completed for scenario '%s': %d beneficiaries, cost %.2f, " "equity %.0f, duration %.2fs", + scenario.name, + beneficiary_count, + total_cost, + equity_score, + duration, + ) + except Exception as exc: + duration = time.time() - start_time + _logger.error( + "Simulation failed for scenario '%s': %s", + scenario.name, + exc, + exc_info=True, + ) + run.write( + { + "state": "failed", + "error_message": str(exc), + "execution_duration_seconds": duration, + "executed_at": fields.Datetime.now(), + } + ) + return run + + def _build_scenario_snapshot(self, scenario): + """Build a JSON snapshot of the scenario configuration.""" + rules = [] + for rule in 
scenario.entitlement_rule_ids: + rules.append( + { + "amount_mode": rule.amount_mode, + "amount": rule.amount, + "multiplier_field": rule.multiplier_field, + "max_multiplier": rule.max_multiplier, + "amount_cel_expression": rule.amount_cel_expression, + "condition_cel_expression": rule.condition_cel_expression, + } + ) + return { + "name": scenario.name, + "target_type": scenario.target_type, + "targeting_expression": scenario.targeting_expression, + "budget_amount": scenario.budget_amount, + "budget_strategy": scenario.budget_strategy, + "entitlement_rules": rules, + "ideal_population_expression": scenario.ideal_population_expression, + } + + def _get_cel_profile(self, scenario): + """Get the CEL profile name based on scenario target type.""" + return "registry_groups" if scenario.target_type == "group" else "registry_individuals" + + def _execute_targeting(self, scenario): + """Execute the targeting expression and return all matching IDs. + + NOTE: In Phase 6, this should use spp.aggregation.scope for unified + targeting. For now, it continues using CEL directly for backward + compatibility. + """ + # Load the CEL profile configuration + profile = self._get_cel_profile(scenario) + registry = self.env["spp.cel.registry"] + cfg = registry.load_profile(profile) + + # Execute with profile context + executor = self.env["spp.cel.executor"].with_context(cel_cfg=cfg) + all_ids = [] + for batch_ids in executor.compile_for_batch("res.partner", scenario.targeting_expression, batch_size=5000): + all_ids.extend(batch_ids) + return all_ids + + def _get_total_registry_count(self, scenario): + """Get the total count of registrants in the registry. + + Uses the same base domain as the CEL profile to ensure consistency. 
+ """ + profile = self._get_cel_profile(scenario) + registry = self.env["spp.cel.registry"] + cfg = registry.load_profile(profile) + base_domain = cfg.get("base_domain", []) + return self.env["res.partner"].search_count(base_domain) + + def _compute_entitlements(self, scenario, beneficiary_ids): + """Compute entitlement amounts for all beneficiaries. + + For fixed/multiplier modes: use SQL aggregation for speed. + For CEL mode: use per-record evaluation in batches of 5000. + """ + if not beneficiary_ids or not scenario.entitlement_rule_ids: + return [0.0] * len(beneficiary_ids) + + # Initialize per-beneficiary amounts + amounts_by_id = {beneficiary_id: 0.0 for beneficiary_id in beneficiary_ids} + + for rule in scenario.entitlement_rule_ids: + # Determine which beneficiaries this rule applies to + applicable_ids = beneficiary_ids + if rule.condition_cel_expression: + applicable_ids = self._filter_by_condition( + rule.condition_cel_expression, + beneficiary_ids, + scenario.target_type, + ) + + if rule.amount_mode == "fixed": + for beneficiary_id in applicable_ids: + amounts_by_id[beneficiary_id] += rule.amount + + elif rule.amount_mode == "multiplier": + self._apply_multiplier_rule(rule, applicable_ids, amounts_by_id) + + elif rule.amount_mode == "cel": + self._apply_cel_rule(rule, applicable_ids, amounts_by_id) + + return [amounts_by_id[beneficiary_id] for beneficiary_id in beneficiary_ids] + + def _filter_by_condition(self, condition_expression, beneficiary_ids, target_type): + """Filter beneficiary IDs using a CEL condition expression.""" + cel_service = self.env["spp.cel.service"] + profile = "registry_groups" if target_type == "group" else "registry_individuals" + try: + result = cel_service.compile_expression( + condition_expression, + profile=profile, + base_domain=[("id", "in", beneficiary_ids)], + limit=-1, + ) + return result.get("ids", []) + except Exception: + _logger.warning( + "Condition expression failed, including all beneficiaries", + exc_info=True, 
+ ) + return beneficiary_ids + + def _apply_multiplier_rule(self, rule, applicable_ids, amounts_by_id): + """Apply a multiplier rule to compute amounts based on a field value.""" + if not applicable_ids: + return + field_name = rule.multiplier_field + # Validate the field exists + partner_model = self.env["res.partner"] + if field_name not in partner_model._fields: + _logger.warning("Multiplier field '%s' does not exist on res.partner", field_name) + return + + batch_size = 5000 + for i in range(0, len(applicable_ids), batch_size): + batch = applicable_ids[i : i + batch_size] + records = partner_model.browse(batch) + for record in records: + value = getattr(record, field_name, 0) or 0 + if isinstance(value, int | float): + if rule.max_multiplier and rule.max_multiplier > 0: + value = min(value, rule.max_multiplier) + amounts_by_id[record.id] += rule.amount * value + + def _apply_cel_rule(self, rule, applicable_ids, amounts_by_id): + """Apply a CEL amount rule by evaluating per-record in batches.""" + if not applicable_ids: + return + cel_service = self.env["spp.cel.service"] + partner_model = self.env["res.partner"] + + batch_size = 5000 + for i in range(0, len(applicable_ids), batch_size): + batch = applicable_ids[i : i + batch_size] + records = partner_model.browse(batch) + for record in records: + try: + context = {"me": record, "base_amount": rule.amount or 0.0} + result = cel_service.evaluate_expression(rule.amount_cel_expression, context) + if isinstance(result, int | float) and result >= 0: + amounts_by_id[record.id] += float(result) + except Exception: + _logger.debug( + "CEL amount evaluation failed for record %d", + record.id, + exc_info=True, + ) + + def _apply_budget_strategy(self, scenario, amounts): + """Apply budget constraints to the computed amounts.""" + if scenario.budget_strategy == "none" or not scenario.budget_amount: + return amounts + + total = sum(amounts) + if total <= 0: + return amounts + + budget = scenario.budget_amount + + if 
scenario.budget_strategy == "cap_total": + if total <= budget: + return amounts + # Include beneficiaries at full amount until budget runs out + capped = [] + remaining_budget = budget + for amount in amounts: + if remaining_budget >= amount: + capped.append(amount) + remaining_budget -= amount + else: + capped.append(0.0) + return capped + + elif scenario.budget_strategy == "proportional_reduction": + if total <= budget: + return amounts + # Reduce all amounts proportionally to fit within budget + ratio = budget / total + return [a * ratio for a in amounts] + + return amounts + + def _compute_custom_metrics(self, scenario, beneficiary_ids): + """Evaluate custom metrics for the simulation.""" + results = {} + cel_service = self.env["spp.cel.service"] + profile = "registry_groups" if scenario.target_type == "group" else "registry_individuals" + + for metric in scenario.metric_ids: + try: + if metric.metric_type == "aggregate": + result = cel_service.compile_expression( + metric.cel_expression, + profile=profile, + base_domain=[("id", "in", beneficiary_ids)], + limit=0, + ) + count = result.get("count", 0) + results[metric.name] = { + "type": "aggregate", + "aggregation": metric.aggregation, + "value": count, + } + + elif metric.metric_type == "coverage": + result = cel_service.compile_expression( + metric.cel_expression, + profile=profile, + base_domain=[("id", "in", beneficiary_ids)], + limit=0, + ) + matching = result.get("count", 0) + total = len(beneficiary_ids) if beneficiary_ids else 1 + results[metric.name] = { + "type": "coverage", + "matching": matching, + "total": total, + "rate": matching / total * 100 if total else 0, + } + + elif metric.metric_type == "ratio": + numerator_result = cel_service.compile_expression( + metric.numerator_expression, + profile=profile, + base_domain=[("id", "in", beneficiary_ids)], + limit=0, + ) + denominator_result = cel_service.compile_expression( + metric.denominator_expression, + profile=profile, + base_domain=[("id", 
"in", beneficiary_ids)], + limit=0, + ) + numerator = numerator_result.get("count", 0) + denominator = denominator_result.get("count", 0) + results[metric.name] = { + "type": "ratio", + "numerator": numerator, + "denominator": denominator, + "ratio": numerator / denominator if denominator else 0, + } + except Exception: + _logger.warning( + "Custom metric '%s' evaluation failed", + metric.name, + exc_info=True, + ) + results[metric.name] = {"type": metric.metric_type, "error": True} + + return results + + def _compute_geographic_breakdown(self, scenario, beneficiary_ids, amounts): + """Compute per-area aggregated counts and amounts.""" + if not beneficiary_ids: + return {} + + partner_model = self.env["res.partner"] + amount_map = dict(zip(beneficiary_ids, amounts, strict=False)) + + # Build area_id -> list of partner IDs mapping using read() + beneficiary_records = partner_model.browse(beneficiary_ids).read(["area_id"]) + area_to_ids = {} + for record in beneficiary_records: + area_val = record.get("area_id") + if area_val: + area_key = str(area_val[0]) + area_name = area_val[1] + else: + area_key = "none" + area_name = "No Area" + if area_key not in area_to_ids: + area_to_ids[area_key] = {"name": area_name, "ids": []} + area_to_ids[area_key]["ids"].append(record["id"]) + + geographic_data = {} + total_count = len(beneficiary_ids) + for area_key, area_info in area_to_ids.items(): + ids = area_info["ids"] + count = len(ids) + area_amount = sum(amount_map.get(pid, 0) for pid in ids) + geographic_data[area_key] = { + "name": area_info["name"], + "count": count, + "amount": area_amount, + "coverage_rate": count / total_count * 100 if total_count else 0, + } + + return geographic_data + + @api.model + def convert_to_program(self, scenario, options): + """Convert a simulation scenario into a real program. 
+ + Uses the program creation wizard programmatically to ensure all + managers (eligibility, cycle, entitlement, program) are created + consistently with wizard-based creation. + + Args: + scenario: spp.simulation.scenario record + options: dict with optional overrides (name, currency_code, + is_one_time_distribution, import_beneficiaries, + rrule_type, cycle_duration, day, month_by, weekday, + byday, mon-sun) + + Returns: + dict with keys: program (record), warnings (list[str]) + + Raises: + UserError: if preconditions are not met + """ + scenario.ensure_one() + warnings = [] + + # Validate preconditions + self._validate_conversion_preconditions(scenario, options) + + # Resolve currency + currency = self._resolve_currency(options) + + # Build wizard values + wizard_vals = self._build_wizard_values(scenario, options, currency) + + # Create wizard transient record + wizard = self.env["spp.program.create.wizard"].create(wizard_vals) + + # Create cash entitlement items on the wizard + self._create_wizard_entitlement_items(wizard, scenario, warnings) + + # Execute wizard to create program + action = wizard.create_program() + program = self.env["spp.program"].browse(action["res_id"]) + + # Link scenario to converted program + scenario.converted_program_id = program.id + + # Post chatter message + scenario.message_post( + body=_("Converted to program: %s") % program.name, + ) + + _logger.info( + "Scenario '%s' (id=%d) converted to program '%s' (id=%d)", + scenario.name, + scenario.id, + program.name, + program.id, + ) + + return {"program": program, "warnings": warnings} + + def _validate_conversion_preconditions(self, scenario, options): + """Validate that the scenario can be converted.""" + if scenario.state != "ready": + raise UserError(_("Only scenarios in 'ready' state can be converted to programs.")) + + if scenario.converted_program_id: + raise UserError(_("Scenario already converted to program '%s'.") % scenario.converted_program_id.name) + + if not 
def _resolve_currency(self, options):
    """Resolve the currency for the new program.

    Looks up the currency named by ``options['currency_code']`` when one is
    given; otherwise falls back to the current company's currency.

    :param options: dict of conversion options.
    :return: a ``res.currency`` record.
    :raises UserError: when the requested currency code does not exist.
    """
    code = options.get("currency_code")
    if not code:
        return self.env.company.currency_id
    match = self.env["res.currency"].search([("name", "=", code)], limit=1)
    if not match:
        raise UserError(_("Currency code '%s' not found.") % code)
    return match

def _build_wizard_values(self, scenario, options, currency):
    """Assemble the values dict for the program creation wizard.

    :param scenario: simulation scenario being promoted to a program.
    :param options: dict of conversion options (name, recurrence, flags).
    :param currency: ``res.currency`` record to use for the program.
    :return: dict of wizard field values.
    """
    vals = {
        "name": options.get("name") or scenario.name,
        "currency_id": currency.id,
        "target_type": scenario.target_type,
        "eligibility_cel_expression": scenario.targeting_expression,
        "entitlement_type": "cash",
        "is_one_time_distribution": options.get("is_one_time_distribution", False),
        "import_beneficiaries": "yes" if options.get("import_beneficiaries") else "no",
        "auto_approve_entitlements": False,
        "rrule_type": options.get("rrule_type", "monthly"),
        "cycle_duration": options.get("cycle_duration", 1),
        "day": options.get("day", 1),
        "month_by": options.get("month_by", "date"),
    }

    # Optional recurrence settings and weekly day flags: copy only the
    # truthy ones through, leaving the wizard defaults otherwise.
    for key in ("weekday", "byday", "mon", "tue", "wed", "thu", "fri", "sat", "sun"):
        if options.get(key):
            vals[key] = options[key]

    return vals

def _create_wizard_entitlement_items(self, wizard, scenario, warnings):
    """Mirror the scenario's entitlement rules as wizard cash items.

    Multiplier-mode rules cannot be mapped automatically (simulation rules
    hold a Char field name while wizard items use a Many2one to
    ir.model.fields), so those fall back to the fixed amount and append a
    human-readable note to ``warnings``.

    :param wizard: the program creation wizard record.
    :param scenario: simulation scenario providing ``entitlement_rule_ids``.
    :param warnings: mutable list collecting conversion warnings.
    """
    item_model = self.env["spp.program.create.wizard.entitlement.cash.item"]

    for rule in scenario.entitlement_rule_ids:
        values = {
            "program_id": wizard.id,
            "sequence": rule.sequence,
            "amount": rule.amount,
        }

        mode = rule.amount_mode
        if mode == "cel":
            values["amount_mode"] = "cel"
            values["amount_cel_expression"] = rule.amount_cel_expression or ""
        elif mode == "multiplier":
            warnings.append(
                _(
                    "Entitlement rule '%s' uses multiplier mode with field '%s'. "
                    "Multiplier rules cannot be automatically converted. "
                    "Using fixed amount (%.2f) instead."
                )
                % (
                    rule.name or f"Rule #{rule.sequence}",
                    rule.multiplier_field or "unknown",
                    rule.amount,
                )
            )

        # Carry the rule's CEL condition over as a wizard-item condition.
        if rule.condition_cel_expression:
            values.update(
                has_condition=True,
                condition_mode="cel",
                cel_condition=rule.condition_cel_expression,
            )

        item_model.create(values)
import logging

from odoo import api, models

_logger = logging.getLogger(__name__)


class TargetingEfficiencyService(models.AbstractModel):
    """Service for computing targeting efficiency metrics."""

    _name = "spp.simulation.targeting.efficiency.service"
    _description = "Targeting Efficiency Service"

    @api.model
    def compute_targeting_efficiency(self, scenario, simulated_ids):
        """Compute targeting efficiency against the ideal population.

        Uses set operations on IDs (in memory only):
        - True positives: simulated ∩ ideal
        - False positives (leakage): simulated - ideal
        - False negatives (undercoverage): ideal - simulated

        True negatives are intentionally not computed: they would require
        the total registry size, which is not available here.

        :param scenario: scenario record providing
            ``ideal_population_expression``.
        :param simulated_ids: iterable of partner IDs selected by the
            simulation run.
        :return: dict with confusion-matrix counts and rates; ``{}`` when
            the scenario defines no ideal population; an ``{"error": ...}``
            dict when the ideal population cannot be evaluated.
        """
        if not scenario.ideal_population_expression:
            return {}

        # Get ideal population IDs; an empty result (including evaluation
        # failure) is reported as an error rather than computed against.
        ideal_ids = self._get_ideal_population_ids(scenario)
        if not ideal_ids:
            return {"error": "Could not determine ideal population."}

        simulated_set = set(simulated_ids)
        ideal_set = set(ideal_ids)

        true_positives = len(simulated_set & ideal_set)
        false_positives = len(simulated_set - ideal_set)  # leakage
        false_negatives = len(ideal_set - simulated_set)  # undercoverage

        total_simulated = len(simulated_set)
        total_ideal = len(ideal_set)

        # Guard against division by zero when either population is empty.
        leakage_rate = false_positives / total_simulated * 100 if total_simulated else 0
        undercoverage_rate = false_negatives / total_ideal * 100 if total_ideal else 0

        return {
            "true_positives": true_positives,
            "false_positives": false_positives,
            "false_negatives": false_negatives,
            "total_simulated": total_simulated,
            "total_ideal": total_ideal,
            "leakage_rate": leakage_rate,
            "undercoverage_rate": undercoverage_rate,
        }

    @api.model
    def _get_ideal_population_ids(self, scenario):
        """Evaluate the scenario's ideal-population expression in batches.

        :param scenario: scenario record whose
            ``ideal_population_expression`` is evaluated over res.partner.
        :return: list of matching partner IDs, or ``[]`` when evaluation
            fails.
        """
        executor = self.env["spp.cel.executor"]
        all_ids = []
        try:
            for batch_ids in executor.compile_for_batch(
                "res.partner",
                scenario.ideal_population_expression,
                batch_size=5000,
            ):
                all_ids.extend(batch_ids)
        except Exception:
            _logger.warning(
                "Failed to compute ideal population for scenario '%s'",
                scenario.name,
                exc_info=True,
            )
            # Bug fix: discard any partially accumulated batches. Returning
            # a partial ID list here would make the caller silently compute
            # leakage/undercoverage rates against an incomplete ideal
            # population; an empty list triggers its explicit error path.
            return []
        return all_ids
+ + + +Odoo Community Association + +
+

OpenSPP Targeting Simulation

+ +

Alpha License: LGPL-3 OpenSPP/openspp-modules

+
+

Targeting Simulation & Fairness Analysis

+

Simulate targeting scenarios, analyze fairness and distribution, and +compare different targeting strategies before committing to criteria.

+
+

Key Features

+
    +
  • Scenario Builder: Define targeting criteria using CEL expressions +with live preview counts
  • +
  • Template Library: Pre-built templates for common targeting +patterns (elderly pension, female-headed households, etc.)
  • +
  • Distribution Analysis: Gini coefficient, Lorenz curve, percentile +breakdown
  • +
  • Fairness Analysis: Disparity ratios across gender, disability, +location with traffic-light status indicators
  • +
  • Targeting Efficiency: Confusion matrix, leakage rate, +undercoverage against ideal populations
  • +
  • Budget Simulation: Fixed cap and proportional reduction +strategies
  • +
  • Scenario Comparison: Side-by-side comparison of multiple +targeting approaches with overlap analysis
  • +
  • Custom Metrics: Define CEL-based aggregate, coverage, and ratio +metrics
  • +
+
+
+

Privacy

+

Only aggregated counts, percentages, and metrics are stored. No +individual beneficiary records are persisted in simulation results.

+
+
+

Models

+ ++++ + + + + + + + + + + + + + + + + + + + + + + + + + +
ModelDescription
spp.simulation.scenario.templatePre-built targeting scenario +templates
spp.simulation.scenarioTargeting scenario definitions
spp.simulation.entitlement.ruleAmount calculation rules
spp.simulation.runAggregated simulation results +(non-deletable)
spp.simulation.comparisonSide-by-side run comparisons
spp.simulation.metricCustom evaluation metrics
+
+
+

Security Groups

+ ++++ + + + + + + + + + + + + + + + + +
GroupAccess
Simulation ViewerRead-only access to all simulation data
Simulation OfficerCreate/edit scenarios, run simulations
Simulation ManagerFull access including comparisons and archiving
+
+ +
+
+

Bug Tracker

+

Bugs are tracked on GitHub Issues. +In case of trouble, please check there if your issue has already been reported. +If you spotted it first, help us to smash it by providing a detailed and welcome +feedback.

+

Do not contact contributors directly about support or help with technical issues.

+
+
+

Credits

+
+

Authors

+
    +
  • OpenSPP.org
  • +
+
+
+

Maintainers

+

Current maintainer:

+

jeremi

+

This module is part of the OpenSPP/openspp-modules project on GitHub.

+

You are welcome to contribute.

+
+
+
+
/** @odoo-module **/

import {Component, useState, onWillStart, onWillUpdateProps} from "@odoo/owl";
import {registry} from "@web/core/registry";
import {standardFieldProps} from "@web/views/fields/standard_field_props";

/**
 * Read-only field widget rendering a side-by-side comparison table of
 * simulation runs stored in a JSON field value of shape {runs: [...]}.
 */
export class SimulationComparisonTable extends Component {
    static template = "spp_simulation.SimulationComparisonTable";
    static props = {
        ...standardFieldProps,
    };

    setup() {
        this.state = useState({
            runs: [],
            // Metric descriptors: key into each run object, display label,
            // and how the raw value is formatted.
            metrics: [
                {key: "beneficiary_count", label: "Beneficiaries", format: "number"},
                {key: "total_cost", label: "Total Cost", format: "currency"},
                {key: "coverage_rate", label: "Coverage Rate", format: "percent"},
                {key: "equity_score", label: "Equity Score", format: "score"},
                {key: "gini_coefficient", label: "Benefit Equality (Gini)", format: "decimal"},
                {key: "leakage_rate", label: "Leakage", format: "percent"},
                {key: "undercoverage_rate", label: "Missed Population", format: "percent"},
                {key: "targeting_accuracy", label: "Targeting Accuracy", format: "percent"},
                {key: "budget_utilization", label: "Budget Utilization", format: "percent"},
            ],
        });

        onWillStart(() => {
            this.updateFromProps();
        });

        onWillUpdateProps((nextProps) => {
            this.updateFromProps(nextProps);
        });
    }

    /** Refresh state.runs from the record's JSON field value. */
    updateFromProps(props = this.props) {
        const value = props.record?.data?.[props.name];
        if (value && value.runs) {
            this.state.runs = value.runs;
        } else {
            this.state.runs = [];
        }
    }

    /**
     * Format a raw metric value for display according to its format tag.
     * Returns "-" for null/undefined values.
     */
    formatValue(value, format) {
        if (value === null || value === undefined) return "-";
        switch (format) {
            case "number":
                return value.toLocaleString();
            case "currency":
                return value.toLocaleString(undefined, {
                    minimumFractionDigits: 2,
                    maximumFractionDigits: 2,
                });
            case "percent":
                return value.toFixed(1) + "%";
            case "score":
                return Math.round(value) + "/100";
            case "decimal":
                return value.toFixed(2);
            default:
                return String(value);
        }
    }

    /**
     * Format an ISO date-time string for display; falls back to the raw
     * string when it cannot be parsed.
     *
     * Bug fix: `new Date(bad)` does not throw — it yields an Invalid Date —
     * so the previous try/catch never fired and garbage input rendered as
     * "Invalid Date". Check getTime() for NaN instead.
     */
    formatDate(isoString) {
        if (!isoString) return "";
        const date = new Date(isoString);
        if (isNaN(date.getTime())) {
            return isoString;
        }
        return date.toLocaleDateString(undefined, {
            month: "short",
            day: "numeric",
            year: "numeric",
            hour: "2-digit",
            minute: "2-digit",
        });
    }

    /**
     * Whether the run at runIndex holds the best value for metricKey among
     * all runs (missing values are treated as 0, matching the table
     * display). Lower is better for cost/inequality/error metrics.
     */
    isBestValue(metricKey, runIndex) {
        const values = this.state.runs.map((r) => r[metricKey] || 0);
        const value = values[runIndex];
        // For these metrics, lower is better
        const lowerIsBetter = ["gini_coefficient", "leakage_rate", "undercoverage_rate", "total_cost"];
        if (lowerIsBetter.includes(metricKey)) {
            return value === Math.min(...values);
        }
        return value === Math.max(...values);
    }
}

registry.category("fields").add("simulation_comparison_table", {
    component: SimulationComparisonTable,
    supportedTypes: ["json"],
});
+
+ No comparison data available. Click "Recompute" to generate. +
+
+ + + + + + + + + + + + + + + +
Metric +
+ +
+ + + +
+
+
+
+
/** @odoo-module **/

import {Component, useState, onWillStart, onWillUpdateProps} from "@odoo/owl";
import {useService} from "@web/core/utils/hooks";
import {registry} from "@web/core/registry";
import {standardFieldProps} from "@web/views/fields/standard_field_props";

/**
 * Field widget rendering the fairness (disparity) analysis of a simulation
 * run. Loads fairness_json, equity_score and has_disparity from the
 * spp.simulation.run record via the ORM.
 */
export class SimulationFairnessTable extends Component {
    static template = "spp_simulation.SimulationFairnessTable";
    static props = {
        ...standardFieldProps,
    };

    setup() {
        this.orm = useService("orm");
        this.state = useState({
            loading: true,
            attributes: [],
            equityScore: 0,
            hasDisparity: false,
        });

        onWillStart(async () => {
            await this.loadData();
        });

        // Reload only when the displayed record actually changes.
        onWillUpdateProps(async (nextProps) => {
            if (nextProps.record?.resId !== this.props.record?.resId) {
                await this.loadData(nextProps.record?.resId);
            }
        });
    }

    get resId() {
        return this.props.record?.resId;
    }

    /**
     * Fetch fairness data for the given record id (defaults to the current
     * record) and populate component state.
     */
    async loadData(recordId = null) {
        const resId = recordId || this.resId;
        if (!resId) {
            this.state.loading = false;
            return;
        }
        this.state.loading = true;
        try {
            const [data] = await this.orm.read(
                "spp.simulation.run",
                [resId],
                ["fairness_json", "equity_score", "has_disparity"]
            );
            if (data && data.fairness_json) {
                const fairness = data.fairness_json;
                this.state.equityScore = data.equity_score || 0;
                this.state.hasDisparity = data.has_disparity || false;
                this.state.attributes = Object.entries(fairness.attributes || {}).map(([key, attr]) => ({
                    name: key,
                    groups: attr.groups || [],
                    worstRatio: attr.worst_ratio || 0,
                    hasDisparity: attr.has_disparity || false,
                }));
            } else {
                // Bug fix: reset state when the record has no fairness data,
                // otherwise the previous record's results remain on screen
                // after switching records.
                this.state.attributes = [];
                this.state.equityScore = 0;
                this.state.hasDisparity = false;
            }
        } catch (error) {
            console.error("Failed to load fairness data:", error);
        } finally {
            this.state.loading = false;
        }
    }

    /** Traffic-light glyph for a group's coverage status. */
    getStatusIcon(status) {
        if (status === "proportional" || status === "fair") return "\u2713";
        if (status === "low_coverage" || status === "warning") return "\u26A0";
        return "\u2717";
    }

    /** Bootstrap text class matching the status severity. */
    getStatusClass(status) {
        if (status === "proportional" || status === "fair") return "text-success";
        if (status === "low_coverage" || status === "warning") return "text-warning";
        return "text-danger";
    }

    /** Human-readable label for a status code (falls back to the raw code). */
    getStatusLabel(status) {
        if (status === "proportional" || status === "fair") return "Proportional";
        if (status === "low_coverage" || status === "warning") return "Low coverage";
        if (status === "under_represented" || status === "disparity") return "Under-represented";
        return status;
    }

    formatRatio(value) {
        return (value || 0).toFixed(2);
    }

    formatPercent(value) {
        return (value || 0).toFixed(1) + "%";
    }

    /** Badge color for the overall equity score (80+/60+/below). */
    getScoreBadgeClass() {
        if (this.state.equityScore >= 80) return "bg-success";
        if (this.state.equityScore >= 60) return "bg-warning";
        return "bg-danger";
    }
}

registry.category("fields").add("simulation_fairness_table", {
    component: SimulationFairnessTable,
    supportedTypes: ["json"],
});
+
+