From 2b4e4ee76299a36d93829107bd516d68a09cfe27 Mon Sep 17 00:00:00 2001 From: Edwin Gonzales Date: Mon, 16 Mar 2026 13:32:59 +0800 Subject: [PATCH 1/2] fix(spp_analytics): correct DESCRIPTION.md and add coverage tests and QA guide MIME-Version: 1.0 Content-Type: text/plain; charset=UTF-8 Content-Transfer-Encoding: 8bit Fix model names (spp.aggregation.* → spp.analytics.*), menu paths, dependency name, and security group documentation in DESCRIPTION.md. Add USAGE.md with UI testing guide for QA verification. Add 45 new tests covering error handling, cache paths, and edge cases to bring coverage above 95%. --- spp_analytics/README.rst | 469 +++++++++++++-- spp_analytics/readme/DESCRIPTION.md | 50 +- spp_analytics/readme/USAGE.md | 253 +++++++++ spp_analytics/tests/__init__.py | 1 + spp_analytics/tests/test_coverage_gaps.py | 661 ++++++++++++++++++++++ 5 files changed, 1372 insertions(+), 62 deletions(-) create mode 100644 spp_analytics/readme/USAGE.md create mode 100644 spp_analytics/tests/test_coverage_gaps.py diff --git a/spp_analytics/README.rst b/spp_analytics/README.rst index 4f09de58..c49ec394 100644 --- a/spp_analytics/README.rst +++ b/spp_analytics/README.rst @@ -22,7 +22,7 @@ OpenSPP Analytics |badge1| |badge2| |badge3| -Unified aggregation service that all consumers (simulation API, GIS API, +Unified analytics service that all consumers (simulation API, GIS API, dashboards) use to compute population statistics with demographic breakdowns and privacy enforcement. Resolves a scope (CEL expression, area, polygon, explicit IDs) to registrant IDs, computes requested @@ -31,46 +31,52 @@ statistics, applies k-anonymity suppression, and caches results. 
Key Capabilities ~~~~~~~~~~~~~~~~ -- Single entry point (``spp.aggregation.service.compute_aggregation``) - for all analytics queries +- Single entry point (``spp.analytics.service.compute_aggregation``) for + all analytics queries - Scope resolution: CEL expressions, admin areas, area tags, spatial polygons/buffers, explicit IDs +- Indicator registry with built-in statistics (count, gini) and + extensible via CEL variables - Multi-dimensional breakdown (up to 3 dimensions) using demographic dimensions -- Result caching with configurable TTL and manual invalidation +- Result caching with per-scope-type TTL and manual invalidation - Per-user access rules controlling scope types, dimensions, and k-anonymity thresholds Key Models ~~~~~~~~~~ -+----------------------------------------+----------------------------------+ -| Model | Description | -+========================================+==================================+ -| ``spp.aggregation.scope`` | Defines what to aggregate (CEL, | -| | area, polygon, explicit IDs) | -+----------------------------------------+----------------------------------+ -| ``spp.aggregation.access.rule`` | Per-user/group access level, | -| | scope restrictions, k-threshold | -+----------------------------------------+----------------------------------+ -| ``spp.aggregation.cache.entry`` | Cached aggregation results | -+----------------------------------------+----------------------------------+ -| ``spp.aggregation.service`` | Abstract service: main | -| | aggregation entry point | -+----------------------------------------+----------------------------------+ -| ``spp.aggregation.scope.resolver`` | Abstract service: resolves | -| | scopes to registrant IDs | -+----------------------------------------+----------------------------------+ -| ``spp.aggregation.statistic.registry`` | Abstract service: dispatches | -| | statistic computation | -+----------------------------------------+----------------------------------+ 
++--------------------------------------+----------------------------------+ +| Model | Description | ++======================================+==================================+ +| ``spp.analytics.scope`` | Defines what to aggregate (CEL, | +| | area, polygon, explicit IDs) | ++--------------------------------------+----------------------------------+ +| ``spp.analytics.access.rule`` | Per-user/group access level, | +| | scope restrictions, k-threshold | ++--------------------------------------+----------------------------------+ +| ``spp.analytics.cache.entry`` | Cached aggregation results with | +| | TTL expiration | ++--------------------------------------+----------------------------------+ +| ``spp.analytics.service`` | Abstract service: main | +| | aggregation entry point | ++--------------------------------------+----------------------------------+ +| ``spp.analytics.scope.resolver`` | Abstract service: resolves | +| | scopes to registrant IDs | ++--------------------------------------+----------------------------------+ +| ``spp.analytics.indicator.registry`` | Abstract service: maps statistic | +| | names to computation logic | ++--------------------------------------+----------------------------------+ +| ``spp.analytics.cache`` | Abstract service: cache | +| | operations with TTL management | ++--------------------------------------+----------------------------------+ Configuration ~~~~~~~~~~~~~ After installing: -1. Navigate to **Settings > Aggregation > Configuration > Scopes** to +1. Navigate to **Settings > Analytics > Configuration > Scopes** to define reusable scopes 2. 
Configure **Access Rules** to set per-user/group privacy levels and scope restrictions @@ -80,41 +86,426 @@ After installing: UI Location ~~~~~~~~~~~ -- **Menu**: Settings > Aggregation > Configuration > Scopes -- **Menu**: Settings > Aggregation > Configuration > Demographic +- **Menu**: Settings > Analytics > Configuration > Scopes +- **Menu**: Settings > Analytics > Configuration > Demographic Dimensions -- **Menu**: Settings > Aggregation > Configuration > Access Rules +- **Menu**: Settings > Analytics > Configuration > Access Rules Security ~~~~~~~~ -============================= ============================= -Group Access -============================= ============================= -``group_aggregation_read`` Read scopes and cache entries -``group_aggregation_write`` Full CRUD on scopes and cache -``group_aggregation_manager`` Full CRUD on access rules -============================= ============================= ++-------------------------------+--------------------------------------+ +| Group | Access | ++===============================+======================================+ +| ``group_aggregation_read`` | Read scopes and cache entries (Tier | +| | 3 technical) | ++-------------------------------+--------------------------------------+ +| ``group_aggregation_write`` | Write scopes and cache entries (Tier | +| | 3 technical) | ++-------------------------------+--------------------------------------+ +| ``group_aggregation_viewer`` | View aggregate statistics only (Tier | +| | 2) | ++-------------------------------+--------------------------------------+ +| ``group_aggregation_officer`` | Query with individual record access | +| | (Tier 2) | ++-------------------------------+--------------------------------------+ +| ``group_aggregation_manager`` | Full management including access | +| | rules (Tier 2) | ++-------------------------------+--------------------------------------+ Extension Points ~~~~~~~~~~~~~~~~ -- Add new scope types by extending 
``spp.aggregation.scope`` and - ``spp.aggregation.scope.resolver`` -- Register custom statistics via ``spp.aggregation.statistic.registry`` +- Add new scope types by extending ``spp.analytics.scope`` and + ``spp.analytics.scope.resolver`` +- Register custom statistics via ``spp.analytics.indicator.registry`` - Override ``_compute_single_statistic()`` for custom computation logic Dependencies ~~~~~~~~~~~~ ``base``, ``spp_cel_domain``, ``spp_area``, ``spp_registry``, -``spp_security``, ``spp_metrics_services`` +``spp_security``, ``spp_metric_service`` **Table of contents** .. contents:: :local: +Usage +===== + +This guide covers manual testing of the OpenSPP Analytics module for QA +verification. All tests assume you are logged in as an administrator. + +Prerequisites +~~~~~~~~~~~~~ + +- The module **OpenSPP Analytics** is installed +- Test registrants exist in the system (individuals and groups) +- At least one administrative area exists under **Registry > + Configuration > Areas** +- At least one area tag exists (e.g., "Urban", "Rural") + +Accessing the Module +~~~~~~~~~~~~~~~~~~~~ + +1. Navigate to **Settings > Analytics > Configuration** +2. Verify the following three menu items are visible: + + - **Scopes** + - **Demographic Dimensions** + - **Access Rules** + +.. + + **Note**: The Analytics menu requires the **Analytics Manager** role. + If the menu is not visible, check that your user has the Manager + privilege under the **Analytics Engine** category in **Settings > + Users & Companies > Users**. + +Test 1: Create and Validate Scopes +~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~ + +**1.1 Create an Administrative Area Scope** + +1. Go to **Settings > Analytics > Configuration > Scopes** +2. Click **New** +3. Fill in: + + - **Name**: "District Test Scope" + - **Scope Type**: "Administrative Area" + +4. Verify that the **Administrative Area** tab appears in the notebook +5. Select an area in the **Area** field +6. Leave **Include Child Areas** checked +7. 
Click **Save** +8. Verify the **Registrants** stat button in the top-right shows a count + > 0 +9. Click the **Registrants** stat button and verify it opens a list of + registrants filtered to those in the selected area + +**1.2 Create a CEL Expression Scope** + +1. Click **New** from the Scopes list +2. Fill in: + + - **Name**: "Adult Individuals" + - **Scope Type**: "CEL Expression" + +3. Verify the **CEL Expression** tab appears +4. Set **CEL Profile** to "Individuals" +5. Enter a CEL expression, e.g.: ``r.is_group == false`` +6. Click **Save** +7. Verify the **Registrants** count updates + +**1.3 Create an Explicit IDs Scope** + +1. Click **New** +2. Fill in: + + - **Name**: "Manual Selection" + - **Scope Type**: "Explicit IDs" + +3. Verify the **Explicit Registrants** tab appears +4. Add 3-5 registrants using the **Add a line** button +5. Click **Save** +6. Verify the **Registrants** count matches the number of registrants + added + +**1.4 Create an Area Tags Scope** + +1. Click **New** +2. Fill in: + + - **Name**: "Urban Areas" + - **Scope Type**: "Area Tags" + +3. Verify the **Area Tags** tab appears +4. Add one or more area tags (e.g., "Urban") +5. Click **Save** +6. Verify the scope is saved without errors + +**1.5 Validation Error Tests** + +Test that required fields are enforced for each scope type: + ++---------------------+-----------------------+-----------------------+ +| Scope Type | Leave blank | Expected result | ++=====================+=======================+=======================+ +| CEL Expression | CEL Expression field | Validation error: | +| | | "CEL expression is | +| | | required..." | ++---------------------+-----------------------+-----------------------+ +| Administrative Area | Area field | Validation error: | +| | | "Area is required..." | ++---------------------+-----------------------+-----------------------+ +| Explicit IDs | Explicit Registrants | Validation error: "At | +| | list | least one registrant | +| | | is required..." 
| ++---------------------+-----------------------+-----------------------+ +| Area Tags | Area Tags field | Validation error: "At | +| | | least one area tag is | +| | | required..." | ++---------------------+-----------------------+-----------------------+ +| Within Polygon | Geometry (GeoJSON) | Validation error: | +| | field | "GeoJSON geometry is | +| | | required..." | ++---------------------+-----------------------+-----------------------+ +| Within Distance | Buffer Radius | Validation error: | +| | | "Buffer radius must | +| | | be a positive | +| | | number." | ++---------------------+-----------------------+-----------------------+ + +**1.6 Spatial Polygon Validation** + +1. Create a scope with type "Within Polygon" +2. Enter invalid JSON (e.g., ``not json``) and save + + - Expected: Validation error about invalid GeoJSON + +3. Enter valid JSON but wrong type (e.g., + ``{"type": "Point", "coordinates": [0, 0]}``) + + - Expected: Validation error about requiring Polygon, MultiPolygon, + Feature, or FeatureCollection + +**1.7 Spatial Buffer Validation** + +1. Create a scope with type "Within Distance" +2. Enter latitude ``100`` (out of range), longitude ``0``, radius ``10`` + + - Expected: Validation error "Latitude must be between -90 and 90." + +3. Enter latitude ``0``, longitude ``200`` (out of range), radius ``10`` + + - Expected: Validation error "Longitude must be between -180 and + 180." + +Test 2: Scope List View and Search +~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~ + +1. Go to **Settings > Analytics > Configuration > Scopes** +2. Verify the list shows columns: **Name**, **Scope Type**, **Registrant + Count**, **Active** +3. Test search filters: + + - Click the **CEL** filter and verify only CEL scopes appear + - Click the **Area** filter and verify only area scopes appear + - Click the **Spatial** filter and verify only spatial scopes appear + +4. Test the **Scope Type** group-by and verify scopes are grouped + correctly +5. 
Test the **Archived** filter: + + - Archive a scope (open it, uncheck **Active**, save) + - Verify it disappears from the default list + - Enable the **Archived** filter and verify it appears with the + "Archived" ribbon + +Test 3: Cache Management +~~~~~~~~~~~~~~~~~~~~~~~~ + +**3.1 Refresh Cache Button** + +1. Open any scope record +2. Verify the **Cache Settings** tab exists in the notebook +3. Note the **Last Cache Refresh** field is empty (or has a previous + date) +4. Click the **Refresh Cache** button (refresh icon in the stat button + area) +5. Verify the **Last Cache Refresh** field is now populated with the + current timestamp + +**3.2 Cache Settings Tab** + +1. Open a scope record and go to the **Cache Settings** tab +2. Verify the following fields are visible: + + - **Enable Caching** (checkbox, default checked) + - **Cache TTL (seconds)** (visible only when caching is enabled) + - **Last Cache Refresh** (read-only) + +3. Uncheck **Enable Caching** and verify the **Cache TTL** field is + hidden +4. Re-check **Enable Caching** and verify **Cache TTL** reappears + +**3.3 Scheduled Action** + +1. Go to **Settings > Technical > Scheduled Actions** +2. Search for "Analytics: Cache Cleanup" +3. Verify the scheduled action exists and is **Active** +4. Verify the interval is set to **1 Hour** + +Test 4: Access Rules +~~~~~~~~~~~~~~~~~~~~ + +**4.1 Create a User-Specific Access Rule** + +1. Go to **Settings > Analytics > Configuration > Access Rules** +2. Click **New** +3. Fill in: + + - **Name**: "Test User Rule" + - **User**: Select a specific user + - **Access Level**: "Aggregates Only" (radio button) + - **Minimum K-Anonymity**: 5 + +4. Click **Save** +5. Verify the record saves without errors + +**4.2 Create a Group-Based Access Rule** + +1. Click **New** +2. Fill in: + + - **Name**: "Test Group Rule" + - **Security Group**: Select a group (e.g., "Internal User") + - **Access Level**: "Individual Records" + +3. 
Click **Save** + +**4.3 Validation: User and Group Mutual Exclusivity** + +1. Create a new access rule +2. Set both **User** and **Security Group** fields +3. Click **Save** + + - Expected: Validation error "A rule cannot apply to both a specific + user and a group." + +4. Clear both **User** and **Security Group** fields +5. Click **Save** + + - Expected: Validation error "A rule must apply to either a user or a + group." + +**4.4 K-Anonymity Validation** + +1. Create a new access rule with a user set +2. Set **Minimum K-Anonymity** to ``0`` and save + + - Expected: Validation error "Minimum k-anonymity must be at least + 1." + +3. Set **Minimum K-Anonymity** to ``101`` and save + + - Expected: Validation error "Minimum k-anonymity should not exceed + 100." + +**4.5 Max Dimensions Validation** + +1. Set **Max Group By Dimensions** to ``-1`` and save + + - Expected: Validation error "Maximum group_by dimensions cannot be + negative." + +2. Set **Max Group By Dimensions** to ``11`` and save + + - Expected: Validation error "Maximum group_by dimensions should not + exceed 10." + +**4.6 Scope Restrictions** + +1. Create an access rule with **Allowed Scope Types** set to "Predefined + Scopes Only" +2. Verify the **Allowed Scopes** tab appears in the notebook +3. Add one or more scopes to the allowed list +4. Change **Allowed Scope Types** to "All Scope Types" +5. Verify the **Allowed Scopes** tab is hidden + +**4.7 Dimension Restrictions** + +1. Open an access rule +2. Go to the **Allowed Dimensions** tab +3. Add one or more demographic dimensions +4. Verify the dimensions are displayed with **Name** and **Label** + columns + +Test 5: Access Rules List View and Search +~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~ + +1. Go to **Settings > Analytics > Configuration > Access Rules** +2. Verify the list columns: drag handle (sequence), **Name**, **User**, + **Security Group**, **Access Level**, **Minimum K-Anonymity**, + **Active** +3. 
Verify rules can be reordered by dragging the handle +4. Test search filters: + + - **Aggregate Only**: Shows only rules with access level "Aggregates + Only" + - **Individual Access**: Shows only rules with access level + "Individual Records" + - **Archived**: Shows archived rules + +5. Test the **Access Level** group-by + +Test 6: Security Groups +~~~~~~~~~~~~~~~~~~~~~~~ + +Verify that users with different security roles see the appropriate +menus: + ++-------------------+--------------------------------------------------+ +| Role | Expected Access | ++===================+==================================================+ +| No Analytics role | Cannot see the **Analytics** menu under Settings | ++-------------------+--------------------------------------------------+ +| Viewer | Cannot see the Analytics menu (Viewer implies | +| | read-only data access, not config) | ++-------------------+--------------------------------------------------+ +| Analyst | Cannot see the Analytics menu (Analyst implies | +| | query access, not config) | ++-------------------+--------------------------------------------------+ +| Manager | Can see and use all three menu items under | +| | **Settings > Analytics > Configuration** | ++-------------------+--------------------------------------------------+ +| Administrator | Full access (admin implies Manager) | ++-------------------+--------------------------------------------------+ + +To test: + +1. Go to **Settings > Users & Companies > Users** +2. Open a test user +3. Under the **Analytics Engine** section, set the privilege level +4. Log in as that user and verify menu visibility matches the table + above + +Test 7: Demographic Dimensions +~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~ + +1. Go to **Settings > Analytics > Configuration > Demographic + Dimensions** +2. Verify the menu opens the demographic dimension list (provided by + ``spp_metric_service``) +3. This view should show available dimensions like "registrant_type", + "area", etc. +4. 
Verify dimensions can be viewed but that create/edit depends on your + permission level + +Common Issues +~~~~~~~~~~~~~ + ++----------------------------------+----------------------------------+ +| Symptom | Likely Cause | ++==================================+==================================+ +| Analytics menu not visible | User lacks the **Manager** role | +| | under Analytics Engine | ++----------------------------------+----------------------------------+ +| Registrant count shows 0 on area | No registrants assigned to the | +| scope | selected area | ++----------------------------------+----------------------------------+ +| CEL scope shows 0 registrants | CEL expression syntax error or | +| | no matching registrants | ++----------------------------------+----------------------------------+ +| Spatial scopes return empty | The ``spp_aggregation_spatial`` | +| results | bridge module is not installed | ++----------------------------------+----------------------------------+ +| "Refresh Cache" button has no | Cache was already empty; check | +| visible effect | **Last Cache Refresh** timestamp | ++----------------------------------+----------------------------------+ + Bug Tracker =========== diff --git a/spp_analytics/readme/DESCRIPTION.md b/spp_analytics/readme/DESCRIPTION.md index e70ceb2a..cb446d86 100644 --- a/spp_analytics/readme/DESCRIPTION.md +++ b/spp_analytics/readme/DESCRIPTION.md @@ -1,4 +1,4 @@ -Unified aggregation service that all consumers (simulation API, GIS API, dashboards) +Unified analytics service that all consumers (simulation API, GIS API, dashboards) use to compute population statistics with demographic breakdowns and privacy enforcement. Resolves a scope (CEL expression, area, polygon, explicit IDs) to registrant IDs, computes requested statistics, applies k-anonymity suppression, @@ -6,51 +6,55 @@ and caches results. 
### Key Capabilities -- Single entry point (`spp.aggregation.service.compute_aggregation`) for all analytics queries +- Single entry point (`spp.analytics.service.compute_aggregation`) for all analytics queries - Scope resolution: CEL expressions, admin areas, area tags, spatial polygons/buffers, explicit IDs +- Indicator registry with built-in statistics (count, gini) and extensible via CEL variables - Multi-dimensional breakdown (up to 3 dimensions) using demographic dimensions -- Result caching with configurable TTL and manual invalidation +- Result caching with per-scope-type TTL and manual invalidation - Per-user access rules controlling scope types, dimensions, and k-anonymity thresholds ### Key Models -| Model | Description | -| ------------------------------ | ----------------------------------------------------------- | -| `spp.aggregation.scope` | Defines what to aggregate (CEL, area, polygon, explicit IDs)| -| `spp.aggregation.access.rule` | Per-user/group access level, scope restrictions, k-threshold| -| `spp.aggregation.cache.entry` | Cached aggregation results | -| `spp.aggregation.service` | Abstract service: main aggregation entry point | -| `spp.aggregation.scope.resolver` | Abstract service: resolves scopes to registrant IDs | -| `spp.aggregation.statistic.registry` | Abstract service: dispatches statistic computation | +| Model | Description | +| ------------------------------------ | ------------------------------------------------------------ | +| `spp.analytics.scope` | Defines what to aggregate (CEL, area, polygon, explicit IDs) | +| `spp.analytics.access.rule` | Per-user/group access level, scope restrictions, k-threshold | +| `spp.analytics.cache.entry` | Cached aggregation results with TTL expiration | +| `spp.analytics.service` | Abstract service: main aggregation entry point | +| `spp.analytics.scope.resolver` | Abstract service: resolves scopes to registrant IDs | +| `spp.analytics.indicator.registry` | Abstract service: maps statistic 
names to computation logic | +| `spp.analytics.cache` | Abstract service: cache operations with TTL management | ### Configuration After installing: -1. Navigate to **Settings > Aggregation > Configuration > Scopes** to define reusable scopes +1. Navigate to **Settings > Analytics > Configuration > Scopes** to define reusable scopes 2. Configure **Access Rules** to set per-user/group privacy levels and scope restrictions 3. Verify the **Cache Cleanup** scheduled action is active under **Settings > Technical > Scheduled Actions** ### UI Location -- **Menu**: Settings > Aggregation > Configuration > Scopes -- **Menu**: Settings > Aggregation > Configuration > Demographic Dimensions -- **Menu**: Settings > Aggregation > Configuration > Access Rules +- **Menu**: Settings > Analytics > Configuration > Scopes +- **Menu**: Settings > Analytics > Configuration > Demographic Dimensions +- **Menu**: Settings > Analytics > Configuration > Access Rules ### Security -| Group | Access | -| ----------------------------- | -------------------------------- | -| `group_aggregation_read` | Read scopes and cache entries | -| `group_aggregation_write` | Full CRUD on scopes and cache | -| `group_aggregation_manager` | Full CRUD on access rules | +| Group | Access | +| -------------------------------- | ------------------------------------------------- | +| `group_aggregation_read` | Read scopes and cache entries (Tier 3 technical) | +| `group_aggregation_write` | Write scopes and cache entries (Tier 3 technical) | +| `group_aggregation_viewer` | View aggregate statistics only (Tier 2) | +| `group_aggregation_officer` | Query with individual record access (Tier 2) | +| `group_aggregation_manager` | Full management including access rules (Tier 2) | ### Extension Points -- Add new scope types by extending `spp.aggregation.scope` and `spp.aggregation.scope.resolver` -- Register custom statistics via `spp.aggregation.statistic.registry` +- Add new scope types by extending 
`spp.analytics.scope` and `spp.analytics.scope.resolver` +- Register custom statistics via `spp.analytics.indicator.registry` - Override `_compute_single_statistic()` for custom computation logic ### Dependencies -`base`, `spp_cel_domain`, `spp_area`, `spp_registry`, `spp_security`, `spp_metrics_services` +`base`, `spp_cel_domain`, `spp_area`, `spp_registry`, `spp_security`, `spp_metric_service` diff --git a/spp_analytics/readme/USAGE.md b/spp_analytics/readme/USAGE.md new file mode 100644 index 00000000..3caf222a --- /dev/null +++ b/spp_analytics/readme/USAGE.md @@ -0,0 +1,253 @@ +This guide covers manual testing of the OpenSPP Analytics module for QA +verification. All tests assume you are logged in as an administrator. + +### Prerequisites + +- The module **OpenSPP Analytics** is installed +- Test registrants exist in the system (individuals and groups) +- At least one administrative area exists under **Registry > Configuration > Areas** +- At least one area tag exists (e.g., "Urban", "Rural") + +### Accessing the Module + +1. Navigate to **Settings > Analytics > Configuration** +2. Verify the following three menu items are visible: + - **Scopes** + - **Demographic Dimensions** + - **Access Rules** + +> **Note**: The Analytics menu requires the **Analytics Manager** role. If the +> menu is not visible, check that your user has the Manager privilege under the +> **Analytics Engine** category in **Settings > Users & Companies > Users**. + +### Test 1: Create and Validate Scopes + +**1.1 Create an Administrative Area Scope** + +1. Go to **Settings > Analytics > Configuration > Scopes** +2. Click **New** +3. Fill in: + - **Name**: "District Test Scope" + - **Scope Type**: "Administrative Area" +4. Verify that the **Administrative Area** tab appears in the notebook +5. Select an area in the **Area** field +6. Leave **Include Child Areas** checked +7. Click **Save** +8. Verify the **Registrants** stat button in the top-right shows a count > 0 +9. 
Click the **Registrants** stat button and verify it opens a list of + registrants filtered to those in the selected area + +**1.2 Create a CEL Expression Scope** + +1. Click **New** from the Scopes list +2. Fill in: + - **Name**: "Adult Individuals" + - **Scope Type**: "CEL Expression" +3. Verify the **CEL Expression** tab appears +4. Set **CEL Profile** to "Individuals" +5. Enter a CEL expression, e.g.: `r.is_group == false` +6. Click **Save** +7. Verify the **Registrants** count updates + +**1.3 Create an Explicit IDs Scope** + +1. Click **New** +2. Fill in: + - **Name**: "Manual Selection" + - **Scope Type**: "Explicit IDs" +3. Verify the **Explicit Registrants** tab appears +4. Add 3-5 registrants using the **Add a line** button +5. Click **Save** +6. Verify the **Registrants** count matches the number of registrants added + +**1.4 Create an Area Tags Scope** + +1. Click **New** +2. Fill in: + - **Name**: "Urban Areas" + - **Scope Type**: "Area Tags" +3. Verify the **Area Tags** tab appears +4. Add one or more area tags (e.g., "Urban") +5. Click **Save** +6. Verify the scope is saved without errors + +**1.5 Validation Error Tests** + +Test that required fields are enforced for each scope type: + +| Scope Type | Leave blank | Expected result | +|---|---|---| +| CEL Expression | CEL Expression field | Validation error: "CEL expression is required..." | +| Administrative Area | Area field | Validation error: "Area is required..." | +| Explicit IDs | Explicit Registrants list | Validation error: "At least one registrant is required..." | +| Area Tags | Area Tags field | Validation error: "At least one area tag is required..." | +| Within Polygon | Geometry (GeoJSON) field | Validation error: "GeoJSON geometry is required..." | +| Within Distance | Buffer Radius | Validation error: "Buffer radius must be a positive number." | + +**1.6 Spatial Polygon Validation** + +1. Create a scope with type "Within Polygon" +2. 
Enter invalid JSON (e.g., `not json`) and save + - Expected: Validation error about invalid GeoJSON +3. Enter valid JSON but wrong type (e.g., `{"type": "Point", "coordinates": [0, 0]}`) + - Expected: Validation error about requiring Polygon, MultiPolygon, Feature, or FeatureCollection + +**1.7 Spatial Buffer Validation** + +1. Create a scope with type "Within Distance" +2. Enter latitude `100` (out of range), longitude `0`, radius `10` + - Expected: Validation error "Latitude must be between -90 and 90." +3. Enter latitude `0`, longitude `200` (out of range), radius `10` + - Expected: Validation error "Longitude must be between -180 and 180." + +### Test 2: Scope List View and Search + +1. Go to **Settings > Analytics > Configuration > Scopes** +2. Verify the list shows columns: **Name**, **Scope Type**, **Registrant Count**, **Active** +3. Test search filters: + - Click the **CEL** filter and verify only CEL scopes appear + - Click the **Area** filter and verify only area scopes appear + - Click the **Spatial** filter and verify only spatial scopes appear +4. Test the **Scope Type** group-by and verify scopes are grouped correctly +5. Test the **Archived** filter: + - Archive a scope (open it, uncheck **Active**, save) + - Verify it disappears from the default list + - Enable the **Archived** filter and verify it appears with the "Archived" ribbon + +### Test 3: Cache Management + +**3.1 Refresh Cache Button** + +1. Open any scope record +2. Verify the **Cache Settings** tab exists in the notebook +3. Note the **Last Cache Refresh** field is empty (or has a previous date) +4. Click the **Refresh Cache** button (refresh icon in the stat button area) +5. Verify the **Last Cache Refresh** field is now populated with the current timestamp + +**3.2 Cache Settings Tab** + +1. Open a scope record and go to the **Cache Settings** tab +2. 
Verify the following fields are visible: + - **Enable Caching** (checkbox, default checked) + - **Cache TTL (seconds)** (visible only when caching is enabled) + - **Last Cache Refresh** (read-only) +3. Uncheck **Enable Caching** and verify the **Cache TTL** field is hidden +4. Re-check **Enable Caching** and verify **Cache TTL** reappears + +**3.3 Scheduled Action** + +1. Go to **Settings > Technical > Scheduled Actions** +2. Search for "Analytics: Cache Cleanup" +3. Verify the scheduled action exists and is **Active** +4. Verify the interval is set to **1 Hour** + +### Test 4: Access Rules + +**4.1 Create a User-Specific Access Rule** + +1. Go to **Settings > Analytics > Configuration > Access Rules** +2. Click **New** +3. Fill in: + - **Name**: "Test User Rule" + - **User**: Select a specific user + - **Access Level**: "Aggregates Only" (radio button) + - **Minimum K-Anonymity**: 5 +4. Click **Save** +5. Verify the record saves without errors + +**4.2 Create a Group-Based Access Rule** + +1. Click **New** +2. Fill in: + - **Name**: "Test Group Rule" + - **Security Group**: Select a group (e.g., "Internal User") + - **Access Level**: "Individual Records" +3. Click **Save** + +**4.3 Validation: User and Group Mutual Exclusivity** + +1. Create a new access rule +2. Set both **User** and **Security Group** fields +3. Click **Save** + - Expected: Validation error "A rule cannot apply to both a specific user and a group." +4. Clear both **User** and **Security Group** fields +5. Click **Save** + - Expected: Validation error "A rule must apply to either a user or a group." + +**4.4 K-Anonymity Validation** + +1. Create a new access rule with a user set +2. Set **Minimum K-Anonymity** to `0` and save + - Expected: Validation error "Minimum k-anonymity must be at least 1." +3. Set **Minimum K-Anonymity** to `101` and save + - Expected: Validation error "Minimum k-anonymity should not exceed 100." + +**4.5 Max Dimensions Validation** + +1. 
Set **Max Group By Dimensions** to `-1` and save + - Expected: Validation error "Maximum group_by dimensions cannot be negative." +2. Set **Max Group By Dimensions** to `11` and save + - Expected: Validation error "Maximum group_by dimensions should not exceed 10." + +**4.6 Scope Restrictions** + +1. Create an access rule with **Allowed Scope Types** set to "Predefined Scopes Only" +2. Verify the **Allowed Scopes** tab appears in the notebook +3. Add one or more scopes to the allowed list +4. Change **Allowed Scope Types** to "All Scope Types" +5. Verify the **Allowed Scopes** tab is hidden + +**4.7 Dimension Restrictions** + +1. Open an access rule +2. Go to the **Allowed Dimensions** tab +3. Add one or more demographic dimensions +4. Verify the dimensions are displayed with **Name** and **Label** columns + +### Test 5: Access Rules List View and Search + +1. Go to **Settings > Analytics > Configuration > Access Rules** +2. Verify the list columns: drag handle (sequence), **Name**, **User**, **Security Group**, + **Access Level**, **Minimum K-Anonymity**, **Active** +3. Verify rules can be reordered by dragging the handle +4. Test search filters: + - **Aggregate Only**: Shows only rules with access level "Aggregates Only" + - **Individual Access**: Shows only rules with access level "Individual Records" + - **Archived**: Shows archived rules +5. Test the **Access Level** group-by + +### Test 6: Security Groups + +Verify that users with different security roles see the appropriate menus: + +| Role | Expected Access | +|---|---| +| No Analytics role | Cannot see the **Analytics** menu under Settings | +| Viewer | Cannot see the Analytics menu (Viewer implies read-only data access, not config) | +| Analyst | Cannot see the Analytics menu (Analyst implies query access, not config) | +| Manager | Can see and use all three menu items under **Settings > Analytics > Configuration** | +| Administrator | Full access (admin implies Manager) | + +To test: + +1. 
Go to **Settings > Users & Companies > Users**
+2. Open a test user
+3. Under the **Analytics Engine** section, set the privilege level
+4. Log in as that user and verify menu visibility matches the table above
+
+### Test 7: Demographic Dimensions
+
+1. Go to **Settings > Analytics > Configuration > Demographic Dimensions**
+2. Verify the menu opens the demographic dimension list (provided by `spp_metric_service`)
+3. This view should show available dimensions like "registrant_type", "area", etc.
+4. Verify dimensions can be viewed but that create/edit depends on your permission level
+
+### Common Issues
+
+| Symptom | Likely Cause |
+|---|---|
+| Analytics menu not visible | User lacks the **Manager** role under Analytics Engine |
+| Registrant count shows 0 on area scope | No registrants assigned to the selected area |
+| CEL scope shows 0 registrants | CEL expression syntax error or no matching registrants |
+| Spatial scopes return empty results | The `spp_analytics_spatial` bridge module is not installed |
+| "Refresh Cache" button has no visible effect | Cache was already empty; check **Last Cache Refresh** timestamp |
diff --git a/spp_analytics/tests/__init__.py b/spp_analytics/tests/__init__.py
index 6e41c0f0..6b8644da 100644
--- a/spp_analytics/tests/__init__.py
+++ b/spp_analytics/tests/__init__.py
@@ -13,3 +13,4 @@ from . import test_scope_resolver
 from . import test_indicator_registry
 from . import test_coverage
+from . import test_coverage_gaps
diff --git a/spp_analytics/tests/test_coverage_gaps.py b/spp_analytics/tests/test_coverage_gaps.py
new file mode 100644
index 00000000..8b838d52
--- /dev/null
+++ b/spp_analytics/tests/test_coverage_gaps.py
@@ -0,0 +1,661 @@
+# Part of OpenSPP. See LICENSE file for full copyright and licensing details.
+"""Tests to close remaining coverage gaps in spp_analytics.
+
+Targets untested branches and error-handling paths across all source files.
+""" + +from datetime import timedelta +from unittest.mock import MagicMock, patch + +from odoo import fields +from odoo.exceptions import UserError, ValidationError +from odoo.tests import tagged + +from .common import AnalyticsTestCase + + +@tagged("post_install", "-at_install") +class TestScopeRegistrantCountErrorHandling(AnalyticsTestCase): + """Tests for _compute_registrant_count error paths in analytics_scope.py.""" + + def test_registrant_count_returns_zero_on_validation_error(self): + """_compute_registrant_count catches ValidationError and sets count to 0.""" + scope = self.create_scope( + "area", + area_id=self.area_district.id, + ) + with patch.object( + type(self.env["spp.analytics.scope.resolver"]), + "resolve", + side_effect=ValidationError("test error"), + ): + scope._compute_registrant_count() + self.assertEqual(scope.registrant_count, 0) + + def test_registrant_count_returns_zero_on_user_error(self): + """_compute_registrant_count catches UserError and sets count to 0.""" + scope = self.create_scope( + "area", + area_id=self.area_district.id, + ) + with patch.object( + type(self.env["spp.analytics.scope.resolver"]), + "resolve", + side_effect=UserError("test error"), + ): + scope._compute_registrant_count() + self.assertEqual(scope.registrant_count, 0) + + def test_registrant_count_for_area_scope_resolves(self): + """_compute_registrant_count resolves non-explicit scopes via resolver.""" + scope = self.create_scope( + "area", + area_id=self.area_district.id, + ) + # Area scope should resolve to registrants in that area + self.assertGreater(scope.registrant_count, 0) + + +@tagged("post_install", "-at_install") +class TestActionRefreshCacheWithEntries(AnalyticsTestCase): + """Tests for action_refresh_cache when cache entries exist.""" + + def test_action_refresh_cache_invalidates_existing_entries(self): + """action_refresh_cache invalidates cache entries for the scope's type.""" + scope = self.create_scope( + "area", + area_id=self.area_district.id, 
+ ) + # Store a cache entry for this scope type + cache_service = self.env["spp.analytics.cache"] + cache_service.store_result(scope, ["count"], [], {"total_count": 10}) + + # Verify entry exists + entries_before = self.env["spp.analytics.cache.entry"].sudo().search([("scope_type", "=", "area")]) + self.assertTrue(entries_before) + + # Refresh cache + scope.action_refresh_cache() + + # Entries should be invalidated + entries_after = self.env["spp.analytics.cache.entry"].sudo().search([("scope_type", "=", "area")]) + self.assertFalse(entries_after) + + +@tagged("post_install", "-at_install") +class TestComputeStatisticsErrorHandling(AnalyticsTestCase): + """Tests for _compute_statistics exception handling in service_aggregation.py.""" + + @classmethod + def setUpClass(cls): + super().setUpClass() + cls.service = cls.env["spp.analytics.service"] + + def test_compute_statistics_catches_value_error(self): + """_compute_statistics catches ValueError and returns error dict.""" + with patch.object( + type(self.env["spp.analytics.indicator.registry"]), + "compute", + side_effect=ValueError("bad value"), + ): + result = self.service._compute_statistics(self.registrants[:5].ids, ["count"], k_threshold=5) + self.assertIn("count", result) + self.assertIsNone(result["count"]["value"]) + self.assertIn("error", result["count"]) + self.assertFalse(result["count"]["suppressed"]) + + def test_compute_statistics_catches_attribute_error(self): + """_compute_statistics catches AttributeError and returns error dict.""" + with patch.object( + type(self.env["spp.analytics.indicator.registry"]), + "compute", + side_effect=AttributeError("no such attr"), + ): + result = self.service._compute_statistics(self.registrants[:5].ids, ["some_stat"], k_threshold=5) + self.assertIn("some_stat", result) + self.assertIsNone(result["some_stat"]["value"]) + self.assertIn("error", result["some_stat"]) + + def test_compute_statistics_catches_type_error(self): + """_compute_statistics catches TypeError and 
returns error dict.""" + with patch.object( + type(self.env["spp.analytics.indicator.registry"]), + "compute", + side_effect=TypeError("wrong type"), + ): + result = self.service._compute_statistics(self.registrants[:3].ids, ["count"], k_threshold=5) + self.assertIn("count", result) + self.assertIsNone(result["count"]["value"]) + + def test_compute_statistics_catches_key_error(self): + """_compute_statistics catches KeyError and returns error dict.""" + with patch.object( + type(self.env["spp.analytics.indicator.registry"]), + "compute", + side_effect=KeyError("missing key"), + ): + result = self.service._compute_statistics(self.registrants[:3].ids, ["count"], k_threshold=5) + self.assertIn("count", result) + self.assertIsNone(result["count"]["value"]) + + +@tagged("post_install", "-at_install") +class TestCacheServiceEdgeCases(AnalyticsTestCase): + """Tests for cache service error handling and edge cases.""" + + @classmethod + def setUpClass(cls): + super().setUpClass() + cls.cache_service = cls.env["spp.analytics.cache"] + + def test_get_cached_result_handles_invalid_json(self): + """get_cached_result returns None and cleans up entry with invalid JSON.""" + scope = self.create_scope("area", area_id=self.area_district.id) + # Store valid entry first + self.cache_service.store_result(scope, ["count"], [], {"total": 5}) + + # Corrupt the JSON in the entry + entry = self.env["spp.analytics.cache.entry"].sudo().search([], limit=1) + entry.write({"result_json": "not valid json {"}) + + # Should return None and clean up the bad entry + result = self.cache_service.get_cached_result(scope, ["count"], []) + self.assertIsNone(result) + + # Entry should be deleted + remaining = self.env["spp.analytics.cache.entry"].sudo().search([("id", "=", entry.id)]) + self.assertFalse(remaining) + + def test_get_cached_result_removes_expired_entry(self): + """get_cached_result removes expired entries and returns None.""" + scope = self.create_scope("area", area_id=self.area_district.id) 
+ self.cache_service.store_result(scope, ["count"], [], {"total": 5}) + + # Backdate the entry to make it expired + entry = self.env["spp.analytics.cache.entry"].sudo().search([], limit=1) + expired_time = fields.Datetime.now() - timedelta(seconds=7200) + entry.write({"computed_at": expired_time}) + + # Should return None + result = self.cache_service.get_cached_result(scope, ["count"], []) + self.assertIsNone(result) + + # Expired entry should be removed + remaining = self.env["spp.analytics.cache.entry"].sudo().search([("id", "=", entry.id)]) + self.assertFalse(remaining) + + def test_store_result_updates_existing_entry(self): + """store_result updates existing entry when cache key matches.""" + scope = self.create_scope("area", area_id=self.area_district.id) + + # Store initial result + self.cache_service.store_result(scope, ["count"], [], {"total": 5}) + entry = self.env["spp.analytics.cache.entry"].sudo().search([], limit=1) + original_json = entry.result_json + + # Store updated result with same key + self.cache_service.store_result(scope, ["count"], [], {"total": 10}) + + # Should still be one entry, but with updated content + entries = self.env["spp.analytics.cache.entry"].sudo().search([]) + area_entries = entries.filtered(lambda e: e.scope_type == "area") + self.assertEqual(len(area_entries), 1) + self.assertNotEqual(area_entries.result_json, original_json) + self.assertIn("10", area_entries.result_json) + + def test_cache_resolve_scope_with_int(self): + """Cache _resolve_scope handles integer scope ID.""" + scope = self.create_scope("area", area_id=self.area_district.id) + resolved = self.cache_service._resolve_scope(scope.id) + self.assertEqual(resolved.id, scope.id) + + +@tagged("post_install", "-at_install") +class TestScopeResolverErrorPaths(AnalyticsTestCase): + """Tests for scope resolver error handling paths.""" + + @classmethod + def setUpClass(cls): + super().setUpClass() + cls.resolver = cls.env["spp.analytics.scope.resolver"] + + def 
test_resolve_record_catches_exception_in_resolver_method(self): + """resolve() catches exceptions from resolver methods and returns empty.""" + scope = self.create_scope( + "explicit", + explicit_partner_ids=[(6, 0, self.registrants[:5].ids)], + ) + with patch.object( + type(self.resolver), + "_resolve_explicit", + side_effect=RuntimeError("resolver failed"), + ): + result = self.resolver.resolve(scope) + self.assertEqual(result, []) + + def test_resolve_inline_catches_exception_in_resolver(self): + """_resolve_inline catches exceptions from inline resolver methods.""" + scope_dict = { + "scope_type": "explicit", + "explicit_partner_ids": [1, 2, 3], + } + with patch.object( + type(self.resolver), + "_resolve_explicit_inline", + side_effect=RuntimeError("inline resolver failed"), + ): + result = self.resolver.resolve(scope_dict) + self.assertEqual(result, []) + + def test_resolve_explicit_inline_filters_non_registrants(self): + """_resolve_explicit_inline filters out non-registrant partner IDs.""" + # Create a non-registrant partner + non_registrant = self.env["res.partner"].create( + { + "name": "Not A Registrant", + "is_registrant": False, + } + ) + # Mix registrant and non-registrant IDs + mixed_ids = self.registrants[:3].ids + [non_registrant.id] + scope_dict = { + "scope_type": "explicit", + "explicit_partner_ids": mixed_ids, + } + result = self.resolver._resolve_explicit_inline(scope_dict) + # Non-registrant should be filtered out + self.assertNotIn(non_registrant.id, result) + self.assertEqual(len(result), 3) + + def test_resolve_explicit_inline_empty_list(self): + """_resolve_explicit_inline with empty list returns empty.""" + result = self.resolver._resolve_explicit_inline({"scope_type": "explicit", "explicit_partner_ids": []}) + self.assertEqual(result, []) + + def test_resolve_area_with_membership_indirect_resolution(self): + """_resolve_area_ids finds individuals via group membership.""" + # Create a group in the district + group = 
self.env["res.partner"].create( + { + "name": "Test Group for Membership", + "is_registrant": True, + "is_group": True, + "area_id": self.area_district.id, + } + ) + # Create an individual WITHOUT area_id + individual = self.env["res.partner"].create( + { + "name": "Individual No Area", + "is_registrant": True, + "is_group": False, + "area_id": False, + } + ) + # Create membership linking individual to group + self.env["spp.group.membership"].create( + { + "group": group.id, + "individual": individual.id, + } + ) + + # Resolve area should find both the group and the individual + result = self.resolver._resolve_area_ids([self.area_district.id], include_children=False) + self.assertIn(group.id, result) + self.assertIn(individual.id, result) + + def test_resolve_area_no_area_ids_returns_empty(self): + """_resolve_area_ids with empty list returns empty.""" + result = self.resolver._resolve_area_ids([], include_children=True) + self.assertEqual(result, []) + + def test_resolve_area_inline_missing_area_id_returns_empty(self): + """_resolve_area_inline with no area_id returns empty.""" + result = self.resolver._resolve_area_inline({"scope_type": "area"}) + self.assertEqual(result, []) + + def test_resolve_area_record_missing_area_returns_empty(self): + """_resolve_area with scope record where area_id is unset returns empty.""" + scope = self.create_scope("area", area_id=self.area_district.id) + # Directly call _resolve_area with a scope that has no area + with patch.object(type(scope), "area_id", new_callable=lambda: property(lambda s: self.env["spp.area"])): + result = self.resolver._resolve_area(scope) + self.assertEqual(result, []) + + def test_resolve_area_tag_inline_empty_tags_returns_empty(self): + """_resolve_area_tag_inline with no tag_ids returns empty.""" + result = self.resolver._resolve_area_tag_inline({"scope_type": "area_tag"}) + self.assertEqual(result, []) + + def test_resolve_area_tag_record_empty_tags_returns_empty(self): + """_resolve_area_tag with 
scope that has no tags returns empty.""" + scope = self.create_scope( + "area_tag", + area_tag_ids=[(6, 0, [self.tag_urban.id])], + ) + # Patch tag_ids to empty + with patch.object( + type(scope), + "area_tag_ids", + new_callable=lambda: property(lambda s: self.env["spp.area.tag"]), + ): + result = self.resolver._resolve_area_tag(scope) + self.assertEqual(result, []) + + def test_resolve_spatial_buffer_params_missing_returns_empty(self): + """_resolve_spatial_buffer_params with incomplete params returns empty.""" + # Missing radius + result = self.resolver._resolve_spatial_buffer_params(1.0, 2.0, None) + self.assertEqual(result, []) + + # Missing latitude + result = self.resolver._resolve_spatial_buffer_params(None, 2.0, 10.0) + self.assertEqual(result, []) + + # All zero (falsy) + result = self.resolver._resolve_spatial_buffer_params(0, 0, 0) + self.assertEqual(result, []) + + def test_resolve_spatial_buffer_without_bridge(self): + """_resolve_spatial_buffer_params without bridge module returns empty.""" + if self.env.get("spp.analytics.spatial.resolver"): + self.skipTest("Spatial bridge module is installed") + result = self.resolver._resolve_spatial_buffer_params(1.5, 2.5, 10.0) + self.assertEqual(result, []) + + +@tagged("post_install", "-at_install") +class TestIndicatorRegistryEdgeCases(AnalyticsTestCase): + """Tests for indicator registry edge cases in indicator_registry.py.""" + + @classmethod + def setUpClass(cls): + super().setUpClass() + cls.ind_registry = cls.env["spp.analytics.indicator.registry"] + + def test_compute_from_variable_empty_registrants(self): + """_compute_from_variable returns 0 for empty registrant list.""" + if self.env.get("spp.cel.variable") is None: + self.skipTest("spp_cel not installed") + variable = self.env["spp.cel.variable"].create( + { + "name": "test_empty_reg_var", + "cel_accessor": "test_empty_reg_var", + "source_type": "computed", + "cel_expression": "true", + "value_type": "number", + "state": "active", + } + ) + result = 
self.ind_registry._compute_from_variable(variable, []) + self.assertEqual(result, 0) + + def test_compute_from_variable_no_cel_service(self): + """_compute_from_variable returns None when cel service unavailable.""" + if self.env.get("spp.cel.variable") is None: + self.skipTest("spp_cel not installed") + variable = self.env["spp.cel.variable"].create( + { + "name": "test_no_cel_svc_var", + "cel_accessor": "test_no_cel_svc_var", + "source_type": "computed", + "cel_expression": "true", + "value_type": "number", + "state": "active", + } + ) + with patch.object(type(self.env), "get", return_value=None): + result = self.ind_registry._compute_from_variable(variable, self.registrants[:3].ids) + self.assertIsNone(result) + + def test_compute_from_variable_no_expression_returns_none(self): + """_compute_from_variable returns None when variable has no expression.""" + if self.env.get("spp.cel.variable") is None: + self.skipTest("spp_cel not installed") + variable = self.env["spp.cel.variable"].create( + { + "name": "test_no_expr_var", + "cel_accessor": "test_no_expr_var", + "source_type": "computed", + "cel_expression": "", + "value_type": "number", + "state": "active", + } + ) + result = self.ind_registry._compute_from_variable(variable, self.registrants[:3].ids) + # Empty expression should return None or 0 + # (returns None because expression is falsy) + self.assertIn(result, (None, 0)) + + def test_compute_from_variable_catches_exception(self): + """_compute_from_variable catches exceptions from CEL compilation.""" + if self.env.get("spp.cel.variable") is None: + self.skipTest("spp_cel not installed") + variable = self.env["spp.cel.variable"].create( + { + "name": "test_exc_var", + "cel_accessor": "test_exc_var", + "source_type": "computed", + "cel_expression": "r.invalid_field_xyz", + "value_type": "number", + "state": "active", + } + ) + # Mock compile_expression to raise, verifying exception is caught + cel_service = self.env["spp.cel.service"].sudo() + with 
patch.object( + type(cel_service), + "compile_expression", + side_effect=RuntimeError("CEL compilation failed"), + ): + result = self.ind_registry._compute_from_variable(variable, self.registrants[:3].ids) + self.assertIsNone(result) + + def test_is_member_aggregate_without_source_type_field(self): + """_is_member_aggregate returns False when variable lacks source_type field.""" + mock_var = MagicMock() + mock_var._fields = {} # No source_type field + result = self.ind_registry._is_member_aggregate(mock_var, "members.count(m, true)") + self.assertFalse(result) + + def test_is_member_aggregate_non_aggregate_type(self): + """_is_member_aggregate returns False for non-aggregate source_type.""" + mock_var = MagicMock() + mock_var._fields = {"source_type": True} + mock_var.source_type = "computed" + result = self.ind_registry._is_member_aggregate(mock_var, "members.count(m, true)") + self.assertFalse(result) + + def test_is_member_aggregate_non_member_expression(self): + """_is_member_aggregate returns False for non-members expression.""" + mock_var = MagicMock() + mock_var._fields = {"source_type": True} + mock_var.source_type = "aggregate" + result = self.ind_registry._is_member_aggregate(mock_var, "r.some_field > 5") + self.assertFalse(result) + + def test_is_member_aggregate_true_case(self): + """_is_member_aggregate returns True for aggregate members.* expression.""" + mock_var = MagicMock() + mock_var._fields = {"source_type": True} + mock_var.source_type = "aggregate" + result = self.ind_registry._is_member_aggregate(mock_var, "members.count(m, true)") + self.assertTrue(result) + + def test_compute_member_aggregate_sum_missing_method(self): + """_compute_member_aggregate_sum returns None when method not available.""" + mock_cel_service = MagicMock(spec=[]) # No evaluate_member_aggregate + result = self.ind_registry._compute_member_aggregate_sum( + mock_cel_service, "members.count(m, true)", self.registrants[:5].ids + ) + self.assertIsNone(result) + + def 
test_compute_member_aggregate_sum_skips_non_groups(self): + """_compute_member_aggregate_sum skips non-group registrants.""" + mock_cel_service = MagicMock() + mock_cel_service.evaluate_member_aggregate.return_value = 3 + + # Use only individual registrants (is_group=False) + individual_ids = self.registrants.filtered(lambda r: not r.is_group).ids[:5] + result = self.ind_registry._compute_member_aggregate_sum( + mock_cel_service, "members.count(m, true)", individual_ids + ) + # All are non-groups, so total should be 0 + self.assertEqual(result, 0) + mock_cel_service.evaluate_member_aggregate.assert_not_called() + + def test_compute_member_aggregate_sum_handles_bool_true(self): + """_compute_member_aggregate_sum counts True booleans as 1.""" + mock_cel_service = MagicMock() + mock_cel_service.evaluate_member_aggregate.return_value = True + + # Use group registrants + group_ids = self.registrants.filtered(lambda r: r.is_group).ids + if not group_ids: + self.skipTest("No group registrants in test data") + result = self.ind_registry._compute_member_aggregate_sum(mock_cel_service, "members.count(m, true)", group_ids) + self.assertEqual(result, len(group_ids)) + + def test_compute_member_aggregate_sum_handles_bool_false(self): + """_compute_member_aggregate_sum counts False booleans as 0.""" + mock_cel_service = MagicMock() + mock_cel_service.evaluate_member_aggregate.return_value = False + + group_ids = self.registrants.filtered(lambda r: r.is_group).ids + if not group_ids: + self.skipTest("No group registrants in test data") + result = self.ind_registry._compute_member_aggregate_sum(mock_cel_service, "members.count(m, true)", group_ids) + self.assertEqual(result, 0) + + def test_compute_member_aggregate_sum_catches_exception(self): + """_compute_member_aggregate_sum catches exceptions and returns None.""" + mock_cel_service = MagicMock() + mock_cel_service.evaluate_member_aggregate.side_effect = RuntimeError("fail") + + group_ids = self.registrants.filtered(lambda r: 
r.is_group).ids + if not group_ids: + self.skipTest("No group registrants in test data") + result = self.ind_registry._compute_member_aggregate_sum(mock_cel_service, "members.count(m, true)", group_ids) + self.assertIsNone(result) + + def test_list_available_deduplicates_variables(self): + """list_available does not duplicate names already in the list.""" + available = self.ind_registry.list_available() + names = [a["name"] for a in available] + # Built-ins should not be duplicated + self.assertEqual(names.count("count"), 1) + + def test_compute_unknown_returns_none(self): + """compute() returns None for completely unknown statistic.""" + result = self.ind_registry.compute("totally_unknown_stat_xyz_123", self.registrants[:3].ids) + self.assertIsNone(result) + + +@tagged("post_install", "-at_install") +class TestCacheCleanupMultipleTypes(AnalyticsTestCase): + """Tests for cache cleanup across multiple scope types.""" + + @classmethod + def setUpClass(cls): + super().setUpClass() + cls.cache_service = cls.env["spp.analytics.cache"] + + def test_cleanup_expired_removes_only_expired(self): + """cleanup_expired removes expired entries but keeps fresh ones.""" + # Create entries for different scope types + area_scope = self.create_scope("area", area_id=self.area_district.id) + explicit_scope = self.create_scope( + "explicit", + explicit_partner_ids=[(6, 0, self.registrants[:3].ids)], + ) + + self.cache_service.store_result(area_scope, ["count"], [], {"total": 10}) + self.cache_service.store_result(explicit_scope, ["count"], [], {"total": 3}) + + # Backdate area entry to make it expired (area TTL = 3600) + area_entry = self.env["spp.analytics.cache.entry"].sudo().search([("scope_type", "=", "area")], limit=1) + expired_time = fields.Datetime.now() - timedelta(seconds=7200) + area_entry.write({"computed_at": expired_time}) + + # Run cleanup + removed = self.cache_service.cleanup_expired() + self.assertGreaterEqual(removed, 1) + + # Area entry should be gone, explicit 
should remain + remaining_area = self.env["spp.analytics.cache.entry"].sudo().search([("scope_type", "=", "area")]) + remaining_explicit = self.env["spp.analytics.cache.entry"].sudo().search([("scope_type", "=", "explicit")]) + self.assertFalse(remaining_area) + self.assertTrue(remaining_explicit) + + def test_cleanup_expired_skips_non_cached_types(self): + """cleanup_expired skips scope types with TTL=0 (spatial).""" + # spatial_polygon has TTL=0, should never have cache entries + # This just verifies cleanup runs without error + removed = self.cache_service.cleanup_expired() + self.assertIsInstance(removed, int) + + +@tagged("post_install", "-at_install") +class TestCacheHitPath(AnalyticsTestCase): + """Tests for the cache hit path in compute_aggregation.""" + + @classmethod + def setUpClass(cls): + super().setUpClass() + cls.service = cls.env["spp.analytics.service"] + + def test_compute_aggregation_returns_cached_result(self): + """compute_aggregation returns cached result on second call.""" + scope = self.create_scope( + "area", + area_id=self.area_district.id, + ) + + # First call populates cache + result1 = self.service.compute_aggregation(scope, use_cache=True) + self.assertFalse(result1["from_cache"]) + + # Second call should return cached result + result2 = self.service.compute_aggregation(scope, use_cache=True) + self.assertTrue(result2["from_cache"]) + self.assertEqual(result1["total_count"], result2["total_count"]) + + def test_cached_result_includes_access_level(self): + """Cached result includes access_level from current user's rule.""" + scope = self.create_scope( + "area", + area_id=self.area_district.id, + ) + + # First call + self.service.compute_aggregation(scope, use_cache=True) + + # Second call (from cache) + result = self.service.compute_aggregation(scope, use_cache=True) + self.assertIn("access_level", result) + + +@tagged("post_install", "-at_install") +class TestResolveAreaWithChildAreas(AnalyticsTestCase): + """Tests for area resolution 
with child area expansion.""" + + @classmethod + def setUpClass(cls): + super().setUpClass() + cls.resolver = cls.env["spp.analytics.scope.resolver"] + + def test_resolve_area_with_children_includes_all_levels(self): + """_resolve_area_ids with include_children finds registrants at all levels.""" + # Registrants are in district and region. Resolving country with + # children should find them all. + result = self.resolver._resolve_area_ids([self.area_country.id], include_children=True) + # Should include registrants from region and district + self.assertGreater(len(result), 0) + + def test_resolve_area_without_children_only_direct(self): + """_resolve_area_ids without children only finds direct registrants.""" + result_with = self.resolver._resolve_area_ids([self.area_country.id], include_children=True) + result_without = self.resolver._resolve_area_ids([self.area_country.id], include_children=False) + # Country has no direct registrants, but children do + self.assertGreaterEqual(len(result_with), len(result_without)) From c19987ff0209d55b00a6d1bfce758022e998d234 Mon Sep 17 00:00:00 2001 From: Edwin Gonzales Date: Mon, 16 Mar 2026 16:05:54 +0800 Subject: [PATCH 2/2] chore(spp_analytics): promote development status to Production/Stable --- spp_analytics/README.rst | 4 ++-- spp_analytics/__manifest__.py | 2 +- 2 files changed, 3 insertions(+), 3 deletions(-) diff --git a/spp_analytics/README.rst b/spp_analytics/README.rst index c49ec394..81c4fa16 100644 --- a/spp_analytics/README.rst +++ b/spp_analytics/README.rst @@ -10,9 +10,9 @@ OpenSPP Analytics !! source digest: sha256:9951d094574dd68b1d86ae2167e53c5ccea5240188acf11a2b7f619ede6c5b54 !!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!! -.. |badge1| image:: https://img.shields.io/badge/maturity-Beta-yellow.png +.. |badge1| image:: https://img.shields.io/badge/maturity-Production%2FStable-green.png :target: https://odoo-community.org/page/development-status - :alt: Beta + :alt: Production/Stable .. 
|badge2| image:: https://img.shields.io/badge/license-LGPL--3-blue.png :target: http://www.gnu.org/licenses/lgpl-3.0-standalone.html :alt: License: LGPL-3 diff --git a/spp_analytics/__manifest__.py b/spp_analytics/__manifest__.py index f3a523a8..aa3b6098 100644 --- a/spp_analytics/__manifest__.py +++ b/spp_analytics/__manifest__.py @@ -8,7 +8,7 @@ "author": "OpenSPP.org", "website": "https://github.com/OpenSPP/OpenSPP2", "license": "LGPL-3", - "development_status": "Beta", + "development_status": "Production/Stable", "maintainers": ["jeremi"], "depends": [ "base",