diff --git a/spreadsheet_oca/README.rst b/spreadsheet_oca/README.rst index 5babde5d..786e7f87 100644 --- a/spreadsheet_oca/README.rst +++ b/spreadsheet_oca/README.rst @@ -1,7 +1,3 @@ -.. image:: https://odoo-community.org/readme-banner-image - :target: https://odoo-community.org/get-involved?utm_source=readme - :alt: Odoo Community Association - =============== Spreadsheet Oca =============== @@ -17,7 +13,7 @@ Spreadsheet Oca .. |badge1| image:: https://img.shields.io/badge/maturity-Beta-yellow.png :target: https://odoo-community.org/page/development-status :alt: Beta -.. |badge2| image:: https://img.shields.io/badge/license-AGPL--3-blue.png +.. |badge2| image:: https://img.shields.io/badge/licence-AGPL--3-blue.png :target: http://www.gnu.org/licenses/agpl-3.0-standalone.html :alt: License: AGPL-3 .. |badge3| image:: https://img.shields.io/badge/github-OCA%2Fspreadsheet-lightgray.png?logo=github @@ -32,11 +28,29 @@ Spreadsheet Oca |badge1| |badge2| |badge3| |badge4| |badge5| -This module adds a functionality for adding and editing Spreadsheets -using Odoo CE. - -It is an alternative to the proprietary module ``spreadsheet_edition`` -of Odoo Enterprise Edition. +This module provides a full-featured spreadsheet editor for Odoo CE using +the ``o-spreadsheet`` engine. It serves as a community alternative that +requires only Odoo CE and OCA dependencies. 
+ +Beyond basic spreadsheet editing, the module includes server-side features +for operational use: + +- **Scheduled Refresh** — cron-based pivot data refresh with email digest + notifications and input parameter substitution in domains +- **KPI Alerts** — cell-value threshold monitors with edge or level trigger + modes, sending notifications when conditions are met +- **What-If Scenarios** — named cell-override sets for scenario planning, + with comparison export and apply-to-copy workflow +- **Email Subscriptions** — partner-level daily/weekly/monthly digest emails + with optional pivot data summaries +- **Input Parameters** — named cell registry for domain token substitution + (e.g. ``%(start_date)s``) used by scheduled refresh and alerts +- **Cell Writeback** — edit Odoo record fields directly from list-view cells + in the spreadsheet, with full audit trail and rollback +- **XLSX Export** — server-rendered ``.xlsx`` download with fresh pivot data + on dedicated sheets, styled headers, and static cell content +- **Collaborative Editing** — revision-based multi-user editing with conflict + resolution via the OWL-based spreadsheet component **Table of contents** diff --git a/spreadsheet_oca/__manifest__.py b/spreadsheet_oca/__manifest__.py index be5be297..98c89ab0 100644 --- a/spreadsheet_oca/__manifest__.py +++ b/spreadsheet_oca/__manifest__.py @@ -5,16 +5,27 @@ "name": "Spreadsheet Oca", "summary": """ Allow to edit spreadsheets""", - "version": "18.0.1.2.3", + "version": "18.0.2.0.0", "license": "AGPL-3", "author": "CreuBlanca,Odoo Community Association (OCA)", "website": "https://github.com/OCA/spreadsheet", - "depends": ["spreadsheet", "base_sparse_field", "bus"], + "depends": ["spreadsheet", "base_sparse_field", "bus", "web_tour"], "data": [ "security/security.xml", "security/ir.model.access.csv", "views/spreadsheet_spreadsheet.xml", + "views/spreadsheet_refresh_schedule_views.xml", + "views/spreadsheet_alert_views.xml", + 
"views/spreadsheet_subscription_views.xml", + "views/spreadsheet_scenario_views.xml", + "views/spreadsheet_xlsx_export_views.xml", + "views/spreadsheet_writeback_views.xml", + "views/spreadsheet_input_param_views.xml", + "data/mail_templates.xml", "data/spreadsheet_spreadsheet_import_mode.xml", + "data/spreadsheet_alert_cron.xml", + "data/spreadsheet_subscription_cron.xml", + "data/web_tour_tour.xml", "wizards/spreadsheet_select_row_number.xml", "wizards/spreadsheet_spreadsheet_import.xml", ], @@ -28,6 +39,7 @@ "spreadsheet_oca/static/src/spreadsheet/list_controller.esm.js", "spreadsheet_oca/static/src/spreadsheet/list_renderer.esm.js", "spreadsheet_oca/static/src/spreadsheet/list_controller.xml", + "spreadsheet_oca/static/src/tours/spreadsheet_feature_tour.esm.js", ], "web.assets_backend_lazy": [ "spreadsheet_oca/static/src/spreadsheet/pivot_controller.esm.js", diff --git a/spreadsheet_oca/controllers/__init__.py b/spreadsheet_oca/controllers/__init__.py index 12a7e529..2807c38b 100644 --- a/spreadsheet_oca/controllers/__init__.py +++ b/spreadsheet_oca/controllers/__init__.py @@ -1 +1,3 @@ from . import main +from . import spreadsheet_writeback +from . import spreadsheet_input_params diff --git a/spreadsheet_oca/controllers/spreadsheet_input_params.py b/spreadsheet_oca/controllers/spreadsheet_input_params.py new file mode 100644 index 00000000..fbea9415 --- /dev/null +++ b/spreadsheet_oca/controllers/spreadsheet_input_params.py @@ -0,0 +1,31 @@ +# Copyright 2025 Ledo Enterprises LLC +# License AGPL-3.0 or later (https://www.gnu.org/licenses/agpl). +""" +JSON endpoint for spreadsheet input parameters. + +Provides a lightweight API so that a future JS plugin can read the current +named-parameter values without having to parse spreadsheet_raw itself. 
+""" + +from odoo.http import Controller, request, route + + +class SpreadsheetInputParamsController(Controller): + @route( + "/spreadsheet/input_params/", + type="json", + auth="user", + ) + def get_input_params(self, spreadsheet_id): + """Return {name: current_value} for all active parameters.""" + spreadsheet = request.env["spreadsheet.spreadsheet"].browse(spreadsheet_id) + if not spreadsheet.exists(): + return {"error": "Spreadsheet not found."} + spreadsheet.check_access("read") + params = request.env["spreadsheet.input_param"].search( + [ + ("spreadsheet_id", "=", spreadsheet_id), + ("active", "=", True), + ] + ) + return {p.name: p.current_value for p in params} diff --git a/spreadsheet_oca/controllers/spreadsheet_writeback.py b/spreadsheet_oca/controllers/spreadsheet_writeback.py new file mode 100644 index 00000000..f2fccfa0 --- /dev/null +++ b/spreadsheet_oca/controllers/spreadsheet_writeback.py @@ -0,0 +1,183 @@ +# Copyright 2025 Ledo Enterprises LLC +# License AGPL-3.0 or later (https://www.gnu.org/licenses/agpl). +""" +Controller for cell writeback: edit list cells to update Odoo records. + +The JS list cell-edit handler POSTs here with: + spreadsheet_id — int + model — str (e.g. "sale.order") + record_id — int + field_name — str (e.g. "name") + new_value — any (JSON-decoded by Odoo's JSON-RPC dispatcher) + +Returns a JSON-serialisable dict: + {'success': True, 'old_value': str, 'new_value': str, 'log_id': int} + or + {'error': ''} + +All exceptions are caught so a writeback failure never results in a +500 error reaching the browser. 
+""" + +import logging + +from odoo import _ +from odoo.exceptions import AccessError +from odoo.http import Controller, request, route + +_logger = logging.getLogger(__name__) + + +class SpreadsheetWriteback(Controller): + @route( + "/spreadsheet/writeback", + type="json", + auth="user", + methods=["POST"], + ) + def writeback(self, spreadsheet_id, model, record_id, field_name, new_value): + """ + Write a single field value to an Odoo record on behalf of the + spreadsheet's List cell-edit handler. + + Security checks (in order): + 1. spreadsheet.writeback_enabled must be True. + 2. Current user must have read access on the spreadsheet record. + 3. model must be a registered model in this environment. + 4. The target record must exist. + 5. Current user must have write access on the target record. + + The old value is captured before the write and stored in the audit + log. For relational fields str() is used which may not be + directly re-writable; see the model docstring for details. + """ + log_vals_base = { + "spreadsheet_id": spreadsheet_id, + "res_model": model, + "record_id": record_id, + "field_name": field_name, + "new_value": str(new_value), + } + + try: + # 1. Load spreadsheet and check writeback_enabled + spreadsheet = request.env["spreadsheet.spreadsheet"].browse(spreadsheet_id) + if not spreadsheet.exists(): + return {"error": "Spreadsheet not found."} + + if not spreadsheet.writeback_enabled: + return {"error": "Writeback not enabled for this spreadsheet."} + + # 2. Check spreadsheet read access + try: + spreadsheet.check_access("read") + except AccessError: + return {"error": "Access denied to spreadsheet."} + + # 3. Validate model + if model not in request.env: + return {"error": f"Model {model!r} is not available."} + + # 4. Load and check record existence + record = request.env[model].browse(record_id) + if not record.exists(): + return {"error": f"Record {model}({record_id}) not found."} + + # 5. 
Check write access on the target record + try: + record.check_access("write") + except AccessError: + _logger.warning( + "Writeback: user %d denied write on %s(%d)", + request.env.uid, + model, + record_id, + ) + return {"error": "Access denied: no write access on record."} + + # 6. Validate field_name exists and is writable + model_fields = request.env[model]._fields + if field_name not in model_fields: + return { + "error": _( + "Field %(field)s does not exist on model %(model)s.", + field=field_name, + model=model, + ) + } + field_obj = model_fields[field_name] + if field_obj.readonly or field_obj.compute: + return { + "error": _( + "Field %(field)s on %(model)s is computed or readonly" + " and cannot be written to.", + field=field_name, + model=model, + ) + } + + # Capture old value before writing + old_value = record[field_name] + old_value_str = str(old_value) + + # Perform the write + record.write({field_name: new_value}) + + # Create audit log (sudo so the log can always be written + # regardless of the user's access on spreadsheet.writeback.log) + log = ( + request.env["spreadsheet.writeback.log"] + .sudo() + .create( + dict( + log_vals_base, + old_value=old_value_str, + status="ok", + ) + ) + ) + + # Post a brief chatter note on the spreadsheet + spreadsheet.sudo().message_post( + body=_( + "Writeback: field %(field)s on " + "%(model)s #%(record_id)d changed " + "from %(old)s to %(new)s.", + field=field_name, + model=model, + record_id=record_id, + old=old_value_str, + new=str(new_value), + ), + subtype_xmlid="mail.mt_note", + ) + + return { + "success": True, + "old_value": old_value_str, + "new_value": str(new_value), + "log_id": log.id, + } + + except Exception as exc: + _logger.exception( + "Writeback error: spreadsheet=%d model=%s record=%d field=%s", + spreadsheet_id, + model, + record_id, + field_name, + ) + # Attempt to write an error log (best effort — use sudo and + # ignore any secondary failure so the route always returns JSON) + try: + 
request.env["spreadsheet.writeback.log"].sudo().create( + dict( + log_vals_base, + status="error", + error_message=str(exc)[:255], + ) + ) + except Exception: + _logger.exception("Failed to create writeback error log") + + return {"error": str(exc)} diff --git a/spreadsheet_oca/data/mail_templates.xml b/spreadsheet_oca/data/mail_templates.xml new file mode 100644 index 00000000..984bc455 --- /dev/null +++ b/spreadsheet_oca/data/mail_templates.xml @@ -0,0 +1,219 @@ + + + + + + spreadsheet.alert.notification + qweb + + +
+ +
+ ⚠ KPI Alert Triggered +
+ +
+

+ +

+ + + + + + + + + + + + + +
Cell + + + + + on sheet + + + +
Value + + + +
Condition + +   + +
+ + +
+
+
+
+
+ + + + spreadsheet.subscription.digest + qweb + + +
+

+ +

+
+ Generated: +
+
+ +

+ No pivot data sources are available in this spreadsheet. +

+
+ + +
+ +
+
+
+
+ + +
+ You are receiving this because you subscribed + to digest emails for this spreadsheet. +
+
+
+
+
+ + + + spreadsheet.refresh.notification + qweb + + +
+ +

No Odoo pivot data sources found in this spreadsheet.

+
+ + + +
+
+
+
+
+
+
+ + + + spreadsheet.refresh.warning + qweb + + +

+ [Warning] Refresh schedule + + + could not load pivot(s): + + + + + , + . + Check server logs for details. +

+
+
+
+
diff --git a/spreadsheet_oca/data/spreadsheet_alert_cron.xml b/spreadsheet_oca/data/spreadsheet_alert_cron.xml new file mode 100644 index 00000000..ea4124be --- /dev/null +++ b/spreadsheet_oca/data/spreadsheet_alert_cron.xml @@ -0,0 +1,14 @@ + + + + + Spreadsheet KPI Alert Evaluation + + code + model._cron_evaluate_all() + 1 + hours + True + + diff --git a/spreadsheet_oca/data/spreadsheet_subscription_cron.xml b/spreadsheet_oca/data/spreadsheet_subscription_cron.xml new file mode 100644 index 00000000..01e471a2 --- /dev/null +++ b/spreadsheet_oca/data/spreadsheet_subscription_cron.xml @@ -0,0 +1,14 @@ + + + + + Spreadsheet Dashboard Digest + + code + model._cron_send_digests() + 1 + days + True + + diff --git a/spreadsheet_oca/data/web_tour_tour.xml b/spreadsheet_oca/data/web_tour_tour.xml new file mode 100644 index 00000000..d5e62794 --- /dev/null +++ b/spreadsheet_oca/data/web_tour_tour.xml @@ -0,0 +1,154 @@ + + + + spreadsheet_oca_features + /odoo + Congratulations! You've explored all spreadsheet features. + + + + + + 10 + .o_menu_sections a:contains('Spreadsheets'), .o_navbar a:contains('Spreadsheets'), a.dropdown-item:contains('Spreadsheets') + Open the Spreadsheets menu to see your dashboards. + click + + + + + 20 + .o_kanban_view + Welcome to Spreadsheets! You're looking at your spreadsheet dashboards. + + + + + 30 + .o_kanban_record:contains('Sales Pipeline Summary') + Open the Sales Pipeline Summary spreadsheet to explore its features. + click + + + + + 40 + .o_form_view div[name='button_box'], .o_form_view #button_box + These smart buttons give you quick access to all features: alerts, scenarios, parameters, and more. + + + + + 50 + button[name='action_open_alerts'], div[name='alert_count'] .oe_stat_button + Click to view KPI Alerts configured for this spreadsheet. + click + + + + + 60 + .o_list_view, .o_kanban_view + KPI Alerts monitor cell values and notify you when thresholds are crossed. 
+ + + + + 70 + .o_back_button, .breadcrumb-item a + Go back to the spreadsheet form. + click + + + + + 80 + button[name='action_open_scenarios'], div[name='scenario_count'] .oe_stat_button + Click to view What-If Scenarios. + click + + + + + 90 + .o_list_view, .o_kanban_view + What-If Scenarios let you model different outcomes without duplicating your spreadsheet. + + + + + 100 + .o_back_button, .breadcrumb-item a + Go back to the spreadsheet form. + click + + + + + 110 + button[name='action_open_input_params'], div[name='input_param_count'] .oe_stat_button + Click to view Input Parameters. + click + + + + + 120 + .o_list_view, .o_kanban_view + Input Parameters bind named cells to server-side values for scheduled refreshes. + + + + + 130 + .o_back_button, .breadcrumb-item a + Go back to the spreadsheet form. + click + + + + + 140 + button[name='action_export_xlsx'] + Export XLSX generates a server-side .xlsx file with fresh pivot data. + + + + + 150 + .o_form_view + You're all set! Explore each feature to unlock the full power of your spreadsheets. 
+ + diff --git a/spreadsheet_oca/demo/demo_kpi_dashboard.json b/spreadsheet_oca/demo/demo_kpi_dashboard.json new file mode 100644 index 00000000..ba59dfac --- /dev/null +++ b/spreadsheet_oca/demo/demo_kpi_dashboard.json @@ -0,0 +1,85 @@ +{ + "version": 21, + "sheets": [ + { + "id": "dashboard", + "name": "Dashboard", + "colNumber": 26, + "rowNumber": 100, + "rows": {}, + "cols": {}, + "merges": [], + "cells": { + "A1": {"content": "KPI", "style": 1, "border": 1}, + "B1": {"content": "Target", "style": 1, "border": 1}, + "C1": {"content": "Actual", "style": 1, "border": 1}, + "D1": {"content": "Variance", "style": 1, "border": 1}, + "E1": {"content": "Status", "style": 1, "border": 1}, + "A2": {"content": "Cost Per Lead"}, + "B2": {"content": "50", "format": 1}, + "C2": {"content": "42", "format": 1}, + "D2": {"content": "=C2-B2", "format": 1}, + "E2": {"content": "=IF(C2<=B2,\"On Track\",\"Over\")"}, + "A3": {"content": "Revenue Growth"}, + "B3": {"content": "0.15", "format": 2}, + "C3": {"content": "0.12", "format": 2}, + "D3": {"content": "=C3-B3", "format": 2}, + "E3": {"content": "=IF(C3>=B3,\"On Track\",\"Below\")"}, + "A4": {"content": "Customer Churn"}, + "B4": {"content": "0.05", "format": 2}, + "C4": {"content": "0.032", "format": 2}, + "D4": {"content": "=C4-B4", "format": 2}, + "E4": {"content": "=IF(C4<=B4,\"On Track\",\"High\")"}, + "A5": {"content": "Avg Deal Size"}, + "B5": {"content": "25000", "format": 1}, + "C5": {"content": "28500", "format": 1}, + "D5": {"content": "=C5-B5", "format": 1}, + "E5": {"content": "=IF(C5>=B5,\"On Track\",\"Low\")"}, + "A6": {"content": "NPS Score"}, + "B6": {"content": "70"}, + "C6": {"content": "78"}, + "D6": {"content": "=C6-B6"}, + "E6": {"content": "=IF(C6>=B6,\"On Track\",\"Low\")"}, + "A8": {"content": "Last Updated"}, + "B8": {"content": "2026-03-01"}, + "A9": {"content": "Updated By"}, + "B9": {"content": "Admin"} + }, + "conditionalFormats": [], + "figures": [], + "filterTables": [], + "tables": [], + 
"dataValidationRules": [], + "comments": {}, + "headerGroups": {"ROW": [], "COL": []}, + "areGridLinesVisible": true, + "isVisible": true + } + ], + "settings": {}, + "customTableStyles": {}, + "styles": { + "1": {"bold": true, "align": "center"} + }, + "formats": { + "1": "$#,##0", + "2": "0.00%" + }, + "borders": { + "1": { + "top": ["thin", "#000"], + "bottom": ["thin", "#000"], + "left": ["thin", "#000"], + "right": ["thin", "#000"] + } + }, + "revisionId": "START_REVISION", + "uniqueFigureIds": true, + "odooVersion": 12, + "globalFilters": [], + "pivots": {}, + "pivotNextId": 1, + "lists": {}, + "listNextId": 1, + "chartOdooMenusReferences": {} +} diff --git a/spreadsheet_oca/demo/demo_pivot_dashboard.json b/spreadsheet_oca/demo/demo_pivot_dashboard.json new file mode 100644 index 00000000..2ed2f20e --- /dev/null +++ b/spreadsheet_oca/demo/demo_pivot_dashboard.json @@ -0,0 +1,98 @@ +{ + "version": 21, + "sheets": [ + { + "id": "sheet_partners", + "name": "Partners by Country", + "colNumber": 26, + "rowNumber": 100, + "rows": {}, + "cols": { + "0": {"size": 220}, + "1": {"size": 140}, + "2": {"size": 140}, + "3": {"size": 140} + }, + "merges": [], + "cells": { + "A1": {"content": "=PIVOT(1)"} + }, + "conditionalFormats": [], + "figures": [], + "filterTables": [], + "tables": [], + "dataValidationRules": [], + "comments": {}, + "headerGroups": {"ROW": [], "COL": []}, + "areGridLinesVisible": true, + "isVisible": true + }, + { + "id": "sheet_regions", + "name": "Regions per Country", + "colNumber": 26, + "rowNumber": 100, + "rows": {}, + "cols": { + "0": {"size": 220}, + "1": {"size": 140} + }, + "merges": [], + "cells": { + "A1": {"content": "=PIVOT(2)"} + }, + "conditionalFormats": [], + "figures": [], + "filterTables": [], + "tables": [], + "dataValidationRules": [], + "comments": {}, + "headerGroups": {"ROW": [], "COL": []}, + "areGridLinesVisible": true, + "isVisible": true + } + ], + "settings": {}, + "customTableStyles": {}, + "styles": {}, + "formats": 
{}, + "borders": {}, + "revisionId": "START_REVISION", + "uniqueFigureIds": true, + "odooVersion": 12, + "globalFilters": [], + "pivots": { + "1": { + "type": "ODOO", + "id": "1", + "formulaId": "1", + "name": "Partners by Country & Type", + "model": "res.partner", + "domain": [["active", "=", true]], + "context": {}, + "measures": [{"id": "__count", "fieldName": "__count"}], + "rows": [{"fieldName": "country_id", "order": "desc"}], + "columns": [{"fieldName": "is_company"}], + "sortedColumn": null, + "fieldMatching": {} + }, + "2": { + "type": "ODOO", + "id": "2", + "formulaId": "2", + "name": "Regions per Country", + "model": "res.country.state", + "domain": [], + "context": {}, + "measures": [{"id": "__count", "fieldName": "__count"}], + "rows": [{"fieldName": "country_id", "order": "desc"}], + "columns": [], + "sortedColumn": null, + "fieldMatching": {} + } + }, + "pivotNextId": 3, + "lists": {}, + "listNextId": 1, + "chartOdooMenusReferences": {} +} diff --git a/spreadsheet_oca/demo/demo_spreadsheet.json b/spreadsheet_oca/demo/demo_spreadsheet.json index 79a0811d..0677f7d0 100644 --- a/spreadsheet_oca/demo/demo_spreadsheet.json +++ b/spreadsheet_oca/demo/demo_spreadsheet.json @@ -1,50 +1,114 @@ { - "version": 12.5, + "version": 21, "sheets": [ { - "id": "Sheet1", - "name": "Demo Sheet 1", + "id": "pipeline", + "name": "Sales Pipeline", "colNumber": 26, "rowNumber": 100, "rows": {}, "cols": {}, "merges": [], "cells": { - "A1": {"style": 1, "content": "First Value", "border": 1}, - "A2": {"format": 1, "content": "30.55", "border": 1}, - "B1": {"style": 1, "content": "Second Value", "border": 1}, - "B2": {"format": 1, "content": "50", "border": 1}, - "C1": {"style": 1, "content": "Total", "border": 1}, - "C2": {"format": 1, "content": "=sum(A2:B2)", "border": 1}, - "A3": {"border": 2}, - "B3": {"border": 2}, - "C3": {"border": 2}, - "D1": {"border": 3}, - "D2": {"border": 3} + "A1": {"style": 1, "content": "Partner", "border": 1}, + "B1": {"style": 1, "content": 
"Q1 Revenue", "border": 1}, + "C1": {"style": 1, "content": "Q2 Revenue", "border": 1}, + "D1": {"style": 1, "content": "Q3 Revenue", "border": 1}, + "E1": {"style": 1, "content": "Annual Total", "border": 1}, + "A2": {"content": "Wood Corner"}, + "B2": {"content": "120000", "format": 1}, + "C2": {"content": "135000", "format": 1}, + "D2": {"content": "148000", "format": 1}, + "E2": {"content": "=SUM(B2:D2)", "format": 1}, + "A3": {"content": "Acme Corp"}, + "B3": {"content": "85000", "format": 1}, + "C3": {"content": "92000", "format": 1}, + "D3": {"content": "101000", "format": 1}, + "E3": {"content": "=SUM(B3:D3)", "format": 1}, + "A4": {"content": "Azure Interior"}, + "B4": {"content": "200000", "format": 1}, + "C4": {"content": "215000", "format": 1}, + "D4": {"content": "230000", "format": 1}, + "E4": {"content": "=SUM(B4:D4)", "format": 1}, + "A5": {"content": "Ready Mat"}, + "B5": {"content": "45000", "format": 1}, + "C5": {"content": "48000", "format": 1}, + "D5": {"content": "52000", "format": 1}, + "E5": {"content": "=SUM(B5:D5)", "format": 1}, + "A6": {"content": "Global Solutions"}, + "B6": {"content": "310000", "format": 1}, + "C6": {"content": "340000", "format": 1}, + "D6": {"content": "375000", "format": 1}, + "E6": {"content": "=SUM(B6:D6)", "format": 1}, + "A8": {"content": "Grand Total", "style": 2}, + "B8": {"content": "=SUM(B2:B6)", "format": 1, "style": 2}, + "C8": {"content": "=SUM(C2:C6)", "format": 1, "style": 2}, + "D8": {"content": "=SUM(D2:D6)", "format": 1, "style": 2}, + "E8": {"content": "=SUM(E2:E6)", "format": 1, "style": 2}, + "A10": {"content": "Revenue Target"}, + "B10": {"content": "500000", "format": 1} }, "conditionalFormats": [], "figures": [], "filterTables": [], + "tables": [], + "dataValidationRules": [], + "comments": {}, + "headerGroups": {"ROW": [], "COL": []}, + "areGridLinesVisible": true, + "isVisible": true + }, + { + "id": "params", + "name": "Parameters", + "colNumber": 26, + "rowNumber": 100, + "rows": {}, + 
"cols": {}, + "merges": [], + "cells": { + "A1": {"content": "Parameter", "style": 1}, + "B1": {"content": "Value", "style": 1}, + "A2": {"content": "Start Date"}, + "B2": {"content": "2026-01-01"}, + "A3": {"content": "End Date"}, + "B3": {"content": "2026-12-31"}, + "A4": {"content": "Growth Rate"}, + "B4": {"content": "0.15"}, + "A5": {"content": "Threshold"}, + "B5": {"content": "500000"} + }, + "conditionalFormats": [], + "figures": [], + "filterTables": [], + "tables": [], + "dataValidationRules": [], + "comments": {}, + "headerGroups": {"ROW": [], "COL": []}, "areGridLinesVisible": true, "isVisible": true } ], - "entities": {}, - "styles": {"1": {"bold": true, "align": "center"}}, - "formats": {"1": "[$$]#,##0.00"}, + "settings": {}, + "customTableStyles": {}, + "styles": { + "1": {"bold": true, "align": "center"}, + "2": {"bold": true, "italic": true} + }, + "formats": { + "1": "$#,##0" + }, "borders": { "1": { "top": ["thin", "#000"], "bottom": ["thin", "#000"], "left": ["thin", "#000"], "right": ["thin", "#000"] - }, - "2": {"top": ["thin", "#000"]}, - "3": {"left": ["thin", "#000"]} + } }, "revisionId": "START_REVISION", "uniqueFigureIds": true, - "odooVersion": 5, + "odooVersion": 12, "globalFilters": [], "pivots": {}, "pivotNextId": 1, diff --git a/spreadsheet_oca/demo/spreadsheet_spreadsheet.xml b/spreadsheet_oca/demo/spreadsheet_spreadsheet.xml index 11222ed5..9c5809ee 100644 --- a/spreadsheet_oca/demo/spreadsheet_spreadsheet.xml +++ b/spreadsheet_oca/demo/spreadsheet_spreadsheet.xml @@ -1,11 +1,229 @@ + + + + Müller GmbH + + + + + Hans Weber + + + + + Dupont SA + + + + + Marie Leclerc + + + + + British Solutions Ltd + + + + + James Clarke + + + + + Tanaka Industries + + + + + Silva Comércio Ltda + + + + + Ana Costa + + + + Patel Technologies Pvt Ltd + + + + + Priya Sharma + + + + + Outback Systems Pty Ltd + + + + + + - Demo spreadsheet + Sales Pipeline Summary + + + + KPI Dashboard + + + + + + + Partner Pivot Dashboard + + + + + + + + Weekly 
Pipeline Refresh + + 1 + weeks + + + + + + + Revenue Below Target + + E8 + Sales Pipeline + + 2000000 + edge + + + + Cost Per Lead Warning + + C2 + + 45 + level + + + + + + start_date + + Parameters!B2 + Start of reporting period + 2026-01-01 + + + + end_date + + Parameters!B3 + End of reporting period + 2026-12-31 + + + + growth_rate + + Parameters!B4 + Expected annual growth rate + 0.15 + + + + + + Base Case + + + Current pipeline estimates + {} + + + + Optimistic (+20%) + + 20% uplift on all revenue + {"Sales Pipeline!B2": 144000, "Sales Pipeline!C2": 168000, "Sales Pipeline!D2": 192000, "Sales Pipeline!B3": 96000, "Sales Pipeline!C3": 108000, "Sales Pipeline!D3": 120000, "Sales Pipeline!B4": 60000, "Sales Pipeline!C4": 72000, "Sales Pipeline!D4": 84000, "Sales Pipeline!B5": 36000, "Sales Pipeline!C5": 42000, "Sales Pipeline!D5": 48000, "Sales Pipeline!B6": 24000, "Sales Pipeline!C6": 30000, "Sales Pipeline!D6": 36000} + + + + Pessimistic (-30%) + + 30% reduction across the board + {"Sales Pipeline!B2": 84000, "Sales Pipeline!C2": 98000, "Sales Pipeline!D2": 112000, "Sales Pipeline!B3": 56000, "Sales Pipeline!C3": 63000, "Sales Pipeline!D3": 70000, "Sales Pipeline!B4": 35000, "Sales Pipeline!C4": 42000, "Sales Pipeline!D4": 49000, "Sales Pipeline!B5": 21000, "Sales Pipeline!C5": 24500, "Sales Pipeline!D5": 28000, "Sales Pipeline!B6": 14000, "Sales Pipeline!C6": 17500, "Sales Pipeline!D6": 21000} + + + + + + + + weekly + + + + + + + + res.partner + + phone + +1 555-0101 + +1 555-0199 + ok + + + + + res.partner + + name + Dupont SA + Dupont SA International + rolled_back + diff --git a/spreadsheet_oca/models/__init__.py b/spreadsheet_oca/models/__init__.py index c5ec2360..8fdbe3a0 100644 --- a/spreadsheet_oca/models/__init__.py +++ b/spreadsheet_oca/models/__init__.py @@ -1,6 +1,15 @@ +from . import cell_ref # noqa: F401 — shared helpers; must be first from . import spreadsheet_abstract from . import spreadsheet_spreadsheet_tag from . 
import spreadsheet_spreadsheet from . import spreadsheet_oca_revision from . import ir_websocket from . import spreadsheet_spreadsheet_import_mode +from . import pivot_data +from . import spreadsheet_refresh_schedule +from . import spreadsheet_alert +from . import spreadsheet_subscription +from . import spreadsheet_xlsx_export +from . import spreadsheet_scenario +from . import spreadsheet_writeback +from . import spreadsheet_input_param diff --git a/spreadsheet_oca/models/cell_ref.py b/spreadsheet_oca/models/cell_ref.py new file mode 100644 index 00000000..0e258ff1 --- /dev/null +++ b/spreadsheet_oca/models/cell_ref.py @@ -0,0 +1,140 @@ +# Copyright 2025 Ledo Enterprises LLC +# License AGPL-3.0 or later (https://www.gnu.org/licenses/agpl). +""" +Shared cell-reference helpers for spreadsheet_oca. + +Used by spreadsheet_alert, spreadsheet_scenario, and spreadsheet_input_param +to avoid duplicating cell-address parsing and raw-JSON access logic. +""" + +import re + +# Pre-compiled pattern: column letters + row number (1-based, no zero row). +_CELL_REF_RE = re.compile(r"^([A-Za-z]+)([1-9][0-9]*)$") + + +def _idx_to_cell_address(col_idx, row_idx): + """Convert 0-based (col, row) to cell address like 'A1', 'B3', 'AA12'.""" + col_str = "" + c = col_idx + while True: + col_str = chr(ord("A") + c % 26) + col_str + c = c // 26 - 1 + if c < 0: + break + return f"{col_str}{row_idx + 1}" + + +def parse_cell_ref(ref): + """ + Parse a bare cell reference like 'B3' or 'AA12' into (col_index, row_index). + + Both indices are 0-based to match the o-spreadsheet JSON cell-map format. + Returns (None, None) on invalid input (empty string, zero row, etc.). 
+ """ + m = _CELL_REF_RE.match(ref.strip()) + if not m: + return None, None + col_str, row_str = m.group(1).upper(), m.group(2) + col_idx = 0 + for ch in col_str: + col_idx = col_idx * 26 + (ord(ch) - ord("A") + 1) + col_idx -= 1 # convert to 0-based + row_idx = int(row_str) - 1 # convert to 0-based + return col_idx, row_idx + + +def parse_cell_key(key): + """ + Parse a possibly-qualified cell key into (sheet_name_or_None, col_idx, row_idx). + + Supported formats: + - ``"B3"`` — no sheet qualifier; sheet_name = None + - ``"Sheet1!B3"`` — explicit sheet qualifier + """ + key = key.strip() + if "!" in key: + sheet_part, addr_part = key.split("!", 1) + sheet_name = sheet_part.strip() + else: + sheet_name = None + addr_part = key + col_idx, row_idx = parse_cell_ref(addr_part) + return sheet_name, col_idx, row_idx + + +def _resolve_sheet(sheets, sheet_name=None): + """Return the target sheet dict from a list of sheets. + + If *sheet_name* is given, searches case-insensitively; falls back to the + first sheet if not found. Returns None when *sheets* is empty. + """ + if not sheets: + return None + if sheet_name: + for s in sheets: + if s.get("name", "").lower() == sheet_name.lower(): + return s + return sheets[0] + + +def read_cell_value(spreadsheet_raw, cell_ref, sheet_name=None): + """ + Read the value of a cell from a spreadsheet_raw JSON dict. + + *cell_ref* may be bare (``"B3"``) or sheet-qualified (``"Sheet1!B3"``). + *sheet_name*, when provided, overrides any sheet qualifier embedded in + *cell_ref* and forces lookup in the named sheet (falling back to sheet 0). + + Return value priority: + 1. The cell's evaluated ``"value"`` key (set by o-spreadsheet when the + workbook is saved after formula evaluation in the browser). + 2. The cell's ``"content"`` string (for static / hand-typed cells). + 3. ``None`` when the cell, sheet, or raw JSON is absent. 
+ """ + sheets = (spreadsheet_raw or {}).get("sheets", []) + ref_sheet, col_idx, row_idx = parse_cell_key(cell_ref) + if col_idx is None: + return None + + target_name = sheet_name or ref_sheet + target_sheet = _resolve_sheet(sheets, target_name) + if target_sheet is None: + return None + + cells = target_sheet.get("cells", {}) + cell_addr = _idx_to_cell_address(col_idx, row_idx) + cell_data = cells.get(cell_addr, {}) + if not cell_data: + return None + + value = cell_data.get("value") + if value is None: + value = cell_data.get("content") + return value if value != "" else None + + +def write_cell_content(spreadsheet_raw, cell_ref, value, sheet_name=None): + """ + Write a value into ``cells[row][col]["content"]`` of *spreadsheet_raw* in-place. + + Creates nested dicts as needed. *cell_ref* and *sheet_name* follow the + same conventions as :func:`read_cell_value`. + + Returns the (mutated) *spreadsheet_raw* dict. + """ + sheets = (spreadsheet_raw or {}).get("sheets", []) + ref_sheet, col_idx, row_idx = parse_cell_key(cell_ref) + if col_idx is None: + return spreadsheet_raw + + target_name = sheet_name or ref_sheet + target_sheet = _resolve_sheet(sheets, target_name) + if target_sheet is None: + return spreadsheet_raw + + cells = target_sheet.setdefault("cells", {}) + cell_addr = _idx_to_cell_address(col_idx, row_idx) + cell_data = cells.setdefault(cell_addr, {}) + cell_data["content"] = str(value) if value is not None else "" + return spreadsheet_raw diff --git a/spreadsheet_oca/models/pivot_data.py b/spreadsheet_oca/models/pivot_data.py new file mode 100644 index 00000000..0233cc69 --- /dev/null +++ b/spreadsheet_oca/models/pivot_data.py @@ -0,0 +1,366 @@ +# Copyright 2025 Ledo Enterprises LLC +# License AGPL-3.0 or later (https://www.gnu.org/licenses/agpl). +""" +Server-side pivot data helper. 
+ +Replicates the read_group strategy used by the Odoo web PivotModel +(addons/web/static/src/views/pivot/pivot_model.js) to produce pivot table +data server-side, without executing any JavaScript. + +The JS pivot loads data by: + 1. Computing all row-groupby prefixes ("sections"): + rows=["partner_id","date:month"] → [[], ["partner_id"], + ["partner_id","date:month"]] + 2. Computing all col-groupby prefixes ("sections"): + cols=["stage_id"] → [[], ["stage_id"]] + 3. Taking the cartesian product (row_prefix × col_prefix) for "divisors". + 4. For each divisor [rowPrefix, colPrefix], calling: + read_group(domain, fields=measureSpecs, + groupby=rowPrefix+colPrefix, lazy=False) + +This module replicates that strategy in Python and exposes: + - ``get_pivot_data(model, domain, context, rows, columns, measures)`` + +Rows / columns are lists of dimension dicts: + {"fieldName": "date_order", "granularity": "month"} + {"fieldName": "partner_id"} (no granularity) + +Measures are lists of measure dicts: + {"fieldName": "amount_total", "aggregator": "sum"} + {"fieldName": "__count"} +""" + +import itertools +import logging + +_logger = logging.getLogger(__name__) + +# --------------------------------------------------------------------------- +# Helpers mirroring the JS helpers in pivot_model.js +# --------------------------------------------------------------------------- + +DATE_GRANULARITIES = {"day", "week", "month", "quarter", "year"} + + +def _dimension_to_groupby(dim): + """Convert a dimension dict to an Odoo read_group groupby string. + + {"fieldName": "date_order", "granularity": "month"} → "date_order:month" + {"fieldName": "partner_id"} → "partner_id" + """ + name = dim["fieldName"] + gran = dim.get("granularity") + return f"{name}:{gran}" if gran else name + + +def _sections(lst): + """Return all prefixes of lst including the empty prefix. + + sections(["a", "b", "c"]) → [[], ["a"], ["a", "b"], ["a", "b", "c"]] + + Mirrors the JS ``sections()`` helper. 
+ """ + return [lst[:i] for i in range(len(lst) + 1)] + + +def _measure_to_field_spec(measure): + """Convert a measure dict to a read_group ``fields`` element. + + {"fieldName": "amount_total", "aggregator": "sum"} → "amount_total:sum" + {"fieldName": "__count"} → "__count" + """ + if measure["fieldName"] == "__count": + return "__count" + agg = measure.get("aggregator") or "sum" + return f"{measure['fieldName']}:{agg}" + + +# --------------------------------------------------------------------------- +# Main computation +# --------------------------------------------------------------------------- + + +def _get_pivot_data(env, model_name, domain, context, row_dims, col_dims, measures): + """Compute pivot table data using the same read_group strategy as the JS. + + Returns a dict: + { + "fields": {fieldName: {type, string, ...}}, + "groups": [ + { + "rowValues": ["2026-01", ...], # normalised group key values + "colValues": ["Confirmed", ...], + "rowGroupBy": ["date_order:month"], + "colGroupBy": ["stage_id"], + "count": 12, + "measures": {"amount_total:sum": 9800.0, ...}, + }, + ... + ], + "rowDimensions": [{"fieldName": ..., "granularity": ...}, ...], + "colDimensions": [{"fieldName": ..., "granularity": ...}, ...], + "measureSpecs": ["amount_total:sum", ...], + } + """ + Model = env[model_name].with_context(**(context or {})) + + # ── 1. Fields metadata (needed for label resolution) ───────────────── + all_field_names = [d["fieldName"] for d in row_dims + col_dims] + [ + m["fieldName"] for m in measures if m["fieldName"] != "__count" + ] + # fields_get returns {fieldName: {type, string, selection, ...}} + fields_meta = Model.fields_get( + all_field_names, attributes=["type", "string", "selection"] + ) + + # ── 2. 
Build groupby strings ────────────────────────────────────────── + row_groupbys = [_dimension_to_groupby(d) for d in row_dims] + col_groupbys = [_dimension_to_groupby(d) for d in col_dims] + measure_specs = [_measure_to_field_spec(m) for m in measures] + + # Ensure count is always fetched (JS always adds __count implicitly) + field_specs_with_count = measure_specs + ( + [] if "__count" in measure_specs else ["__count"] + ) + + # ── 3. Compute divisors (cartesian product of all prefixes) ────────── + row_sections = _sections(row_groupbys) + col_sections = _sections(col_groupbys) + divisors = list(itertools.product(row_sections, col_sections)) + + # ── 4. Fire read_group for each divisor ────────────────────────────── + groups = [] + for row_prefix, col_prefix in divisors: + groupby = row_prefix + col_prefix + try: + results = Model.read_group( + domain=domain or [], + fields=field_specs_with_count, + groupby=groupby, + lazy=False, + ) + except Exception: + _logger.exception( + "read_group failed for model=%s groupby=%s", model_name, groupby + ) + continue + + for rg in results: + group_entry = { + "rowGroupBy": row_prefix, + "colGroupBy": col_prefix, + "rowValues": _extract_group_values(rg, row_prefix, fields_meta), + "colValues": _extract_group_values(rg, col_prefix, fields_meta), + "count": rg.get("__count", 0), + "measures": _extract_measures(rg, measures, fields_meta), + "domain": rg.get("__domain", []), + } + groups.append(group_entry) + + return { + "fields": fields_meta, + "groups": groups, + "rowDimensions": row_dims, + "colDimensions": col_dims, + "measureSpecs": measure_specs, + } + + +def _extract_group_values(rg_row, groupby_list, fields_meta): + """Extract normalised group values from a read_group result row. + + Many2one fields return (id, display_name) — we normalise to the id (int). + Date/datetime fields return a formatted string (Odoo already handles + granularity in the groupby key). 
+ """ + values = [] + for gb_spec in groupby_list: + field_name = gb_spec.split(":")[0] + raw = rg_row.get(gb_spec) or rg_row.get(field_name) + if raw is False or raw is None: + values.append(False) + elif isinstance(raw, list | tuple) and len(raw) == 2: + # Many2one: (id, display_name) — store id; JS uses id for grouping + values.append(raw[0]) + else: + values.append(raw) + return values + + +def _extract_measures(rg_row, measures, fields_meta): + """Extract measure values from a read_group result row.""" + result = {} + for measure in measures: + fname = measure["fieldName"] + agg = measure.get("aggregator") + if fname == "__count": + result["__count"] = rg_row.get("__count", 0) + continue + # read_group key: field_name (no aggregator suffix in result keys) + raw = rg_row.get(fname, 0) + if isinstance(raw, list | tuple): + # Many2one used as measure — count distinct occurrences + raw = 1 if raw else 0 + if raw is False: + raw = 0 + spec_key = f"{fname}:{agg}" if agg else fname + result[spec_key] = raw + return result + + +# --------------------------------------------------------------------------- +# Shared helpers for pivot iteration and HTML rendering +# --------------------------------------------------------------------------- + + +def collect_pivot_summaries(env, spreadsheet_raw, domain_transform=None): + """Iterate over ODOO-type pivots and return fresh data for each. + + Args: + env: Odoo environment. + spreadsheet_raw: dict — the spreadsheet's raw JSON data. + domain_transform: optional callable(domain) -> domain, applied to each + pivot's domain before querying (e.g. parameter substitution). + + Returns: + A tuple ``(summaries, failed_names)`` where *summaries* is a list of + ``{"name": ..., "model": ..., "result": ...}`` dicts, and + *failed_names* is a list of pivot display names that could not be loaded. 
+ """ + pivots = spreadsheet_raw.get("pivots", {}) + summaries = [] + failed_names = [] + for pivot_id, pivot_def in pivots.items(): + if pivot_def.get("type") != "ODOO": + continue + model_name = pivot_def.get("model") + pivot_name = pivot_def.get("name") or f"Pivot #{pivot_id}" + if not model_name or model_name not in env: + _logger.warning( + "collect_pivot_summaries: unknown model %r — skipping pivot %s", + model_name, + pivot_id, + ) + failed_names.append(pivot_name) + continue + try: + domain = pivot_def.get("domain", []) + if domain_transform: + domain = domain_transform(domain) + result = _get_pivot_data( + env, + model_name, + domain, + pivot_def.get("context", {}), + pivot_def.get("rows", []), + pivot_def.get("columns", []), + pivot_def.get("measures", []), + ) + summaries.append( + { + "name": pivot_name, + "model": model_name, + "result": result, + } + ) + except Exception: + _logger.exception( + "collect_pivot_summaries: failed to compute pivot %s", + pivot_id, + ) + failed_names.append(pivot_name) + return summaries, failed_names + + +def render_pivot_table_html(summary, max_rows=10): + """Render a single pivot summary as an HTML table string. + + Args: + summary: dict with keys ``"name"``, ``"model"``, ``"result"`` + (as returned by ``collect_pivot_summaries``). + max_rows: maximum number of detail rows to include before truncating. + + Returns: + str — HTML fragment for the pivot table. + """ + result = summary["result"] + name = summary["name"] + model = summary["model"] + parts = [] + + parts.append( + f'

{name}' + f' ({model})

' + ) + + row_dims = result.get("rowDimensions", []) + groups = result.get("groups", []) + + # Grand total row + grand_totals = [ + g for g in groups if g["rowGroupBy"] == [] and g["colGroupBy"] == [] + ] + if grand_totals: + gt = grand_totals[0] + count = gt.get("count", 0) + parts.append( + f'

Total records: {count}

' + ) + for key, val in gt.get("measures", {}).items(): + if key != "__count" and val is not None: + parts.append( + f'

{key}: {val}

' + ) + + # Row breakdown table + if row_dims: + row_gb = [d["fieldName"] for d in row_dims] + row_groups = [ + g for g in groups if g["rowGroupBy"] == row_gb and g["colGroupBy"] == [] + ] + if row_groups: + measure_keys = [ + k for k in (row_groups[0].get("measures") or {}) if k != "__count" + ] + headers = ["Group"] + measure_keys + ["Count"] + parts.append( + '' + ) + parts.append("") + for h in headers: + parts.append( + '' + ) + parts.append("") + for g in row_groups[:max_rows]: + label = ", ".join(str(v) for v in g["rowValues"]) + parts.append("") + parts.append( + f'' + ) + for mk in measure_keys: + val = g.get("measures", {}).get(mk, "") + parts.append( + '' + ) + parts.append( + ''.format(g.get("count", "")) + ) + parts.append("") + if len(row_groups) > max_rows: + colspan = len(headers) + extra = len(row_groups) - max_rows + more_text = f"and {extra} more rows" + parts.append( + f'" + ) + parts.append("
{h}
{label}{val}{}
' + f"… {more_text}
") + + return "".join(parts) diff --git a/spreadsheet_oca/models/spreadsheet_alert.py b/spreadsheet_oca/models/spreadsheet_alert.py new file mode 100644 index 00000000..1f8203db --- /dev/null +++ b/spreadsheet_oca/models/spreadsheet_alert.py @@ -0,0 +1,236 @@ +# Copyright 2025 Ledo Enterprises LLC +# License AGPL-3.0 or later (https://www.gnu.org/licenses/agpl). +""" +Threshold alerts / KPI watches. + +Users define a watch on a named cell in a spreadsheet. A shared cron +periodically evaluates the cell's current value (computed server-side +by calling _get_pivot_data for the first matching pivot at that cell, +or by reading the static value from spreadsheet_raw) and fires a +Discuss/email notification when the threshold is crossed. + +Two trigger modes: + edge — notify only on the first evaluation that crosses the threshold + (stays silent until the condition resets and re-triggers) + level — notify on every cron cycle where the condition holds + +Operators: >, >=, <, <=, ==, != +""" + +import logging +import operator as _op + +from odoo import _, api, fields, models +from odoo.exceptions import ValidationError + +from .cell_ref import parse_cell_ref, read_cell_value + +_logger = logging.getLogger(__name__) + +_OPERATORS = [ + (">", "> (greater than)"), + (">=", ">= (greater or equal)"), + ("<", "< (less than)"), + ("<=", "<= (less or equal)"), + ("==", "== (equal to)"), + ("!=", "!= (not equal to)"), +] + +_TRIGGER_MODES = [ + ("edge", "Edge — notify once when threshold is first crossed"), + ("level", "Level — notify every cycle the condition holds"), +] + +# Maps operator selection values to Python comparison callables. 
# Maps operator selection values to Python comparison callables.
_OP_FUNCS = {
    ">": _op.gt,
    ">=": _op.ge,
    "<": _op.lt,
    "<=": _op.le,
    "==": _op.eq,
    "!=": _op.ne,
}


class SpreadsheetAlert(models.Model):
    """KPI threshold watch on a single spreadsheet cell.

    A shared cron (``_cron_evaluate_all``) re-reads the watched cell from the
    spreadsheet's stored JSON, compares it against ``threshold`` with
    ``operator``, and posts a Chatter/email notification according to
    ``trigger_mode`` (edge = once per crossing, level = every cycle).
    """

    _name = "spreadsheet.alert"
    _description = "Spreadsheet KPI Threshold Alert"
    _inherit = ["mail.thread"]
    _order = "spreadsheet_id, name"

    name = fields.Char(required=True, tracking=True)
    spreadsheet_id = fields.Many2one(
        "spreadsheet.spreadsheet",
        required=True,
        ondelete="cascade",
        index=True,
    )
    active = fields.Boolean(default=True, tracking=True)

    # ── Cell reference ────────────────────────────────────────────────────
    # sheet_name is a plain label match; blank falls back to the first sheet.
    sheet_name = fields.Char(
        help="Sheet containing the watched cell (blank = first sheet).",
    )
    # Bare A1-style address only; the sheet qualifier lives in sheet_name.
    cell_ref = fields.Char(
        string="Cell Reference",
        required=True,
        help="Spreadsheet cell reference, e.g. B3 or D12.",
        tracking=True,
    )

    # ── Threshold ─────────────────────────────────────────────────────────
    operator = fields.Selection(
        _OPERATORS,
        required=True,
        default=">",
        tracking=True,
    )
    threshold = fields.Float(required=True, tracking=True)

    # ── Trigger mode ──────────────────────────────────────────────────────
    trigger_mode = fields.Selection(
        _TRIGGER_MODES,
        default="edge",
        required=True,
        tracking=True,
        help=(
            "Edge: notify once when the condition changes from False to True. "
            "Level: notify every cron cycle the condition is True."
        ),
    )
    # last_state / last_value / last_checked form the evaluation memory used
    # by edge mode; all excluded from record duplication (copy=False).
    last_state = fields.Boolean(
        default=False,
        readonly=True,
        copy=False,
        help="Previous evaluation state (used for edge mode).",
    )
    last_value = fields.Float(readonly=True, copy=False)
    last_checked = fields.Datetime(readonly=True, copy=False)

    # ── Notification ──────────────────────────────────────────────────────
    notify_partner_ids = fields.Many2many(
        "res.partner",
        string="Notify Partners",
        help="Notified by email when the threshold is crossed.",
    )

    @api.constrains("cell_ref")
    def _check_cell_ref(self):
        # Validates the bare A1-style syntax only; whether the cell actually
        # exists in the spreadsheet is checked lazily at evaluation time.
        for rec in self:
            col, _row = parse_cell_ref(rec.cell_ref.strip())
            if col is None:
                raise ValidationError(
                    _(
                        "Cell reference %(cell_ref)s must be in the form"
                        " 'A1', 'B12', etc.",
                        cell_ref=rec.cell_ref,
                    )
                )

    # ── Shared cron ───────────────────────────────────────────────────────

    @api.model
    def _cron_evaluate_all(self):
        """Called by the shared ir.cron: evaluate all active alerts."""
        alerts = self.search([("active", "=", True)])
        for alert in alerts:
            try:
                alert._evaluate()
            except Exception:
                # One failing alert must not block the remaining ones.
                _logger.exception(
                    "Failed to evaluate alert %s (%s)", alert.id, alert.name
                )

    # ── Single alert evaluation ───────────────────────────────────────────

    def _evaluate(self):
        """
        Evaluate this alert's cell value and fire a notification if the
        threshold condition is met (subject to trigger_mode).

        When the cell is missing or non-numeric, only last_checked is
        refreshed; last_state/last_value are left untouched.
        """
        self.ensure_one()
        value = self._read_cell_value()
        if value is None:
            # NOTE(review): last_state is deliberately NOT reset here, so a
            # temporarily unreadable cell cannot re-arm an edge alert —
            # confirm this is the intended behaviour.
            _logger.debug(
                "Alert %s: cell %s not found or non-numeric", self.id, self.cell_ref
            )
            self.write({"last_checked": fields.Datetime.now()})
            return

        condition_met = self._check_condition(value)
        now = fields.Datetime.now()

        should_notify = False
        if self.trigger_mode == "level":
            # Level mode: notify on every cycle where the condition holds.
            should_notify = condition_met
        else:  # edge
            # Edge mode: notify only on the False -> True transition.
            should_notify = condition_met and not self.last_state

        if should_notify:
            self._fire_notification(value)

        self.write(
            {
                "last_state": condition_met,
                "last_value": value,
                "last_checked": now,
            }
        )

    def _check_condition(self, value):
        """Return True if value satisfies operator(value, threshold)."""
        # Unknown operator (should not happen via the Selection) => False.
        func = _OP_FUNCS.get(self.operator)
        return func(value, self.threshold) if func else False

    def _read_cell_value(self):
        """
        Read the current numeric value of the watched cell from spreadsheet_raw.

        Uses the shared ``read_cell_value`` helper to locate the cell, then
        converts the result to float. Returns None if the cell is absent or
        non-numeric.

        Note: For formula cells (=PIVOT(…)), the stored value in spreadsheet_raw
        is whatever was last computed client-side and saved.
        """
        raw = self.spreadsheet_id.sudo().spreadsheet_raw or {}
        value = read_cell_value(raw, self.cell_ref.strip(), self.sheet_name or None)
        if value is None:
            return None
        try:
            # Best-effort decimal-comma normalisation ("1,5" -> "1.5").
            # Thousand-separated values like "1,234.56" become invalid after
            # the replace and yield None.
            return float(str(value).replace(",", "."))
        except (ValueError, TypeError):
            return None

    def _fire_notification(self, value):
        """Post a Chatter alert message and email subscribers."""
        op_label = dict(_OPERATORS).get(self.operator, self.operator)
        base_url = self.env["ir.config_parameter"].sudo().get_param("web.base.url")
        body = self.env["ir.qweb"]._render(
            "spreadsheet_oca.spreadsheet_alert_notification_template",
            {
                "alert": self,
                "value_str": f"{value:.4g}",
                "op_label": op_label,
                "threshold_str": f"{self.threshold:.4g}",
                "base_url": base_url,
            },
        )

        # Posted on the spreadsheet (not the alert) so subscribers of the
        # document see it. Comment subtype when there are explicit recipients;
        # internal note otherwise.
        self.spreadsheet_id.sudo().message_post(
            body=body,
            subject=_("KPI Alert: %(name)s", name=self.name),
            partner_ids=self.notify_partner_ids.ids,
            subtype_xmlid=(
                "mail.mt_comment" if self.notify_partner_ids else "mail.mt_note"
            ),
        )

    def action_evaluate_now(self):
        """Manually trigger evaluation of this alert."""
        self.ensure_one()
        self._evaluate()

    def action_reset_state(self):
        """Reset last_state so an edge alert can trigger again."""
        self.write({"last_state": False})
import logging
import re

from odoo import _, api, fields, models
from odoo.exceptions import ValidationError

from .cell_ref import parse_cell_key, read_cell_value

_logger = logging.getLogger(__name__)

# Valid parameter names: start with a lowercase letter, then lowercase letters,
# digits, or underscores. Mirrors Python %(name)s identifier conventions.
_NAME_RE = re.compile(r"^[a-z][a-z0-9_]*$")


class SpreadsheetInputParam(models.Model):
    """Named cell registry for a spreadsheet.

    Each record binds a name to one cell; ``_sync_from_spreadsheet`` copies
    the cell's stored value into ``current_value`` so that server-side code
    (e.g. scheduled refresh) can substitute ``%(name)s`` tokens in domains.
    """

    _name = "spreadsheet.input_param"
    _description = "Spreadsheet Input Parameter"
    _inherit = ["mail.thread"]
    _order = "spreadsheet_id, name"

    name = fields.Char(
        required=True,
        tracking=True,
        help=(
            "Identifier used in domain templates as %(name)s.\n"
            "Must start with a lowercase letter and contain only lowercase letters, "
            "digits, and underscores."
        ),
    )
    spreadsheet_id = fields.Many2one(
        "spreadsheet.spreadsheet",
        required=True,
        ondelete="cascade",
        index=True,
    )
    cell_ref = fields.Char(
        string="Cell Reference",
        required=True,
        tracking=True,
        help=(
            "Cell to read the parameter value from.\n"
            "Use a bare reference (e.g. B3) or include the sheet name "
            "(e.g. Sheet1!B3)."
        ),
    )
    description = fields.Char(
        help="Optional human note explaining what this parameter controls.",
    )
    active = fields.Boolean(default=True, tracking=True)

    # ── Synced value ──────────────────────────────────────────────────────
    # Snapshot fields maintained by _sync_from_spreadsheet; copy=False so
    # duplicated records start clean.
    current_value = fields.Char(
        readonly=True,
        copy=False,
        help="Last value read from the spreadsheet cell.",
    )
    last_synced = fields.Datetime(
        readonly=True,
        copy=False,
    )

    # ── Unique name per spreadsheet ───────────────────────────────────────
    _sql_constraints = [
        (
            "unique_name_per_spreadsheet",
            "UNIQUE(spreadsheet_id, name)",
            "A parameter with this name already exists for this spreadsheet.",
        ),
    ]

    # ── Constraints ───────────────────────────────────────────────────────

    @api.constrains("cell_ref")
    def _check_cell_ref(self):
        # Syntax check only — the cell's existence is resolved at sync time.
        for rec in self:
            if not rec.cell_ref:
                continue
            _sheet, col, row = parse_cell_key(rec.cell_ref.strip())
            if col is None:
                raise ValidationError(
                    _("Cell reference %(ref)r is not valid. Use 'B3' or 'Sheet1!B3'.")
                    % {"ref": rec.cell_ref}
                )

    @api.constrains("name")
    def _check_name(self):
        for rec in self:
            if rec.name and not _NAME_RE.match(rec.name):
                raise ValidationError(
                    _(
                        "Parameter name %(name)r is not valid. "
                        "It must start with a lowercase letter and contain only "
                        "lowercase letters, digits, and underscores."
                    )
                    % {"name": rec.name}
                )

    # ── Sync logic ────────────────────────────────────────────────────────

    def _sync_from_spreadsheet(self):
        """Read this parameter's cell from spreadsheet_raw and store the value."""
        self.ensure_one()
        raw = self.spreadsheet_id.sudo().spreadsheet_raw or {}
        value = read_cell_value(raw, self.cell_ref.strip())
        now = fields.Datetime.now()
        if value is not None:
            self.write({"current_value": str(value), "last_synced": now})
        else:
            # Cell missing/empty: keep the previous value but record the
            # attempt so staleness is visible.
            self.write({"last_synced": now})

    def action_sync_now(self):
        """Manually trigger a sync for this parameter."""
        self.ensure_one()
        self._sync_from_spreadsheet()

    @api.model
    def _sync_all_for_spreadsheet(self, spreadsheet_id):
        """Sync all active input parameters for the given spreadsheet record ID."""
        params = self.search(
            [
                ("spreadsheet_id", "=", spreadsheet_id),
                ("active", "=", True),
            ]
        )
        for param in params:
            try:
                param._sync_from_spreadsheet()
            except Exception:
                # Best-effort: one bad reference must not block the others.
                _logger.exception(
                    "Failed to sync input param %s (%s) for spreadsheet %s",
                    param.id,
                    param.name,
                    spreadsheet_id,
                )
+""" + +import logging + +from markupsafe import Markup + +from odoo import _, api, fields, models +from odoo.exceptions import ValidationError + +from .pivot_data import collect_pivot_summaries, render_pivot_table_html + +_logger = logging.getLogger(__name__) + + +def _apply_param_substitution(domain, params): + """ + Replace ``%(name)s`` tokens in string leaf values of an Odoo domain list. + + Only the *value* position (index 2) of ``(field, operator, value)`` tuples + is touched — field names and operators are never modified. Nested domain + lists (e.g. ``["&", cond1, cond2]``) are handled recursively. + + *params* is a ``{name: value}`` dict (values are already strings). + Safe: substituted values come from the DB, not from user input at runtime. + + Example:: + + domain = [("date", ">=", "%(start_date)s")] + params = {"start_date": "2026-01-01"} + → [("date", ">=", "2026-01-01")] + """ + if not isinstance(domain, list): + return domain + result = [] + for item in domain: + if isinstance(item, tuple | list) and len(item) == 3: + field, op, value = item + if isinstance(value, str): + try: + value = value % params + except KeyError as exc: + _logger.warning( + "Domain substitution: unknown param %s — token left as-is", exc + ) + except TypeError: + _logger.debug( + "Domain substitution: value %r has lone %%", + value, + ) + result.append((field, op, value)) + elif isinstance(item, list): + result.append(_apply_param_substitution(item, params)) + else: + result.append(item) + return result + + +_INTERVAL_TYPES = [ + ("hours", "Hour(s)"), + ("days", "Day(s)"), + ("weeks", "Week(s)"), + ("months", "Month(s)"), +] + + +class SpreadsheetRefreshSchedule(models.Model): + _name = "spreadsheet.refresh.schedule" + _description = "Spreadsheet Scheduled Data Refresh" + _inherit = ["mail.thread"] + _order = "spreadsheet_id, name" + + name = fields.Char(required=True, tracking=True) + spreadsheet_id = fields.Many2one( + "spreadsheet.spreadsheet", + required=True, + 
ondelete="cascade",
+        index=True,
+    )
+    active = fields.Boolean(default=True, tracking=True)
+    # Backing ir.cron record; action_deactivate() pauses it instead of
+    # deleting it so the job's settings survive a pause/resume cycle.
+    cron_id = fields.Many2one(
+        "ir.cron",
+        string="Cron Job",
+        ondelete="set null",
+        readonly=True,
+        copy=False,
+    )
+    last_run = fields.Datetime(readonly=True, copy=False)
+    notify_partner_ids = fields.Many2many(
+        "res.partner",
+        string="Notify Partners",
+        help="These partners receive an email summary after each refresh.",
+    )
+    interval_number = fields.Integer(
+        default=1,
+        string="Every",
+        tracking=True,
+    )
+    interval_type = fields.Selection(
+        _INTERVAL_TYPES,
+        default="weeks",
+        string="Interval",
+        tracking=True,
+        required=True,
+    )
+
+    @api.constrains("interval_number")
+    def _check_interval_number(self):
+        for rec in self:
+            if rec.interval_number < 1:
+                raise ValidationError(_("Interval must be at least 1."))
+
+    # ── Cron lifecycle ────────────────────────────────────────────────────────
+
+    def action_activate(self):
+        """Create or reactivate the cron job for this schedule."""
+        for rec in self:
+            if rec.cron_id:
+                # Existing cron: re-enable and resync the interval settings.
+                rec.cron_id.sudo().write(
+                    {
+                        "active": True,
+                        "interval_number": rec.interval_number,
+                        "interval_type": rec.interval_type,
+                    }
+                )
+            else:
+                model_id = self.env["ir.model"].sudo()._get(self._name).id
+                # The record id is baked into the cron's code string, so the
+                # cron stays bound to this schedule record.
+                cron = (
+                    self.env["ir.cron"]
+                    .sudo()
+                    .create(
+                        {
+                            "name": _(
+                                "Spreadsheet Refresh: %(name)s",
+                                name=rec.spreadsheet_id.name,
+                            ),
+                            "model_id": model_id,
+                            "state": "code",
+                            "code": f"model.browse({rec.id})._run_refresh()",
+                            "interval_number": rec.interval_number,
+                            "interval_type": rec.interval_type,
+                            "active": True,
+                        }
+                    )
+                )
+                rec.cron_id = cron
+
+    def action_deactivate(self):
+        """Pause (deactivate) the cron job without deleting it."""
+        for rec in self:
+            if rec.cron_id:
+                rec.cron_id.sudo().write({"active": False})
+
+    def action_run_now(self):
+        """Manually trigger a refresh immediately."""
+        self.ensure_one()
+        self._run_refresh()
+
+    def unlink(self):
+        # Capture the crons before super() removes the schedules, then drop
+        # them afterwards so no orphaned jobs are left behind.
+        crons = self.mapped("cron_id").sudo()
+        result = super().unlink()
+        crons.unlink()
+        return result
+
+    # ── Refresh execution ────────────────────────────────────────────────────
+
+    def _run_refresh(self):
+        """
+        Execute one refresh cycle:
+        - Read pivot definitions from spreadsheet_raw JSON.
+        - Compute fresh data for each ODOO pivot.
+        - Post Chatter summary; email notify_partner_ids.
+        - Record last_run timestamp.
+        """
+        self.ensure_one()
+        spreadsheet = self.spreadsheet_id
+
+        # Sync input parameters and build substitution dict before processing pivots.
+        self.env["spreadsheet.input_param"]._sync_all_for_spreadsheet(spreadsheet.id)
+        input_params = self.env["spreadsheet.input_param"].search(
+            [
+                ("spreadsheet_id", "=", spreadsheet.id),
+                ("active", "=", True),
+            ]
+        )
+        param_dict = {p.name: p.current_value or "" for p in input_params}
+
+        raw = spreadsheet.sudo().spreadsheet_raw or {}
+
+        if not raw.get("pivots"):
+            # Nothing to refresh — still stamp last_run so the schedule
+            # visibly ran.
+            _logger.info(
+                "Spreadsheet refresh %s: no pivots found in spreadsheet %s",
+                self.id,
+                spreadsheet.id,
+            )
+            self.sudo().write({"last_run": fields.Datetime.now()})
+            return
+
+        summaries, failed_pivot_names = collect_pivot_summaries(
+            self.env,
+            raw,
+            domain_transform=lambda d: _apply_param_substitution(d, param_dict),
+        )
+
+        body = self._render_refresh_html(summaries)
+        partner_ids = self.notify_partner_ids.ids
+
+        # mt_comment triggers partner notification emails; mt_note is an
+        # internal-only log message when no partners are subscribed.
+        spreadsheet.sudo().message_post(
+            body=body,
+            subject=_("Data refresh: %(name)s", name=spreadsheet.name),
+            partner_ids=partner_ids,
+            subtype_xmlid="mail.mt_comment" if partner_ids else "mail.mt_note",
+        )
+
+        if failed_pivot_names:
+            warning_body = self.env["ir.qweb"]._render(
+                "spreadsheet_oca.spreadsheet_refresh_warning_template",
+                {
+                    "schedule_name": self.name,
+                    "failed_names": failed_pivot_names,
+                },
+            )
+            spreadsheet.sudo().message_post(
+                body=warning_body,
+                subtype_xmlid="mail.mt_note",
+            )
+
+        self.sudo().write({"last_run": fields.Datetime.now()})
+
+    # ── HTML rendering ────────────────────────────────────────────────────────
+
+    @api.model
+    def _render_refresh_html(self, summaries):
+        """Render a compact HTML summary of fresh pivot data.
+
+        Uses the QWeb template ``spreadsheet_refresh_notification_template``
+        which can be customised via Settings > Technical > Views.
+        """
+        pivot_html_list = [Markup(render_pivot_table_html(s)) for s in summaries]
+        return self.env["ir.qweb"]._render(
+            "spreadsheet_oca.spreadsheet_refresh_notification_template",
+            {"pivot_html_list": pivot_html_list},
+        )
diff --git a/spreadsheet_oca/models/spreadsheet_scenario.py b/spreadsheet_oca/models/spreadsheet_scenario.py
new file mode 100644
index 00000000..b564ae2a
--- /dev/null
+++ b/spreadsheet_oca/models/spreadsheet_scenario.py
@@ -0,0 +1,315 @@
+# Copyright 2025 Ledo Enterprises LLC
+# License AGPL-3.0 or later (https://www.gnu.org/licenses/agpl).
+"""
+Named scenarios / what-if manager.
+
+A *scenario* stores a named set of cell overrides for a spreadsheet.
+Users can define multiple "what-if" variants (e.g. "Optimistic", "Pessimistic",
+"Base Case") without duplicating the spreadsheet.
+
+The ``action_apply_to_copy()`` method materialises a scenario into a new
+spreadsheet record with the overrides written into the raw JSON — giving
+immediate value with no JavaScript required.
+
+Long-term target: a JS side-panel overlay that applies overrides live in
+the o-spreadsheet editor without creating a copy.
+"""
+
+import copy
+import json
+
+from odoo import _, api, fields, models
+from odoo.exceptions import ValidationError
+
+from .cell_ref import (
+    _idx_to_cell_address,
+    _resolve_sheet,
+    parse_cell_key,
+    read_cell_value,
+)
+
+
+class SpreadsheetScenario(models.Model):
+    _name = "spreadsheet.scenario"
+    _description = "Spreadsheet What-If Scenario"
+    _inherit = ["mail.thread"]
+    _order = "spreadsheet_id, is_base desc, name"
+
+    name = fields.Char(required=True, tracking=True)
+    spreadsheet_id = fields.Many2one(
+        "spreadsheet.spreadsheet",
+        required=True,
+        ondelete="cascade",
+        index=True,
+    )
+    description = fields.Char(
+        help="Brief note on what this scenario represents",
+    )
+    is_base = fields.Boolean(
+        default=False,
+        tracking=True,
+        help="Mark as the baseline scenario. Only one active scenario per spreadsheet "
+        "may be designated as the base.",
+    )
+    active = fields.Boolean(default=True)
+    # Free-form JSON blob; structure is enforced by _check_cell_overrides.
+    cell_overrides = fields.Text(
+        help=(
+            "JSON dict mapping cell references to override values.\n"
+            "Supported formats:\n"
+            '  {"Sheet1!B3": 125000, "C5": 0.15, "D7": "text", "E9": null}\n'
+            "Keys may include an optional sheet name prefix (SheetName!ColRow) "
+            "or use a bare column+row reference that applies to the first sheet.\n"
+            "Values must be numbers, strings, booleans, or null."
+        ),
+    )
+    override_count = fields.Integer(
+        compute="_compute_override_count",
+        store=False,
+        string="# Overrides",
+    )
+
+    # ── Computed fields ───────────────────────────────────────────────────────
+
+    def _compute_override_count(self):
+        # Count keys in the overrides dict; malformed or non-dict JSON
+        # counts as zero rather than raising in a compute.
+        for rec in self:
+            raw = rec.cell_overrides
+            if raw and raw.strip():
+                try:
+                    parsed = json.loads(raw)
+                    rec.override_count = len(parsed) if isinstance(parsed, dict) else 0
+                except (ValueError, TypeError):
+                    rec.override_count = 0
+            else:
+                rec.override_count = 0
+
+    # ── Constraints ───────────────────────────────────────────────────────────
+
+    @api.constrains("is_base", "spreadsheet_id")
+    def _check_single_base(self):
+        """Ensure at most one active base scenario exists per spreadsheet."""
+        for rec in self:
+            if not rec.is_base:
+                continue
+            conflict = self.search(
+                [
+                    ("spreadsheet_id", "=", rec.spreadsheet_id.id),
+                    ("is_base", "=", True),
+                    ("active", "=", True),
+                    ("id", "!=", rec.id),
+                ],
+                limit=1,
+            )
+            if conflict:
+                raise ValidationError(
+                    _(
+                        "Spreadsheet %(sheet)s already has a base"
+                        " scenario: %(base)s. "
+                        "Only one active scenario per spreadsheet"
+                        " can be marked as base. "
+                        "Please unmark the existing base"
+                        " scenario first.",
+                        sheet=rec.spreadsheet_id.name,
+                        base=conflict.name,
+                    )
+                )
+
+    @api.constrains("cell_overrides")
+    def _check_cell_overrides(self):
+        """Validate cell_overrides JSON structure and cell reference format."""
+        for rec in self:
+            raw = rec.cell_overrides
+            if not raw or not raw.strip():
+                continue
+
+            # Must be valid JSON.
+            try:
+                parsed = json.loads(raw)
+            except (ValueError, TypeError) as exc:
+                raise ValidationError(
+                    _("Cell overrides is not valid JSON: %(error)s", error=exc)
+                ) from exc
+
+            # Top-level must be a dict.
+            if not isinstance(parsed, dict):
+                raise ValidationError(
+                    _(
+                        "Cell overrides must be a JSON object (dict), "
+                        'e.g. {"B3": 125000}. Got: %(type_name)s',
+                        type_name=type(parsed).__name__,
+                    )
+                )
+
+            # Validate each key and value.
+            for key, value in parsed.items():
+                _sheet, col_idx, _row = parse_cell_key(key)
+                if col_idx is None:
+                    raise ValidationError(
+                        _(
+                            "Invalid cell reference key %(key)r in cell overrides. "
+                            "Expected format: 'B3' or 'Sheet1!B3' "
+                            "(letters followed by a positive integer, "
+                            "with an optional 'SheetName!' prefix).",
+                            key=key,
+                        )
+                    )
+                if value is not None and not isinstance(
+                    value, int | float | str | bool
+                ):
+                    raise ValidationError(
+                        _(
+                            "Invalid value %(value)r for key"
+                            " %(key)r in cell overrides. "
+                            "Values must be numbers, strings,"
+                            " booleans, or null.",
+                            value=value,
+                            key=key,
+                        )
+                    )
+
+    # ── Actions ───────────────────────────────────────────────────────────────
+
+    def action_apply_to_copy(self):
+        """Create a new spreadsheet with this scenario's cell overrides applied.
+
+        Returns an ir.actions.act_window pointing to the new spreadsheet record.
+        Does not modify the original spreadsheet.
+
+        Implementation note:
+        The current approach deep-copies spreadsheet_raw (the o-spreadsheet JSON
+        blob) and writes the overridden values directly into the cell map, then
+        creates a new ``spreadsheet.spreadsheet`` record with the modified JSON.
+        This gives immediate value with no frontend JavaScript required.
+
+        Long-term target: a JS side-panel that applies overrides live in the
+        o-spreadsheet editor without creating a persistent copy.
+        """
+        self.ensure_one()
+        spreadsheet = self.spreadsheet_id
+
+        # Parse overrides (empty → nothing to apply, but we still create a copy).
+        # json.loads is safe to call unguarded here: _check_cell_overrides has
+        # already validated the stored value.
+        overrides = {}
+        raw_overrides = self.cell_overrides
+        if raw_overrides and raw_overrides.strip():
+            overrides = json.loads(raw_overrides)
+
+        # Deep-copy the source raw JSON so we never mutate the original.
+        source_raw = spreadsheet.sudo().spreadsheet_raw or {}
+        new_raw = copy.deepcopy(source_raw)
+
+        sheets = new_raw.get("sheets", [])
+
+        for key, value in overrides.items():
+            sheet_name, col_idx, row_idx = parse_cell_key(key)
+            if col_idx is None:
+                # Already validated by constraint, but be defensive.
+                continue
+
+            # Resolve the target sheet (case-insensitive, falls back to first).
+            target_sheet = _resolve_sheet(sheets, sheet_name)
+            if target_sheet is None:
+                continue
+
+            # Write cell using flat "A1" format (o-spreadsheet native).
+            cells = target_sheet.setdefault("cells", {})
+            cell_addr = _idx_to_cell_address(col_idx, row_idx)
+
+            # o-spreadsheet cell data: preserve existing keys (style, format, …)
+            # but update/set 'content' to the override value.
+            cell_data = cells.setdefault(cell_addr, {})
+            cell_data["content"] = str(value) if value is not None else ""
+
+        new_spreadsheet = self.env["spreadsheet.spreadsheet"].create(
+            {
+                "name": _(
+                    "%(sheet)s \u2014 %(scenario)s",
+                    sheet=spreadsheet.name,
+                    scenario=self.name,
+                ),
+                "spreadsheet_raw": new_raw,
+            }
+        )
+
+        return new_spreadsheet.get_formview_action()
+
+    def action_export_comparison(self):
+        """Generate a comparison of this scenario vs the base values.
+
+        Reads base (current) cell values from spreadsheet_raw for each key in
+        this scenario's overrides and returns a display_notification action
+        with an HTML summary showing: cell ref, base value, override value.
+
+        If no base scenario is found, the comparison is made against the raw
+        values currently stored in the spreadsheet.
+        """
+        self.ensure_one()
+        overrides = {}
+        raw_overrides = self.cell_overrides
+        if raw_overrides and raw_overrides.strip():
+            overrides = json.loads(raw_overrides)
+
+        if not overrides:
+            return {
+                "type": "ir.actions.client",
+                "tag": "display_notification",
+                "params": {
+                    "title": _("No Overrides"),
+                    "message": _("This scenario has no cell overrides defined."),
+                    "type": "info",
+                    "sticky": False,
+                },
+            }
+
+        source_raw = self.spreadsheet_id.sudo().spreadsheet_raw or {}
+
+        rows_html = []
+        for key, override_val in overrides.items():
+            sheet_name, col_idx, row_idx = parse_cell_key(key)
+            if col_idx is None:
+                continue
+            # Reconstruct a cell_ref string that read_cell_value can parse.
+            addr = key if not sheet_name else key.split("!", 1)[1]
+            base_val = read_cell_value(source_raw, addr, sheet_name or None)
+            base_display = str(base_val) if base_val is not None else _("(empty)")
+            override_display = (
+                str(override_val) if override_val is not None else _("(null/clear)")
+            )
+            td_style = "padding:3px 8px;border:1px solid #ddd"
+            # NOTE(review): cell values are interpolated into HTML without
+            # escaping; acceptable for trusted spreadsheet data, but consider
+            # markupsafe.escape() if cell contents can come from untrusted
+            # users — TODO confirm.
+            rows_html.append(
+                "<tr>"
+                f'<td style="{td_style}">{key}</td>'
+                f'<td style="{td_style}">{base_display}</td>'
+                f'<td style="{td_style}">{override_display}</td>'
+                "</tr>"
+            )
+
+        th_style = "padding:4px 8px;border:1px solid #ccc;background:#f0f0f0"
+        table = (
+            '<table style="border-collapse:collapse">'
+            "<tr>"
+            f'<th style="{th_style}">Cell</th>'
+            f'<th style="{th_style}">Base Value</th>'
+            f'<th style="{th_style}">Override</th>'
+            "</tr>"
+            + "".join(rows_html)
+            + "</table>"
+        )
+        message = (
+            _(
+                "<p>Scenario <b>%(name)s</b> overrides %(count)d cell(s):</p>",
+                name=self.name,
+                count=len(overrides),
+            )
+            + table
+        )
+
+        return {
+            "type": "ir.actions.client",
+            "tag": "display_notification",
+            "params": {
+                "title": _("Scenario Comparison: %(name)s", name=self.name),
+                "message": message,
+                "type": "info",
+                "sticky": True,
+            },
+        }
diff --git a/spreadsheet_oca/models/spreadsheet_spreadsheet.py b/spreadsheet_oca/models/spreadsheet_spreadsheet.py
index 55a9ae9f..17567350 100644
--- a/spreadsheet_oca/models/spreadsheet_spreadsheet.py
+++ b/spreadsheet_oca/models/spreadsheet_spreadsheet.py
@@ -6,6 +6,7 @@ from io import BytesIO
 
 from odoo import _, api, fields, models
+from odoo.exceptions import UserError
 
 
 class SpreadsheetSpreadsheet(models.Model):
@@ -56,10 +57,264 @@ class SpreadsheetSpreadsheet(models.Model):
         string="Tags", comodel_name="spreadsheet.spreadsheet.tag"
     )
 
+    # ── DRY helper for read_group-based count fields ─────────────────────────
+
+    def _compute_related_count(self, comodel, field_name, extra_domain=None):
+        """Compute a count field by grouping *comodel* on ``spreadsheet_id``.
+
+        By default the domain filters on ``active=True``; pass *extra_domain*
+        to override (e.g. ``[("status", "!=", "error")]`` for writeback logs).
+        """
+        domain = [("spreadsheet_id", "in", self.ids)]
+        if extra_domain is not None:
+            domain += extra_domain
+        else:
+            domain.append(("active", "=", True))
+        # NOTE(review): read_group is the classic public API; newer Odoo
+        # versions favour _read_group — confirm against the target version.
+        counts = self.env[comodel].read_group(
+            domain, ["spreadsheet_id"], ["spreadsheet_id"]
+        )
+        count_map = {c["spreadsheet_id"][0]: c["spreadsheet_id_count"] for c in counts}
+        for rec in self:
+            rec[field_name] = count_map.get(rec.id, 0)
+
+    refresh_schedule_count = fields.Integer(
+        compute="_compute_refresh_schedule_count", string="Refresh Schedules"
+    )
+
+    def _compute_refresh_schedule_count(self):
+        self._compute_related_count(
+            "spreadsheet.refresh.schedule", "refresh_schedule_count"
+        )
+
+    alert_count = fields.Integer(compute="_compute_alert_count", string="KPI Alerts")
+
+    def _compute_alert_count(self):
+        self._compute_related_count("spreadsheet.alert", "alert_count")
+
+    subscriber_count = fields.Integer(
+        compute="_compute_subscriber_count", string="Subscribers"
+    )
+
+    def _compute_subscriber_count(self):
+        self._compute_related_count("spreadsheet.subscription", "subscriber_count")
+
+    def action_open_alerts(self):
+        self.ensure_one()
+        return {
+            "type": "ir.actions.act_window",
+            "name": _("KPI Alerts"),
+            "res_model": "spreadsheet.alert",
+            "view_mode": "list,form",
+            "domain": [("spreadsheet_id", "=", self.id)],
+            "context": {"default_spreadsheet_id": self.id},
+        }
+
+    def action_open_refresh_schedules(self):
+        self.ensure_one()
+        return {
+            "type": "ir.actions.act_window",
+            "name": _("Refresh Schedules"),
+            "res_model": "spreadsheet.refresh.schedule",
+            "view_mode": "list,form",
+            "domain": [("spreadsheet_id", "=", self.id)],
+            "context": {"default_spreadsheet_id": self.id},
+        }
+
+    def action_open_subscriptions(self):
+        self.ensure_one()
+        return {
+            "type": "ir.actions.act_window",
+            "name": _("Subscribers"),
+            "res_model": "spreadsheet.subscription",
+            "view_mode": "list,form",
+            "domain": [("spreadsheet_id", "=", self.id)],
+            "context": {"default_spreadsheet_id": self.id},
+        }
+
+    scenario_count = fields.Integer(
+        compute="_compute_scenario_count", string="What-If Scenarios"
+    )
+
+    def _compute_scenario_count(self):
+        self._compute_related_count("spreadsheet.scenario", "scenario_count")
+
+    def action_open_scenarios(self):
+        self.ensure_one()
+        return {
+            "type": "ir.actions.act_window",
+            "name": _("What-If Scenarios"),
+            "res_model": "spreadsheet.scenario",
+            "view_mode": "list,form",
+            "domain": [("spreadsheet_id", "=", self.id)],
+            "context": {"default_spreadsheet_id": self.id},
+        }
+
+    input_param_count = fields.Integer(
+        compute="_compute_input_param_count", string="Input Parameters"
+    )
+
+    def _compute_input_param_count(self):
+        self._compute_related_count("spreadsheet.input_param", "input_param_count")
+
+    def action_open_input_params(self):
+        self.ensure_one()
+        return {
+            "type": "ir.actions.act_window",
+            "name": _("Input Parameters"),
+            "res_model": "spreadsheet.input_param",
+            "view_mode": "list,form",
+            "domain": [("spreadsheet_id", "=", self.id)],
+            "context": {
+                "default_spreadsheet_id": self.id,
+                "search_default_active": 1,
+            },
+        }
 
     @api.depends("name")
     def _compute_filename(self):
         for record in self:
-            record.filename = "%s.json" % (self.name or _("Unnamed"))
+            record.filename = f"{record.name or _('Unnamed')}.json"
+
+    # ── Writeback ─────────────────────────────────────────────────────────────
+    writeback_enabled = fields.Boolean(
+        default=False,
+        tracking=True,
+        help=(
+            "Allow users to write Odoo record values directly from this "
+            "spreadsheet's List views. Each change is logged and reversible."
+        ),
+    )
+    writeback_log_count = fields.Integer(
+        compute="_compute_writeback_log_count",
+    )
+
+    def _compute_writeback_log_count(self):
+        # Logs have no 'active' flag, so replace the default active filter
+        # with a status filter that hides failed attempts.
+        self._compute_related_count(
+            "spreadsheet.writeback.log",
+            "writeback_log_count",
+            extra_domain=[("status", "!=", "error")],
+        )
+
+    def action_open_writeback_log(self):
+        self.ensure_one()
+        return {
+            "type": "ir.actions.act_window",
+            "name": _("Writeback Log"),
+            "res_model": "spreadsheet.writeback.log",
+            "view_mode": "list,form",
+            "domain": [("spreadsheet_id", "=", self.id)],
+            "context": {"default_spreadsheet_id": self.id},
+        }
+
+    @api.model
+    def action_rollback_writeback(self, log_id):
+        # The log itself is read via sudo; access to the *target* record is
+        # checked explicitly below with the caller's own rights.
+        log = self.env["spreadsheet.writeback.log"].sudo().browse(log_id)
+        if not log.exists():
+            raise UserError(
+                _("Writeback log entry %(log_id)d not found.", log_id=log_id)
+            )
+
+        if log.old_value is False or log.old_value is None:
+            raise UserError(
+                _(
+                    "Cannot roll back log %(log_id)d: previous value is unknown.",
+                    log_id=log_id,
+                )
+            )
+
+        if log.res_model not in self.env:
+            raise UserError(
+                _(
+                    "Cannot roll back log %(log_id)d:"
+                    " model %(model)r is not available.",
+                    log_id=log_id,
+                    model=log.res_model,
+                )
+            )
+
+        # Access check: ensure the calling user has write permission on the
+        # target model/record before we escalate to sudo().
+        self.env[log.res_model].check_access("write")
+        record = self.env[log.res_model].browse(log.record_id)
+        if not record.exists():
+            raise UserError(
+                _(
+                    "Cannot roll back log %(log_id)d: record "
+                    "%(model)s(%(record_id)d) no longer exists.",
+                    log_id=log_id,
+                    model=log.res_model,
+                    record_id=log.record_id,
+                )
+            )
+        record.check_access("write")
+
+        record.write({log.field_name: log.old_value})
+        log.write({"status": "rolled_back"})
+
+        spreadsheet = log.spreadsheet_id.sudo()
+        spreadsheet.message_post(
+            body=_(
+                "Writeback rolled back: field %(field)s on "
+                "%(model)s #%(record_id)d restored to "
+                "%(old_value)s (was %(new_value)s).",
+                field=log.field_name,
+                model=log.res_model,
+                record_id=log.record_id,
+                old_value=log.old_value,
+                new_value=log.new_value,
+            ),
+            subtype_xmlid="mail.mt_note",
+        )
+
+        return True
+
+    # ── XLSX Export ───────────────────────────────────────────────────────────
+    def action_export_xlsx(self):
+        """Export this spreadsheet as .xlsx and return a download action."""
+        # Local import avoids a circular import at module load time.
+        from .spreadsheet_xlsx_export import SpreadsheetXlsxExporter
+
+        self.ensure_one()
+        exporter = SpreadsheetXlsxExporter(self.env, self)
+        xlsx_bytes = exporter.render()
+
+        # assumes `base64` is imported at the top of this module (not visible
+        # in this hunk) — TODO confirm.
+        filename = f"{self.name or 'spreadsheet'}.xlsx"
+        attachment = self.env["ir.attachment"].create(
+            {
+                "name": filename,
+                "type": "binary",
+                "datas": base64.b64encode(xlsx_bytes),
+                "mimetype": (
+                    "application/vnd.openxmlformats-officedocument.spreadsheetml.sheet"
+                ),
+                "res_model": self._name,
+                "res_id": self.id,
+            }
+        )
+        return {
+            "type": "ir.actions.act_url",
+            "url": f"/web/content/{attachment.id}?download=true",
+            "target": "self",
+        }
+
+    @api.model
+    def get_xlsx_bytes(self, spreadsheet_id):
+        """Return raw .xlsx bytes (base64) for a spreadsheet."""
+        from .spreadsheet_xlsx_export import SpreadsheetXlsxExporter
+
+        spreadsheet = self.browse(spreadsheet_id)
+        spreadsheet.check_access("read")
+        exporter = SpreadsheetXlsxExporter(self.env, spreadsheet)
+        return base64.b64encode(exporter.render()).decode()
+
+    # ── Pivot Data ────────────────────────────────────────────────────────────
+    @api.model
+    def get_pivot_data(self, model_name, domain, context, row_dims, col_dims, measures):
+        """Return pivot table data computed server-side (JSON-RPC entry point)."""
+        from .pivot_data import _get_pivot_data
+
+        return _get_pivot_data(
+            self.env, model_name, domain, context, row_dims, col_dims, measures
+        )
 
     def create_document_from_attachment(self, attachment_ids):
         attachments = self.env["ir.attachment"].browse(attachment_ids)
diff --git a/spreadsheet_oca/models/spreadsheet_subscription.py b/spreadsheet_oca/models/spreadsheet_subscription.py
new file mode 100644
index 00000000..a61ae8ab
--- /dev/null
+++ b/spreadsheet_oca/models/spreadsheet_subscription.py
@@ -0,0 +1,188 @@
+# Copyright 2025 Ledo Enterprises LLC
+# License AGPL-3.0 or later (https://www.gnu.org/licenses/agpl).
+"""
+Dashboard subscriptions.
+
+Allows partners to subscribe to a periodic email digest of a spreadsheet.
+A shared daily cron evaluates all active subscriptions and sends those that
+are due (based on frequency and last_sent timestamp).
+
+Each digest email optionally includes a compact pivot data summary rendered
+with inline CSS, suitable for email clients.
+"""
+
+import logging
+from datetime import timedelta
+
+from markupsafe import Markup
+
+from odoo import _, api, fields, models
+
+from .pivot_data import collect_pivot_summaries, render_pivot_table_html
+
+_logger = logging.getLogger(__name__)
+
+_FREQUENCY_SELECTION = [
+    ("daily", "Daily"),
+    ("weekly", "Weekly"),
+    ("monthly", "Monthly"),
+]
+
+# "monthly" uses a fixed 30-day approximation of a calendar month.
+_FREQUENCY_DELTA = {
+    "daily": timedelta(days=1),
+    "weekly": timedelta(weeks=1),
+    "monthly": timedelta(days=30),
+}
+
+
+class SpreadsheetSubscription(models.Model):
+    _name = "spreadsheet.subscription"
+    _description = "Spreadsheet Dashboard Subscription"
+    _inherit = ["mail.thread"]
+    _order = "spreadsheet_id, partner_id"
+
+    name = fields.Char(
+        compute="_compute_name",
+        store=True,
+        readonly=False,
+    )
+    spreadsheet_id = fields.Many2one(
+        "spreadsheet.spreadsheet",
+        required=True,
+        ondelete="cascade",
+        index=True,
+    )
+    partner_id = fields.Many2one(
+        "res.partner",
+        string="Subscriber",
+        required=True,
+        ondelete="restrict",
+        index=True,
+    )
+    active = fields.Boolean(default=True, tracking=True)
+    frequency = fields.Selection(
+        _FREQUENCY_SELECTION,
+        default="weekly",
+        required=True,
+        tracking=True,
+    )
+    last_sent = fields.Datetime(readonly=True, copy=False)
+    include_pivot_data = fields.Boolean(
+        default=True,
+        help="Include a live pivot data summary in the email.",
+    )
+
+    _sql_constraints = [
+        (
+            "unique_spreadsheet_partner",
+            "UNIQUE(spreadsheet_id, partner_id)",
+            "A partner can only have one subscription per spreadsheet.",
+        ),
+    ]
+
+    # ── Default name computation ──────────────────────────────────────────────
+
+    # `name` is a *stored* compute, so it must depend on the dotted ``.name``
+    # fields as well: depending only on the m2o fields would leave a stale
+    # stored name after the partner or the spreadsheet is renamed.  Dotted
+    # dependencies also trigger when the m2o itself is changed.
+    @api.depends("spreadsheet_id.name", "partner_id.name")
+    def _compute_name(self):
+        for rec in self:
+            spreadsheet_name = rec.spreadsheet_id.name or _("Unnamed Spreadsheet")
+            partner_name = rec.partner_id.name or _("Unknown Partner")
+            rec.name = f"{spreadsheet_name} — {partner_name}"
+
+    # ── Shared cron ───────────────────────────────────────────────────────────
+
+    @api.model
def _cron_send_digests(self): + """Called by the shared ir.cron: send digests that are due.""" + active_subs = self.search([("active", "=", True)]) + now = fields.Datetime.now() + for sub in active_subs: + try: + sub._send_digest_if_due(now) + except Exception: + _logger.exception( + "Failed to send digest for subscription %s (%s)", + sub.id, + sub.name, + ) + + def _send_digest_if_due(self, now=None): + """Send this subscription's digest if it is due, otherwise skip.""" + self.ensure_one() + if now is None: + now = fields.Datetime.now() + delta = _FREQUENCY_DELTA.get(self.frequency, timedelta(weeks=1)) + if self.last_sent and (now - self.last_sent) < delta: + return # not due yet + self._send_digest() + + # ── Digest sending ──────────────────────────────────────────────────────── + + def _send_digest(self): + """Generate and send a digest email for this subscription.""" + self.ensure_one() + spreadsheet = self.spreadsheet_id.sudo() + summaries = [] + + if self.include_pivot_data: + raw = spreadsheet.spreadsheet_raw or {} + summaries, _failed = collect_pivot_summaries(self.env, raw) + + body_html = self._render_digest_html(spreadsheet, summaries) + subject = _("Spreadsheet Digest: %(name)s", name=spreadsheet.name) + + partner = self.partner_id + email_to = partner.email + if not email_to: + _logger.warning( + "Subscription %s: partner %s has no email — skipping", + self.id, + partner.name, + ) + return + + self.env["mail.mail"].sudo().create( + { + "subject": subject, + "body_html": body_html, + "email_to": email_to, + } + ).send() + + self.sudo().write({"last_sent": fields.Datetime.now()}) + + # ── HTML rendering ──────────────────────────────────────────────────────── + + def _render_digest_html(self, spreadsheet, summaries): + """Return a styled HTML email body for the digest. + + Uses the QWeb template ``spreadsheet_subscription_digest_template`` + which can be customised via Settings > Technical > Views. 
+ + Args: + spreadsheet: browse record of spreadsheet.spreadsheet (already sudo'd). + summaries: list of {"name", "model", "result"} dicts from _get_pivot_data. + + Returns: + str — complete HTML body suitable for sending via mail.mail. + """ + now_str = fields.Datetime.now().strftime("%Y-%m-%d %H:%M UTC") + base_url = self.env["ir.config_parameter"].sudo().get_param("web.base.url") + pivot_html_list = [Markup(render_pivot_table_html(s)) for s in summaries] + return self.env["ir.qweb"]._render( + "spreadsheet_oca.spreadsheet_subscription_digest_template", + { + "spreadsheet": spreadsheet, + "now_str": now_str, + "summaries": summaries, + "pivot_html_list": pivot_html_list, + "base_url": base_url, + }, + ) + + # ── Manual send ─────────────────────────────────────────────────────────── + + def action_send_now(self): + """Manually send the digest immediately, bypassing the due check.""" + self.ensure_one() + self._send_digest() diff --git a/spreadsheet_oca/models/spreadsheet_writeback.py b/spreadsheet_oca/models/spreadsheet_writeback.py new file mode 100644 index 00000000..753999ec --- /dev/null +++ b/spreadsheet_oca/models/spreadsheet_writeback.py @@ -0,0 +1,80 @@ +# Copyright 2025 Ledo Enterprises LLC +# License AGPL-3.0 or later (https://www.gnu.org/licenses/agpl). +""" +Cell writeback: edit list cells to update Odoo records. + +Allows users to write Odoo record field values directly from a +spreadsheet's List view. Each cell edit in the browser posts to +/spreadsheet/writeback (controllers/spreadsheet_writeback.py) which +calls the target model's write() and records an audit log entry here. + +JavaScript integration (not yet implemented): the JS list cell-edit +handler will POST to /spreadsheet/writeback with the spreadsheet_id, +model, record_id, field_name and new_value. The controller returns a +JSON dict with {success, old_value, new_value, log_id} or {error}. 
+ +Limitations of the old_value capture: + - Field values are converted to str() before storage in the Char + field. For Many2one fields str() gives e.g. "product.product(42,)" + which is not directly re-writable; rollback of many2one fields is + therefore only possible for integer / char / float / selection fields + where str→original type conversion is unambiguous. + - The rollback helper (action_rollback_writeback) writes old_value as + a raw string; callers that need type-safe rollback for relational + fields should implement their own conversion before calling write(). +""" + +import logging + +from odoo import fields, models + +_logger = logging.getLogger(__name__) + + +class SpreadsheetWritebackLog(models.Model): + _name = "spreadsheet.writeback.log" + _description = "Spreadsheet Writeback Audit Log" + _order = "writeback_at desc" + + spreadsheet_id = fields.Many2one( + "spreadsheet.spreadsheet", + required=True, + ondelete="cascade", + index=True, + string="Spreadsheet", + ) + res_model = fields.Char(required=True, string="Model") + record_id = fields.Integer(required=True) + field_name = fields.Char(required=True, string="Field") + old_value = fields.Char(string="Previous Value") + new_value = fields.Char(required=True) + user_id = fields.Many2one( + "res.users", + default=lambda self: self.env.user, + readonly=True, + ) + writeback_at = fields.Datetime( + default=fields.Datetime.now, + readonly=True, + string="Written At", + ) + status = fields.Selection( + [ + ("ok", "Success"), + ("error", "Error"), + ("rolled_back", "Rolled Back"), + ], + default="ok", + ) + error_message = fields.Char(string="Error") + + def action_rollback(self): + """ + Roll back this log entry by restoring old_value to the target record. + + Called from the form view "Roll Back" button (type="object"). + Delegates to SpreadsheetSpreadsheet.action_rollback_writeback so the + rollback logic lives in one place. 
+ """ + self.ensure_one() + self.spreadsheet_id.action_rollback_writeback(self.id) diff --git a/spreadsheet_oca/models/spreadsheet_xlsx_export.py b/spreadsheet_oca/models/spreadsheet_xlsx_export.py new file mode 100644 index 00000000..578c9617 --- /dev/null +++ b/spreadsheet_oca/models/spreadsheet_xlsx_export.py @@ -0,0 +1,467 @@ +# Copyright 2025 Ledo Enterprises LLC +# License AGPL-3.0 or later (https://www.gnu.org/licenses/agpl). +""" +Headless XLSX export. + +Server-side generation of .xlsx files from spreadsheet.spreadsheet records, +without requiring a browser session. Addresses the gap described in +odoo/o-spreadsheet Issue #8061 (filed 2026-03-06). + +Two rendering strategies: + 1. Static cells: reads cell values from spreadsheet_raw JSON and writes + them verbatim to the worksheet (preserves text, numbers, booleans). + 2. Pivot sheets: for each ODOO pivot in the spreadsheet JSON, a dedicated + worksheet is generated with fresh data from _get_pivot_data(). This + ensures exported pivots reflect the current Odoo database state rather + than the snapshot saved client-side. + +The result is attached to the spreadsheet's Chatter and/or returned as an +ir.actions.act_url download. 
+ +Usage from Python: + xlsx_bytes = SpreadsheetXlsxExporter(env, spreadsheet).render() + +Usage from Odoo UI: + spreadsheet.action_export_xlsx() # returns download action +""" + +import io +import logging + +import openpyxl +from openpyxl.styles import Alignment, Font, PatternFill +from openpyxl.utils import get_column_letter + +from odoo import _ + +from .pivot_data import collect_pivot_summaries + +_logger = logging.getLogger(__name__) + +# Header row style for pivot sheets +_PIVOT_HEADER_FILL = PatternFill( + start_color="4472C4", end_color="4472C4", fill_type="solid" +) +_PIVOT_HEADER_FONT = Font(color="FFFFFF", bold=True) +_PIVOT_SUBHEADER_FILL = PatternFill( + start_color="D6DCF0", end_color="D6DCF0", fill_type="solid" +) +_PIVOT_SUBHEADER_FONT = Font(bold=True) +_PIVOT_TOTAL_FONT = Font(bold=True, italic=True) + + +class SpreadsheetXlsxExporter: + """ + Renders a spreadsheet.spreadsheet record to an openpyxl Workbook. + + Call .render() to get a bytes object suitable for attachment or download. 
+ """ + + def __init__(self, env, spreadsheet): + self.env = env + self.spreadsheet = spreadsheet + self.raw = spreadsheet.sudo().spreadsheet_raw or {} + + def render(self): + """Return the workbook as a bytes object.""" + wb = openpyxl.Workbook() + wb.remove(wb.active) # remove default empty sheet + + sheets = self.raw.get("sheets", []) + + # ── Render static sheet(s) ──────────────────────────────────────────── + for sheet_def in sheets: + sheet_name = sheet_def.get("name", "Sheet")[:31] # Excel limit + ws = wb.create_sheet(title=sheet_name) + self._render_static_sheet(ws, sheet_def) + + # ── Render one worksheet per ODOO pivot (with fresh data) ───────────── + summaries, _failed = collect_pivot_summaries(self.env, self.raw) + for summary in summaries: + pivot_name = summary["name"] + ws_name = (pivot_name[:28] + " +") if len(pivot_name) > 28 else pivot_name + # Deduplicate sheet names (Excel requires unique names) + existing = [s.title for s in wb.worksheets] + if ws_name in existing: + ws_name = f"{ws_name[:27]}_dup" + ws = wb.create_sheet(title=ws_name) + self._render_pivot_sheet_from_result(ws, summary) + + if not wb.worksheets: + ws = wb.create_sheet(title="Empty") + ws["A1"] = _("This spreadsheet has no sheets.") + + buf = io.BytesIO() + wb.save(buf) + return buf.getvalue() + + # ── Static sheet renderer ───────────────────────────────────────────────── + + def _render_static_sheet(self, ws, sheet_def): + """Copy static cell values from the sheet JSON to the worksheet. + + Cells are stored as a flat dict keyed by cell address strings + (e.g. ``"A1"``, ``"B3"``, ``"AA12"``), matching the o-spreadsheet + native format. 
+ """ + cells = sheet_def.get("cells", {}) + # cells is {"A1": {content, style, ...}, "B3": {...}, ...} + for cell_addr, cell_data in cells.items(): + if not isinstance(cell_data, dict): + continue + # Parse cell address to (col_idx, row_idx) using our helper + from .cell_ref import parse_cell_ref + + col_idx, row_idx = parse_cell_ref(cell_addr) + if col_idx is None: + continue + content = cell_data.get("content", "") + if content is None or content == "": + continue + # openpyxl uses 1-based indices + xl_row = row_idx + 1 + xl_col = col_idx + 1 + # Strip leading "=" for formula cells — write as string + # (server-side we can't evaluate formulas) + if isinstance(content, str) and content.startswith("="): + # Write formula placeholder so user can see what was there + ws.cell(row=xl_row, column=xl_col).value = content + else: + # Try numeric conversion + ws.cell(row=xl_row, column=xl_col).value = _coerce_value(content) + + # Apply column auto-width (rough estimate) + _auto_width(ws) + + # ── Pivot sheet renderer ────────────────────────────────────────────────── + + def _render_pivot_sheet_from_result(self, ws, summary): + """Render a pre-computed pivot summary as a formatted Excel table.""" + display_name = summary["name"] + model_name = summary["model"] + result = summary["result"] + + row_dims = result.get("rowDimensions", []) + col_dims = result.get("colDimensions", []) + groups = result.get("groups", []) + measure_specs = result.get("measureSpecs", []) + + # Title row + title_cell = ws.cell(row=1, column=1, value=display_name) + title_cell.font = Font(bold=True, size=13) + ws.merge_cells( + start_row=1, + start_column=1, + end_row=1, + end_column=max(1, len(row_dims) + len(col_dims) + len(measure_specs)), + ) + + # Subtitle: model + domain + model_label = model_name + try: + model_label = self.env["ir.model"]._get(model_name).name or model_name + except Exception: + _logger.debug("Could not resolve model label for %s", model_name) + ws.cell(row=2, column=1, 
value=f"{model_label}").font = Font( + italic=True, color="666666" + ) + current_row = 4 + + # Build a flat table: row_headers | col_headers | measures + if not row_dims and not col_dims: + # Grand total only + current_row = self._write_grand_total( + ws, groups, measure_specs, current_row + ) + elif not col_dims: + # Row-only pivot (simple breakdown) + current_row = self._write_row_pivot( + ws, groups, row_dims, measure_specs, current_row + ) + else: + # Full cross-tab pivot + current_row = self._write_crosstab( + ws, groups, row_dims, col_dims, measure_specs, current_row + ) + + _auto_width(ws) + + def _write_grand_total(self, ws, groups, measure_specs, start_row): + totals = [g for g in groups if not g["rowGroupBy"] and not g["colGroupBy"]] + if not totals: + return start_row + gt = totals[0] + # Header + headers = ( + ["Total"] + [_format_measure_name(m) for m in measure_specs] + ["Count"] + ) + for ci, h in enumerate(headers, 1): + cell = ws.cell(row=start_row, column=ci, value=h) + cell.fill = _PIVOT_HEADER_FILL + cell.font = _PIVOT_HEADER_FONT + # Values + row_vals = [_("Grand Total")] + for spec in measure_specs: + row_vals.append(gt.get("measures", {}).get(spec)) + row_vals.append(gt.get("count", 0)) + for ci, v in enumerate(row_vals, 1): + ws.cell(row=start_row + 1, column=ci, value=v) + return start_row + 3 + + def _write_row_pivot(self, ws, groups, row_dims, measure_specs, start_row): + row_gb = [d["fieldName"] for d in row_dims] + row_groups = sorted( + [g for g in groups if g["rowGroupBy"] == row_gb and not g["colGroupBy"]], + key=lambda g: [str(v) for v in g["rowValues"]], + ) + totals = [g for g in groups if not g["rowGroupBy"] and not g["colGroupBy"]] + + # Header row + headers = [d["fieldName"] for d in row_dims] + headers += [_format_measure_name(m) for m in measure_specs] + headers.append("Count") + for ci, h in enumerate(headers, 1): + cell = ws.cell(row=start_row, column=ci, value=h) + cell.fill = _PIVOT_HEADER_FILL + cell.font = 
_PIVOT_HEADER_FONT + r = start_row + 1 + + for g in row_groups: + for ci, v in enumerate(g["rowValues"], 1): + ws.cell(row=r, column=ci, value=v) + offset = len(row_dims) + for si, spec in enumerate(measure_specs): + ws.cell( + row=r, column=offset + si + 1, value=g.get("measures", {}).get(spec) + ) + ws.cell( + row=r, column=offset + len(measure_specs) + 1, value=g.get("count", 0) + ) + r += 1 + + # Grand total row + if totals: + gt = totals[0] + cell = ws.cell(row=r, column=1, value=_("Grand Total")) + cell.font = _PIVOT_TOTAL_FONT + offset = len(row_dims) + for si, spec in enumerate(measure_specs): + c = ws.cell( + row=r, + column=offset + si + 1, + value=gt.get("measures", {}).get(spec), + ) + c.font = _PIVOT_TOTAL_FONT + ws.cell( + row=r, column=offset + len(measure_specs) + 1, value=gt.get("count", 0) + ).font = _PIVOT_TOTAL_FONT + r += 1 + + return r + 1 + + def _write_crosstab(self, ws, groups, row_dims, col_dims, measure_specs, start_row): + """Write a cross-tab with row headers on left, columns across top.""" + row_gb = [d["fieldName"] for d in row_dims] + col_gb = [d["fieldName"] for d in col_dims] + + # Collect unique col values + col_groups = sorted( + [g for g in groups if g["colGroupBy"] == col_gb and not g["rowGroupBy"]], + key=lambda g: [str(v) for v in g["colValues"]], + ) + col_keys = [tuple(g["colValues"]) for g in col_groups] + + # Collect unique row values + row_groups = sorted( + [g for g in groups if g["rowGroupBy"] == row_gb and not g["colGroupBy"]], + key=lambda g: [str(v) for v in g["rowValues"]], + ) + + # Cell value lookup: (row_values_tuple, col_values_tuple) → group + cell_map = {} + for g in groups: + if g["rowGroupBy"] == row_gb and g["colGroupBy"] == col_gb: + cell_map[(tuple(g["rowValues"]), tuple(g["colValues"]))] = g + + grand_totals = [ + g for g in groups if not g["rowGroupBy"] and not g["colGroupBy"] + ] + + num_row_dims = len(row_dims) + total_col = num_row_dims + len(col_keys) * len(measure_specs) + 1 + + r = 
self._write_crosstab_headers( + ws, row_dims, col_keys, measure_specs, total_col, start_row + ) + + r = self._write_crosstab_data( + ws, + row_groups, + col_keys, + cell_map, + groups, + row_gb, + num_row_dims, + measure_specs, + total_col, + grand_totals, + r, + ) + + return r + 1 + + def _write_crosstab_headers( + self, ws, row_dims, col_keys, measure_specs, total_col, r + ): + """Write column headers and measure sub-headers for a cross-tab.""" + num_row_dims = len(row_dims) + + for ci in range(num_row_dims): + cell = ws.cell(row=r, column=ci + 1, value=row_dims[ci]["fieldName"]) + cell.font = Font(bold=True) + + for ki, col_key in enumerate(col_keys): + label = " / ".join(str(v) for v in col_key) if col_key else _("(none)") + col_start = num_row_dims + ki * len(measure_specs) + 1 + if len(measure_specs) > 1: + ws.merge_cells( + start_row=r, + start_column=col_start, + end_row=r, + end_column=col_start + len(measure_specs) - 1, + ) + cell = ws.cell(row=r, column=col_start, value=label) + cell.fill = _PIVOT_HEADER_FILL + cell.font = _PIVOT_HEADER_FONT + cell.alignment = Alignment(horizontal="center") + + if len(measure_specs) > 1: + ws.merge_cells( + start_row=r, + start_column=total_col, + end_row=r, + end_column=total_col + len(measure_specs) - 1, + ) + total_hdr = ws.cell(row=r, column=total_col, value=_("Total")) + total_hdr.fill = _PIVOT_HEADER_FILL + total_hdr.font = _PIVOT_HEADER_FONT + if len(measure_specs) > 1: + total_hdr.alignment = Alignment(horizontal="center") + r += 1 + + # Measure sub-headers if multiple measures + if len(measure_specs) > 1: + for ki in range(len(col_keys)): + for si, spec in enumerate(measure_specs): + col_start = num_row_dims + ki * len(measure_specs) + si + 1 + cell = ws.cell( + row=r, + column=col_start, + value=_format_measure_name(spec), + ) + cell.fill = _PIVOT_SUBHEADER_FILL + cell.font = _PIVOT_SUBHEADER_FONT + for si, spec in enumerate(measure_specs): + cell = ws.cell( + row=r, + column=total_col + si, + 
value=_format_measure_name(spec), + ) + cell.fill = _PIVOT_SUBHEADER_FILL + cell.font = _PIVOT_SUBHEADER_FONT + r += 1 + + return r + + def _write_crosstab_data( + self, + ws, + row_groups, + col_keys, + cell_map, + groups, + row_gb, + num_row_dims, + measure_specs, + total_col, + grand_totals, + r, + ): + """Write data rows and grand total for a cross-tab.""" + for rg in row_groups: + row_key = tuple(rg["rowValues"]) + for ci, v in enumerate(rg["rowValues"], 1): + ws.cell(row=r, column=ci, value=v) + for ki, col_key in enumerate(col_keys): + cell_group = cell_map.get((row_key, col_key)) + for si, spec in enumerate(measure_specs): + col_pos = num_row_dims + ki * len(measure_specs) + si + 1 + val = ( + cell_group.get("measures", {}).get(spec) if cell_group else None + ) + ws.cell(row=r, column=col_pos, value=val) + # Row totals + row_total = [ + g + for g in groups + if g["rowGroupBy"] == row_gb + and not g["colGroupBy"] + and tuple(g["rowValues"]) == row_key + ] + if row_total: + for si, spec in enumerate(measure_specs): + v = row_total[0].get("measures", {}).get(spec) + ws.cell( + row=r, column=total_col + si, value=v + ).font = _PIVOT_TOTAL_FONT + r += 1 + + # Grand total row + if grand_totals: + gt = grand_totals[0] + cell = ws.cell(row=r, column=1, value=_("Grand Total")) + cell.font = _PIVOT_TOTAL_FONT + for si, spec in enumerate(measure_specs): + ws.cell( + row=r, + column=total_col + si, + value=gt.get("measures", {}).get(spec), + ).font = _PIVOT_TOTAL_FONT + r += 1 + + return r + + +def _coerce_value(v): + """Try to return v as int or float; otherwise return the string.""" + if isinstance(v, int | float | bool): + return v + s = str(v).strip() + try: + return int(s) + except (ValueError, TypeError): + _logger.debug("_coerce_value: %r is not an integer", s) + try: + return float(s.replace(",", "")) + except (ValueError, TypeError): + _logger.debug("_coerce_value: %r is not a float", s) + return s + + +def _format_measure_name(spec): + """Turn 
'amount_total:sum' into 'Amount Total (Sum)'.""" + if ":" in spec: + field, agg = spec.split(":", 1) + return f"{field.replace('_', ' ').title()} ({agg.title()})" + return spec.replace("_", " ").title() + + +def _auto_width(ws, max_width=60): + """Set approximate column widths based on cell content length.""" + for col in ws.columns: + max_len = 0 + col_letter = get_column_letter(col[0].column) + for cell in col: + if cell.value is not None: + max_len = max(max_len, len(str(cell.value))) + ws.column_dimensions[col_letter].width = min(max_len + 4, max_width) diff --git a/spreadsheet_oca/readme/DESCRIPTION.md b/spreadsheet_oca/readme/DESCRIPTION.md index afee06c5..de411159 100644 --- a/spreadsheet_oca/readme/DESCRIPTION.md +++ b/spreadsheet_oca/readme/DESCRIPTION.md @@ -1,5 +1,23 @@ -This module adds a functionality for adding and editing Spreadsheets -using Odoo CE. +This module provides a full-featured spreadsheet editor for Odoo CE using +the ``o-spreadsheet`` engine. It serves as a community alternative that +requires only Odoo CE and OCA dependencies. -It is an alternative to the proprietary module `spreadsheet_edition` of -Odoo Enterprise Edition. +Beyond basic spreadsheet editing, the module includes server-side features +for operational use: + +- **Scheduled Refresh** — cron-based pivot data refresh with email digest + notifications and input parameter substitution in domains +- **KPI Alerts** — cell-value threshold monitors with edge or level trigger + modes, sending notifications when conditions are met +- **What-If Scenarios** — named cell-override sets for scenario planning, + with comparison export and apply-to-copy workflow +- **Email Subscriptions** — partner-level daily/weekly/monthly digest emails + with optional pivot data summaries +- **Input Parameters** — named cell registry for domain token substitution + (e.g. 
``%(start_date)s``) used by scheduled refresh and alerts +- **Cell Writeback** — edit Odoo record fields directly from list-view cells + in the spreadsheet, with full audit trail and rollback +- **XLSX Export** — server-rendered ``.xlsx`` download with fresh pivot data + on dedicated sheets, styled headers, and static cell content +- **Collaborative Editing** — revision-based multi-user editing with conflict + resolution via the OWL-based spreadsheet component diff --git a/spreadsheet_oca/readme/DEVELOP.md b/spreadsheet_oca/readme/DEVELOP.md index 0afb4a08..3d819581 100644 --- a/spreadsheet_oca/readme/DEVELOP.md +++ b/spreadsheet_oca/readme/DEVELOP.md @@ -1,3 +1,52 @@ -If you want to develop custom business functions, you can add others, -based on the file - +## Architecture + +The module is built on the ``o-spreadsheet`` OWL component bundled with +Odoo CE. Server-side features are implemented as standard Odoo models with +cron jobs, controllers, and security rules. + +### Key Files + +- ``models/spreadsheet_spreadsheet.py`` — main document model with pivot + data endpoint, XLSX export, and writeback rollback +- ``models/spreadsheet_oca_revision.py`` — collaborative editing revisions +- ``models/cell_ref.py`` — cell reference parsing and value reading utilities +- ``models/pivot_data.py`` — server-side pivot computation via ``read_group`` +- ``models/spreadsheet_xlsx_export.py`` — XLSX rendering with ``openpyxl`` +- ``controllers/`` — JSON endpoints for writeback and input parameters + +### Adding Custom Business Functions + +To add spreadsheet functions from another module, extend the JS function +registry. 
See the accounting functions in Odoo CE as a reference: + + +### Pivot Data Format + +Pivot definitions in the spreadsheet JSON use this structure: + +```json +{ + "type": "ODOO", + "model": "res.partner", + "domain": [["active", "=", true]], + "measures": [{"id": "__count", "fieldName": "__count"}], + "rows": [{"fieldName": "country_id", "order": "desc"}], + "columns": [{"fieldName": "is_company"}] +} +``` + +The measure ID format is ``fieldName:aggregator`` for real fields (e.g. +``amount_total:sum``) or just ``fieldName`` for virtual fields like +``__count``. The ``__count`` measure must **not** include an aggregator +suffix. Formulas must match the measure ID: +``=PIVOT.VALUE(1,"__count","#country_id",1)``. + +### Running Tests + +```bash +# Python unit tests +odoo -d test_db -i spreadsheet_oca --test-enable --stop-after-init + +# With pytest (if pytest-odoo is installed) +pytest odoo/custom/src/spreadsheet/spreadsheet_oca/tests/ +``` diff --git a/spreadsheet_oca/readme/USAGE.md b/spreadsheet_oca/readme/USAGE.md index 935707dc..532b42e9 100644 --- a/spreadsheet_oca/readme/USAGE.md +++ b/spreadsheet_oca/readme/USAGE.md @@ -1,39 +1,102 @@ -## **Create a new spreadsheet** +## Create a Spreadsheet -- Go to 'Spreadsheet' menu -- Click on 'Create' -- Put a name, then click on the "Edit" button +- Go to the **Spreadsheet** menu +- Click **Create**, enter a name, then click **Edit** to open the editor ![](../static/description/spreadsheet_create.png) -- At this point you switch to spreadsheet editing mode. The editor is - named `o-spreadsheet` and looks like another common spreadsheet web - editors. (OnlyOffice, Ethercalc, Google Sheets (non-free)). +The editor uses ``o-spreadsheet`` and supports standard functions like +``SUM()``, ``AVERAGE()``, etc. For the full function list, see + or . ![](../static/description/spreadsheet_edit.png) -- You can use common functions `SUM()`, `AVERAGE()`, etc. in the cells. 
- For a complete list of functions and their syntax, Refer to the - documentation or go to - and click on "Insert \> - Function". - -![](../static/description/o-spreadsheet.png) - -- Note: Business Odoo module can add "business functions". This is - currently the case for the accounting module, which adds the following - features: - - > - `ODOO.CREDIT(account_codes, date_range)`: Get the total credit for - > the specified account(s) and period. - > - `ODOO.DEBIT(account_codes, date_range)`: Get the total debit for - > the specified account(s) and period. - > - `ODOO.BALANCE(account_codes, date_range)`: Get the total balance - > for the specified account(s) and period. - > - `ODOO.FISCALYEAR.START(day)`: Returns the starting date of the - > fiscal year encompassing the provided date. - > - `ODOO.FISCALYEAR.END(day)`: Returns the ending date of the fiscal - > year encompassing the provided date. - > - `ODOO.ACCOUNT.GROUP(type)`: Returns the account ids of a given - > group where type should be a value of the `account_type` field of - > `account.account` model. (`income`, `asset_receivable`, etc.) +## Pivot Tables + +Insert an Odoo pivot into the spreadsheet using ``PIVOT.VALUE()`` and +``PIVOT.HEADER()`` formulas. The pivot definition (model, domain, measures, +row/column groupings) is stored in the spreadsheet JSON and evaluated +against live Odoo data. + +Use **Data > Refresh All Data** in the spreadsheet toolbar to reload pivot +values from the database. + +## Scheduled Refresh + +On any spreadsheet form, click **Refresh Schedules** to create a schedule: + +- Set the interval (hours, days, weeks, or months) +- Add notification partners who receive an email digest after each refresh +- Click **Activate** to create the background cron job +- Use **Run Now** for an immediate manual refresh + +If the spreadsheet has **Input Parameters** (see below), their current +values are substituted into pivot domains before each refresh cycle. 
+ +## KPI Alerts + +Click **Alerts** on the spreadsheet form to define threshold watches: + +- Specify a **cell reference** (e.g. ``E8``) and **sheet name** +- Set the **operator** and **threshold** value +- Choose **edge** mode (notify once when the condition becomes true) or + **level** mode (notify every evaluation cycle while true) +- Add notification partners + +Alerts are evaluated by a shared cron job. Use **Evaluate Now** for +immediate checking, or **Reset State** to allow an edge alert to re-fire. + +## What-If Scenarios + +Click **Scenarios** on the spreadsheet form: + +- Create named scenarios with cell override values in JSON format: + ``{"B3": 125000, "Sheet1!C5": 0.15}`` +- Mark one scenario as the **Base Case** for comparison +- **Apply to Copy** creates a new spreadsheet with the overrides baked in +- **Export Comparison** shows a side-by-side table of base vs. override values + +## Input Parameters + +Click **Input Parameters** to register named cells: + +- Each parameter has a **name** (e.g. ``start_date``), a **cell reference** + (e.g. 
``Parameters!B2``), and an optional description +- Parameter values are synced from the spreadsheet cell content +- During scheduled refresh, pivot domains containing ``%(start_date)s`` + tokens are automatically substituted with the current parameter value + +## Cell Writeback + +Enable **Writeback** on the spreadsheet form to allow direct edits to Odoo +records from list-view cells: + +- Edits are validated for field writeability and user permissions +- Each write creates an audit log entry (model, record, field, old/new value) +- Use **Rollback** on any log entry to restore the previous value +- The full audit trail is accessible from the **Writeback Log** button + +## Email Subscriptions + +Click **Subscriptions** to set up periodic email digests: + +- Each partner gets one subscription per spreadsheet +- Choose **daily**, **weekly**, or **monthly** frequency +- Optionally include a summary of pivot data in the email body + +## Customising Email Templates + +Alert notifications, subscription digests, and refresh summaries are +rendered with QWeb templates that can be overridden or customised: + +- **Settings > Technical > Views**, search for ``spreadsheet.alert.notification``, + ``spreadsheet.subscription.digest``, or ``spreadsheet.refresh.notification`` +- Inherit the template in a custom module using standard ```` overrides + +## XLSX Export + +Click **Export XLSX** on the spreadsheet form to download a ``.xlsx`` file: + +- Static cell content is rendered on the original sheets +- Each pivot gets a dedicated sheet with fresh data from the database +- Headers and totals are styled for readability diff --git a/spreadsheet_oca/security/ir.model.access.csv b/spreadsheet_oca/security/ir.model.access.csv index 1898b166..8aa1ddf0 100644 --- a/spreadsheet_oca/security/ir.model.access.csv +++ b/spreadsheet_oca/security/ir.model.access.csv @@ -1,8 +1,20 @@ id,name,model_id:id,group_id:id,perm_read,perm_write,perm_create,perm_unlink 
-access_spreadsheet_spreadsheet,access_spreadsheet_spreadsheet,model_spreadsheet_spreadsheet,group_user,1,1,1,1 +access_spreadsheet_spreadsheet,access_spreadsheet_spreadsheet,model_spreadsheet_spreadsheet,base.group_user,1,1,1,1 access_spreadsheet_oca_revision,access_spreadsheet_oca_revision,model_spreadsheet_oca_revision,base.group_user,1,1,1,1 spreadsheet_oca.access_spreadsheet_spreadsheet_import,access_spreadsheet_spreadsheet_import,spreadsheet_oca.model_spreadsheet_spreadsheet_import,base.group_user,1,1,1,1 -access_spreadsheet_import_mode,access_spreadsheet_oca_revision,model_spreadsheet_spreadsheet_import_mode,base.group_user,1,0,0,0 +access_spreadsheet_import_mode,access_spreadsheet_spreadsheet_import_mode,model_spreadsheet_spreadsheet_import_mode,base.group_user,1,0,0,0 access_spreadsheet_select_row_number,access_spreadsheet_select_row_number,model_spreadsheet_select_row_number,base.group_user,1,1,1,1 access_spreadsheet_spreadsheet_tag,access_spreadsheet_spreadsheet_tag,model_spreadsheet_spreadsheet_tag,spreadsheet_oca.group_user,1,0,0,0 access_spreadsheet_spreadsheet_manager_tag,access_spreadsheet_spreadsheet_manager_tag,model_spreadsheet_spreadsheet_tag,spreadsheet_oca.group_manager,1,1,1,1 +access_spreadsheet_refresh_schedule_user,access_spreadsheet_refresh_schedule_user,model_spreadsheet_refresh_schedule,base.group_user,1,0,0,0 +access_spreadsheet_refresh_schedule_manager,access_spreadsheet_refresh_schedule_manager,model_spreadsheet_refresh_schedule,spreadsheet_oca.group_manager,1,1,1,1 +access_spreadsheet_alert_user,access_spreadsheet_alert_user,model_spreadsheet_alert,base.group_user,1,0,0,0 +access_spreadsheet_alert_manager,access_spreadsheet_alert_manager,model_spreadsheet_alert,spreadsheet_oca.group_manager,1,1,1,1 +access_spreadsheet_subscription_user,access_spreadsheet_subscription_user,model_spreadsheet_subscription,base.group_user,1,0,0,0 
+access_spreadsheet_subscription_manager,access_spreadsheet_subscription_manager,model_spreadsheet_subscription,spreadsheet_oca.group_manager,1,1,1,1 +access_spreadsheet_scenario_user,access_spreadsheet_scenario_user,model_spreadsheet_scenario,base.group_user,1,0,0,0 +access_spreadsheet_scenario_manager,access_spreadsheet_scenario_manager,model_spreadsheet_scenario,spreadsheet_oca.group_manager,1,1,1,1 +access_spreadsheet_writeback_log_user,access_spreadsheet_writeback_log_user,model_spreadsheet_writeback_log,base.group_user,1,0,0,0 +access_spreadsheet_writeback_log_manager,access_spreadsheet_writeback_log_manager,model_spreadsheet_writeback_log,spreadsheet_oca.group_manager,1,1,1,1 +access_spreadsheet_input_param_user,access_spreadsheet_input_param_user,model_spreadsheet_input_param,base.group_user,1,0,0,0 +access_spreadsheet_input_param_manager,access_spreadsheet_input_param_manager,model_spreadsheet_input_param,spreadsheet_oca.group_manager,1,1,1,1 diff --git a/spreadsheet_oca/security/security.xml b/spreadsheet_oca/security/security.xml index aa94100a..697c2416 100644 --- a/spreadsheet_oca/security/security.xml +++ b/spreadsheet_oca/security/security.xml @@ -62,4 +62,122 @@ [('group_ids','in', user.groups_id.ids)] + + + + + Refresh Schedule: follow spreadsheet access + + + [ + '|', '|', '|', + ('spreadsheet_id.owner_id', '=', user.id), + ('spreadsheet_id.contributor_ids', '=', user.id), + ('spreadsheet_id.contributor_group_ids', 'in', user.groups_id.ids), + ('spreadsheet_id.reader_ids', '=', user.id), + ] + + + Refresh Schedule: manager full access + + + [(1, '=', 1)] + + + + Alert: follow spreadsheet access + + + [ + '|', '|', '|', + ('spreadsheet_id.owner_id', '=', user.id), + ('spreadsheet_id.contributor_ids', '=', user.id), + ('spreadsheet_id.contributor_group_ids', 'in', user.groups_id.ids), + ('spreadsheet_id.reader_ids', '=', user.id), + ] + + + Alert: manager full access + + + [(1, '=', 1)] + + + + Subscription: follow spreadsheet access + + + [ + '|', 
'|', '|', + ('spreadsheet_id.owner_id', '=', user.id), + ('spreadsheet_id.contributor_ids', '=', user.id), + ('spreadsheet_id.contributor_group_ids', 'in', user.groups_id.ids), + ('spreadsheet_id.reader_ids', '=', user.id), + ] + + + Subscription: manager full access + + + [(1, '=', 1)] + + + + Scenario: follow spreadsheet access + + + [ + '|', '|', '|', + ('spreadsheet_id.owner_id', '=', user.id), + ('spreadsheet_id.contributor_ids', '=', user.id), + ('spreadsheet_id.contributor_group_ids', 'in', user.groups_id.ids), + ('spreadsheet_id.reader_ids', '=', user.id), + ] + + + Scenario: manager full access + + + [(1, '=', 1)] + + + + Writeback Log: follow spreadsheet access + + + [ + '|', '|', '|', + ('spreadsheet_id.owner_id', '=', user.id), + ('spreadsheet_id.contributor_ids', '=', user.id), + ('spreadsheet_id.contributor_group_ids', 'in', user.groups_id.ids), + ('spreadsheet_id.reader_ids', '=', user.id), + ] + + + Writeback Log: manager full access + + + [(1, '=', 1)] + + + + Input Param: follow spreadsheet access + + + [ + '|', '|', '|', + ('spreadsheet_id.owner_id', '=', user.id), + ('spreadsheet_id.contributor_ids', '=', user.id), + ('spreadsheet_id.contributor_group_ids', 'in', user.groups_id.ids), + ('spreadsheet_id.reader_ids', '=', user.id), + ] + + + Input Param: manager full access + + + [(1, '=', 1)] + diff --git a/spreadsheet_oca/static/description/index.html b/spreadsheet_oca/static/description/index.html index 69ccb105..4e5be220 100644 --- a/spreadsheet_oca/static/description/index.html +++ b/spreadsheet_oca/static/description/index.html @@ -3,7 +3,7 @@ -README.rst +Spreadsheet Oca -
+
+

Spreadsheet Oca

- - -Odoo Community Association - -
-

Spreadsheet Oca

-

Beta License: AGPL-3 OCA/spreadsheet Translate me on Weblate Try me on Runboat

-

This module adds a functionality for adding and editing Spreadsheets -using Odoo CE.

-

It is an alternative to the proprietary module spreadsheet_edition -of Odoo Enterprise Edition.

+

Beta License: AGPL-3 OCA/spreadsheet Translate me on Weblate Try me on Runboat

+

This module provides a full-featured spreadsheet editor for Odoo CE using +the o-spreadsheet engine. It serves as a community alternative that +requires only Odoo CE and OCA dependencies.

Table of contents

    @@ -397,9 +391,9 @@

    Spreadsheet Oca

-

Usage

+

Usage

-

Create a new spreadsheet

+

Create a new spreadsheet

-

Development

+

Development

If you want to develop custom business functions, you can add others, based on the file https://github.com/odoo/odoo/blob/16.0/addons/spreadsheet_account/static/src/accounting_functions.js

-

Bug Tracker

+

Bug Tracker

Bugs are tracked on GitHub Issues. In case of trouble, please check there if your issue has already been reported. If you spotted it first, help us to smash it by providing a detailed and welcomed @@ -461,15 +455,15 @@

Bug Tracker

Do not contact contributors directly about support or help with technical issues.

-

Credits

+

Credits

-

Authors

+

Authors

  • CreuBlanca
-

Contributors

+

Contributors

-

Maintainers

+

Maintainers

This module is maintained by the OCA.

Odoo Community Association @@ -500,6 +494,5 @@

Maintainers

-
diff --git a/spreadsheet_oca/static/src/spreadsheet/bundle/filter_panel_datasources.esm.js b/spreadsheet_oca/static/src/spreadsheet/bundle/filter_panel_datasources.esm.js index 3d6c4866..bdbc6690 100644 --- a/spreadsheet_oca/static/src/spreadsheet/bundle/filter_panel_datasources.esm.js +++ b/spreadsheet_oca/static/src/spreadsheet/bundle/filter_panel_datasources.esm.js @@ -14,6 +14,76 @@ const {useLocalStore, PivotSidePanelStore} = spreadsheet.stores; const {sidePanelRegistry, topbarMenuRegistry, pivotSidePanelRegistry} = spreadsheet.registries; +/** + * Scan every cell in every sheet to find the top-left anchor position of each + * pivot table. Returns a Map. + * + * @param {Object} getters o-spreadsheet getters + * @returns {Map} + */ +function _findPivotAnchors(getters) { + const anchors = new Map(); + for (const sheetId of getters.getSheetIds()) { + const cells = getters.getCells(sheetId); + for (const cellId in cells) { + const {col, row} = getters.getCellPosition(cellId); + const pivotId = getters.getPivotIdFromPosition({sheetId, col, row}); + if (!pivotId) { + continue; + } + const current = anchors.get(pivotId); + if ( + !current || + row < current.row || + (row === current.row && col < current.col) + ) { + anchors.set(pivotId, {sheetId, col, row}); + } + } + } + return anchors; +} + +/** + * Reload all pivot data sources from Odoo concurrently, then re-insert each + * pivot table at its current position using the fresh data. + * + * Also dispatches REFRESH_ALL_DATA_SOURCES so that lists and charts are + * handled by the CE plugin (which refreshes their data sources independently). 
+ * + * @param {Object} env o-spreadsheet environment + */ +async function _refreshAndReinsertAllPivots(env) { + const getters = env.model.getters; + const pivotIds = getters.getPivotIds(); + + // Snapshot pivot positions before reload (cell layout doesn't change) + const anchors = _findPivotAnchors(getters); + + // Reload all pivots concurrently for speed + await Promise.all(pivotIds.map((id) => getters.getPivot(id).load({reload: true}))); + + // Re-insert each pivot at its anchor with the freshly-loaded table structure + for (const pivotId of pivotIds) { + const anchor = anchors.get(pivotId); + if (!anchor) { + continue; // Pivot was never inserted into a sheet — skip + } + const table = getters.getPivot(pivotId).getTableStructure().export(); + env.model.dispatch("INSERT_PIVOT_WITH_TABLE", { + pivotId, + table, + col: anchor.col, + row: anchor.row, + sheetId: anchor.sheetId, + pivotMode: "dynamic", + }); + } + + // Let the CE plugin handle lists and charts (REFRESH_ALL_DATA_SOURCES) + env.model.dispatch("REFRESH_ALL_DATA_SOURCES"); +} + topbarMenuRegistry.addChild("data_sources", ["data"], (env) => { let sequence = 53; const lists = env.model.getters.getListIds().map((listId, index) => ({ @@ -32,9 +102,7 @@ topbarMenuRegistry.addChild("data_sources", ["data"], (env) => { id: "refresh_all_data", name: _t("Refresh all data"), sequence: 110, - execute: (child_env) => { - child_env.model.dispatch("REFRESH_ALL_DATA_SOURCES"); - }, + execute: (child_env) => _refreshAndReinsertAllPivots(child_env), separator: true, }, ]); @@ -64,12 +132,14 @@ export class PivotTitleSectionInsertion extends PivotTitleSection { ); return res; } - reinsertTable(env, mode) { + async reinsertTable(env, mode) { + const pivot = env.model.getters.getPivot(this.props.pivotId); + // Reload fresh data from Odoo before reading the table structure. + // The previous implementation dispatched REFRESH_PIVOT *after* reading + // stale data, so the re-inserted table always contained old values. 
+ await pivot.load({reload: true}); const zone = env.model.getters.getSelectedZone(); - const table = env.model.getters - .getPivot(this.props.pivotId) - .getTableStructure() - .export(); + const table = pivot.getTableStructure().export(); env.model.dispatch("INSERT_PIVOT_WITH_TABLE", { pivotId: this.props.pivotId, table, @@ -78,7 +148,6 @@ export class PivotTitleSectionInsertion extends PivotTitleSection { sheetId: env.model.getters.getActiveSheetId(), pivotMode: mode, }); - env.model.dispatch("REFRESH_PIVOT", {id: this.props.pivotId}); } } diff --git a/spreadsheet_oca/static/src/tours/spreadsheet_feature_tour.esm.js b/spreadsheet_oca/static/src/tours/spreadsheet_feature_tour.esm.js new file mode 100644 index 00000000..a2a5bba2 --- /dev/null +++ b/spreadsheet_oca/static/src/tours/spreadsheet_feature_tour.esm.js @@ -0,0 +1,135 @@ +/** @odoo-module */ + +import {registry} from "@web/core/registry"; + +registry.category("web_tour.tours").add("spreadsheet_oca_features", { + url: "/odoo", + steps: () => [ + // 1. Click the Spreadsheets menu + { + trigger: + ".o_menu_sections a:contains('Spreadsheets'), .o_navbar a:contains('Spreadsheets'), a.dropdown-item:contains('Spreadsheets')", + content: "Open the Spreadsheets menu to see your dashboards.", + run: "click", + }, + // 2. Welcome on kanban view + { + trigger: ".o_kanban_view", + content: + "Welcome to Spreadsheets! You're looking at your spreadsheet dashboards.", + run() { + // Observation step — no interaction + }, + }, + // 2. Click the Sales Pipeline Summary card + { + trigger: ".o_kanban_record:contains('Sales Pipeline Summary')", + content: + "Open the Sales Pipeline Summary spreadsheet to explore its features.", + run: "click", + }, + // 3. 
On form view — point out smart buttons row + { + trigger: ".o_form_view div[name='button_box'], .o_form_view #button_box", + content: + "These smart buttons give you quick access to all features: alerts, scenarios, parameters, and more.", + run() { + // Observation step — no interaction + }, + }, + // 4. Click KPI Alerts smart button + { + trigger: + "button[name='action_open_alerts'], div[name='alert_count'] .oe_stat_button", + content: "Click to view KPI Alerts configured for this spreadsheet.", + run: "click", + }, + // 5. Show the alert list + { + trigger: ".o_list_view, .o_kanban_view", + content: + "KPI Alerts monitor cell values and notify you when thresholds are crossed. You can set edge triggers (notify once) or level triggers (notify every cycle).", + run() { + // Observation step — no interaction + }, + }, + // 6. Go back via breadcrumb + { + trigger: ".o_back_button, .breadcrumb-item a", + content: "Go back to the spreadsheet form.", + run: "click", + }, + // 7. Click Scenarios smart button + { + trigger: + "button[name='action_open_scenarios'], div[name='scenario_count'] .oe_stat_button", + content: "Click to view What-If Scenarios.", + run: "click", + }, + // 8. Show scenarios list + { + trigger: ".o_list_view, .o_kanban_view", + content: + "What-If Scenarios let you model different outcomes without duplicating your spreadsheet. Try the Optimistic or Pessimistic variants.", + run() { + // Observation step — no interaction + }, + }, + // 9. Go back via breadcrumb + { + trigger: ".o_back_button, .breadcrumb-item a", + content: "Go back to the spreadsheet form.", + run: "click", + }, + // 10. Click Input Parameters smart button + { + trigger: + "button[name='action_open_input_params'], div[name='input_param_count'] .oe_stat_button", + content: "Click to view Input Parameters.", + run: "click", + }, + // 11. 
Show parameters list + { + trigger: ".o_list_view, .o_kanban_view", + content: + "Input Parameters bind named cells to server-side domain substitution for scheduled refreshes. Change a date here and the next refresh picks it up automatically.", + run() { + // Observation step — no interaction + }, + }, + // 12. Go back via breadcrumb + { + trigger: ".o_back_button, .breadcrumb-item a", + content: "Go back to the spreadsheet form.", + run: "click", + }, + // 13. Point to Export XLSX button + { + trigger: "button[name='action_export_xlsx']", + content: + "Export XLSX generates a server-side .xlsx file with fresh pivot data — no browser extension needed.", + run() { + // Observation step — no interaction + }, + }, + // 14. Point to Refresh Schedules smart button + { + trigger: + "button[name='action_open_refresh_schedules'], div[name='refresh_schedule_count'] .oe_stat_button", + content: + "Set up automated data refresh on a schedule. The cron fetches fresh pivot data and emails a summary to subscribed partners.", + run() { + // Observation step — no interaction + }, + }, + // 15. Final step + { + trigger: ".o_form_view", + content: + "You're all set! Explore each feature to unlock the full power of your spreadsheets.", + run() { + // Observation step — no interaction + }, + }, + ], +}); diff --git a/spreadsheet_oca/tests/__init__.py b/spreadsheet_oca/tests/__init__.py new file mode 100644 index 00000000..fd23b52a --- /dev/null +++ b/spreadsheet_oca/tests/__init__.py @@ -0,0 +1,8 @@ +from . import test_pivot_data +from . import test_refresh_schedule +from . import test_alert +from . import test_xlsx_export +from . import test_subscription +from . import test_writeback +from . import test_scenario +from . 
import test_input_param diff --git a/spreadsheet_oca/tests/test_alert.py b/spreadsheet_oca/tests/test_alert.py new file mode 100644 index 00000000..8a37e568 --- /dev/null +++ b/spreadsheet_oca/tests/test_alert.py @@ -0,0 +1,264 @@ +# Copyright 2025 Ledo Enterprises LLC +# License AGPL-3.0 or later (https://www.gnu.org/licenses/agpl). +""" +Tests for spreadsheet.alert (threshold alerts / KPI watches). +""" + +from odoo.exceptions import ValidationError +from odoo.tests import TransactionCase + +from ..models.cell_ref import parse_cell_ref as _parse_cell_ref + + +class TestParseCellRef(TransactionCase): + """Unit tests for the _parse_cell_ref helper (pure logic, no DB).""" + + def test_simple(self): + self.assertEqual(_parse_cell_ref("A1"), (0, 0)) + self.assertEqual(_parse_cell_ref("B3"), (1, 2)) + self.assertEqual(_parse_cell_ref("Z1"), (25, 0)) + + def test_multi_letter(self): + col, row = _parse_cell_ref("AA1") + self.assertEqual(col, 26) + self.assertEqual(row, 0) + + def test_case_insensitive(self): + self.assertEqual(_parse_cell_ref("b3"), _parse_cell_ref("B3")) + + def test_invalid(self): + self.assertEqual(_parse_cell_ref(""), (None, None)) + self.assertEqual(_parse_cell_ref("A0"), (None, None)) # row must be ≥ 1 + self.assertEqual(_parse_cell_ref("12"), (None, None)) + + +class TestSpreadsheetAlert(TransactionCase): + @classmethod + def setUpClass(cls): + super().setUpClass() + cls.spreadsheet = cls.env["spreadsheet.spreadsheet"].create( + {"name": "Alert Test Spreadsheet"} + ) + cls.partner = cls.env["res.partner"].create({"name": "Alert Subscriber"}) + + def _make_alert(self, **kwargs): + defaults = { + "name": "Revenue Alert", + "spreadsheet_id": self.spreadsheet.id, + "cell_ref": "B3", + "operator": ">", + "threshold": 1000.0, + "trigger_mode": "edge", + } + defaults.update(kwargs) + return self.env["spreadsheet.alert"].create(defaults) + + def _set_cell_value(self, value, col="B", row=3): + """Write a cell value into spreadsheet_raw for testing.""" + 
cell_addr = f"{col.upper()}{row}" + raw = { + "version": 1, + "sheets": [ + { + "id": "sheet1", + "name": "Sheet1", + "cells": { + cell_addr: {"content": str(value)}, + }, + } + ], + } + self.spreadsheet.write({"spreadsheet_raw": raw}) + + # ── Validation ──────────────────────────────────────────────────────────── + + def test_invalid_cell_ref_raises(self): + with self.assertRaises(ValidationError): + self._make_alert(cell_ref="A0") + + def test_valid_cell_ref_accepted(self): + alert = self._make_alert(cell_ref="C12") + self.assertEqual(alert.cell_ref, "C12") + + # ── _check_condition ────────────────────────────────────────────────────── + + def test_operators(self): + alert = self._make_alert(operator=">", threshold=100.0) + self.assertTrue(alert._check_condition(101.0)) + self.assertFalse(alert._check_condition(100.0)) + + alert.operator = ">=" + self.assertTrue(alert._check_condition(100.0)) + + alert.operator = "<" + self.assertTrue(alert._check_condition(99.0)) + self.assertFalse(alert._check_condition(100.0)) + + alert.operator = "==" + self.assertTrue(alert._check_condition(100.0)) + self.assertFalse(alert._check_condition(99.9)) + + alert.operator = "!=" + self.assertTrue(alert._check_condition(99.0)) + self.assertFalse(alert._check_condition(100.0)) + + # ── _read_cell_value ────────────────────────────────────────────────────── + + def test_read_cell_value_numeric(self): + self._set_cell_value(42.5) + alert = self._make_alert(cell_ref="B3") + self.assertAlmostEqual(alert._read_cell_value(), 42.5) + + def test_read_cell_value_empty_sheet(self): + self.spreadsheet.write({"spreadsheet_raw": {}}) + alert = self._make_alert(cell_ref="B3") + self.assertIsNone(alert._read_cell_value()) + + def test_read_cell_value_missing_cell(self): + self._set_cell_value(10, col="A", row=1) + alert = self._make_alert(cell_ref="Z99") + self.assertIsNone(alert._read_cell_value()) + + def test_read_cell_value_by_sheet_name(self): + raw = { + "version": 1, + "sheets": [ + { + 
"id": "s1", + "name": "Summary", + "cells": {"B3": {"content": "77"}}, + }, + {"id": "s2", "name": "Data", "cells": {"B3": {"content": "99"}}}, + ], + } + self.spreadsheet.write({"spreadsheet_raw": raw}) + alert = self._make_alert(cell_ref="B3", sheet_name="Data") + self.assertAlmostEqual(alert._read_cell_value(), 99.0) + + # ── Edge mode ───────────────────────────────────────────────────────────── + + def test_edge_mode_fires_once_on_crossing(self): + self._set_cell_value(1500) + alert = self._make_alert(operator=">", threshold=1000.0, trigger_mode="edge") + self.assertFalse(alert.last_state) + + msg_count_before = self.env["mail.message"].search_count( + [ + ("res_id", "=", self.spreadsheet.id), + ("model", "=", "spreadsheet.spreadsheet"), + ] + ) + alert._evaluate() + msg_count_after = self.env["mail.message"].search_count( + [ + ("res_id", "=", self.spreadsheet.id), + ("model", "=", "spreadsheet.spreadsheet"), + ] + ) + self.assertGreater(msg_count_after, msg_count_before) + self.assertTrue(alert.last_state) + + # Second evaluation: already in alert state → no new notification + msg_count_before2 = msg_count_after + alert._evaluate() + msg_count_after2 = self.env["mail.message"].search_count( + [ + ("res_id", "=", self.spreadsheet.id), + ("model", "=", "spreadsheet.spreadsheet"), + ] + ) + self.assertEqual(msg_count_before2, msg_count_after2) + + def test_edge_mode_no_fire_below_threshold(self): + self._set_cell_value(500) + alert = self._make_alert(operator=">", threshold=1000.0, trigger_mode="edge") + msg_count_before = self.env["mail.message"].search_count( + [ + ("res_id", "=", self.spreadsheet.id), + ("model", "=", "spreadsheet.spreadsheet"), + ] + ) + alert._evaluate() + msg_count_after = self.env["mail.message"].search_count( + [ + ("res_id", "=", self.spreadsheet.id), + ("model", "=", "spreadsheet.spreadsheet"), + ] + ) + self.assertEqual(msg_count_before, msg_count_after) + self.assertFalse(alert.last_state) + + def 
test_edge_reset_allows_re_trigger(self): + self._set_cell_value(1500) + alert = self._make_alert(operator=">", threshold=1000.0, trigger_mode="edge") + alert._evaluate() # fires + self.assertTrue(alert.last_state) + alert.action_reset_state() + self.assertFalse(alert.last_state) + + msg_before = self.env["mail.message"].search_count( + [ + ("res_id", "=", self.spreadsheet.id), + ("model", "=", "spreadsheet.spreadsheet"), + ] + ) + alert._evaluate() # fires again after reset + msg_after = self.env["mail.message"].search_count( + [ + ("res_id", "=", self.spreadsheet.id), + ("model", "=", "spreadsheet.spreadsheet"), + ] + ) + self.assertGreater(msg_after, msg_before) + + # ── Level mode ──────────────────────────────────────────────────────────── + + def test_level_mode_fires_every_cycle(self): + self._set_cell_value(1500) + alert = self._make_alert(operator=">", threshold=1000.0, trigger_mode="level") + + for i in range(3): + msg_before = self.env["mail.message"].search_count( + [ + ("res_id", "=", self.spreadsheet.id), + ("model", "=", "spreadsheet.spreadsheet"), + ] + ) + alert._evaluate() + msg_after = self.env["mail.message"].search_count( + [ + ("res_id", "=", self.spreadsheet.id), + ("model", "=", "spreadsheet.spreadsheet"), + ] + ) + self.assertGreater( + msg_after, msg_before, f"Expected notification on cycle {i + 1}" + ) + + # ── Cron dispatcher ─────────────────────────────────────────────────────── + + def test_cron_evaluates_all_active(self): + self._set_cell_value(2000) + alert1 = self._make_alert(operator=">", threshold=1000.0) + alert2 = self._make_alert( + name="Low Stock", operator="<", threshold=5.0, cell_ref="B3" + ) + + # Both should update last_checked after cron run + self.env["spreadsheet.alert"]._cron_evaluate_all() + self.assertTrue(alert1.last_checked) + self.assertTrue(alert2.last_checked) + + def test_cron_skips_inactive(self): + alert = self._make_alert(active=False, operator=">", threshold=1000.0) + 
self.env["spreadsheet.alert"]._cron_evaluate_all() + self.assertFalse(alert.last_checked) + + # ── Smart button ────────────────────────────────────────────────────────── + + def test_smart_button_count(self): + self.assertEqual(self.spreadsheet.alert_count, 0) + self._make_alert() + self._make_alert(name="Alert 2", cell_ref="C5") + self.spreadsheet.invalidate_recordset() + self.assertEqual(self.spreadsheet.alert_count, 2) diff --git a/spreadsheet_oca/tests/test_input_param.py b/spreadsheet_oca/tests/test_input_param.py new file mode 100644 index 00000000..14319f24 --- /dev/null +++ b/spreadsheet_oca/tests/test_input_param.py @@ -0,0 +1,379 @@ +# Copyright 2025 Ledo Enterprises LLC +# License AGPL-3.0 or later (https://www.gnu.org/licenses/agpl). +""" +Tests for named input parameters. + +Covers: + - cell_ref.py helper functions (pure logic, no DB). + - spreadsheet.input_param model creation, constraints, and sync. + - _apply_param_substitution domain helper. + - Smart button count and action. + - Full refresh-cycle integration (param sync + domain substitution). + - Controller endpoint logic (model-level). 
+""" + +from odoo.exceptions import ValidationError +from odoo.tests import TransactionCase +from odoo.tools import mute_logger + +from ..models.cell_ref import ( + parse_cell_key, + parse_cell_ref, + read_cell_value, + write_cell_content, +) +from ..models.spreadsheet_refresh_schedule import _apply_param_substitution + + +class TestCellRefHelpers(TransactionCase): + """Unit tests for cell_ref.py — pure logic, no DB required.""" + + # ── parse_cell_ref ──────────────────────────────────────────────────────── + + def test_parse_cell_ref_simple(self): + self.assertEqual(parse_cell_ref("A1"), (0, 0)) + self.assertEqual(parse_cell_ref("B3"), (1, 2)) + self.assertEqual(parse_cell_ref("Z1"), (25, 0)) + + def test_parse_cell_ref_multi_letter(self): + col, row = parse_cell_ref("AA1") + self.assertEqual(col, 26) + self.assertEqual(row, 0) + col2, row2 = parse_cell_ref("AB12") + self.assertEqual(col2, 27) + self.assertEqual(row2, 11) + + def test_parse_cell_ref_invalid(self): + self.assertEqual(parse_cell_ref(""), (None, None)) + self.assertEqual(parse_cell_ref("A0"), (None, None)) # zero row forbidden + self.assertEqual(parse_cell_ref("12"), (None, None)) # no letters + self.assertEqual(parse_cell_ref("ZZZ"), (None, None)) # no row number + + def test_parse_cell_ref_case_insensitive(self): + self.assertEqual(parse_cell_ref("b3"), parse_cell_ref("B3")) + + # ── parse_cell_key ──────────────────────────────────────────────────────── + + def test_parse_cell_key_bare(self): + sheet, col, row = parse_cell_key("B3") + self.assertIsNone(sheet) + self.assertEqual(col, 1) + self.assertEqual(row, 2) + + def test_parse_cell_key_with_sheet(self): + sheet, col, row = parse_cell_key("Sheet1!B3") + self.assertEqual(sheet, "Sheet1") + self.assertEqual(col, 1) + self.assertEqual(row, 2) + + def test_parse_cell_key_invalid(self): + sheet, col, row = parse_cell_key("ZZZ") + self.assertIsNone(col) + + # ── read_cell_value ─────────────────────────────────────────────────────── + + def 
_make_raw(self, row, col, content=None, value=None): + cell = {} + if content is not None: + cell["content"] = content + if value is not None: + cell["value"] = value + # Convert 0-based (col, row) to "A1" format cell address + col_str = chr(ord("A") + col) + cell_addr = f"{col_str}{row + 1}" + return { + "sheets": [ + { + "id": "s1", + "name": "Sheet1", + "cells": {cell_addr: cell}, + } + ] + } + + def test_read_cell_value_numeric(self): + raw = self._make_raw(2, 1, content="42") # B3 + val = read_cell_value(raw, "B3") + self.assertEqual(val, "42") + + def test_read_cell_value_uses_evaluated_value(self): + raw = self._make_raw(2, 1, content="=1+1", value=2) + val = read_cell_value(raw, "B3") + self.assertEqual(val, 2) + + def test_read_cell_value_string(self): + raw = self._make_raw(0, 0, content="hello") # A1 + val = read_cell_value(raw, "A1") + self.assertEqual(val, "hello") + + def test_read_cell_value_missing(self): + raw = {"sheets": [{"id": "s1", "name": "Sheet1", "cells": {}}]} + val = read_cell_value(raw, "B3") + self.assertIsNone(val) + + def test_read_cell_value_empty_raw(self): + self.assertIsNone(read_cell_value({}, "B3")) + self.assertIsNone(read_cell_value(None, "B3")) + + # ── write_cell_content ──────────────────────────────────────────────────── + + def test_write_cell_content(self): + raw = {"sheets": [{"id": "s1", "name": "Sheet1", "cells": {}}]} + result = write_cell_content(raw, "B3", "hello") + cell = result["sheets"][0]["cells"]["B3"] + self.assertEqual(cell["content"], "hello") + + def test_write_cell_content_creates_nested_dicts(self): + raw = {"sheets": [{"id": "s1", "name": "Sheet1"}]} + result = write_cell_content(raw, "A1", 99) + self.assertIn("cells", result["sheets"][0]) + + +class TestInputParam(TransactionCase): + @classmethod + def setUpClass(cls): + super().setUpClass() + cls.spreadsheet = cls.env["spreadsheet.spreadsheet"].create( + {"name": "Input Param Test Spreadsheet"} + ) + # Minimal spreadsheet raw with a known cell value 
at B3. + cls.raw_with_b3 = { + "version": 1, + "sheets": [ + { + "id": "s1", + "name": "Sheet1", + "cells": { + "B3": {"content": "2026-01-01"}, + }, + } + ], + } + + def _make_param(self, **kwargs): + defaults = { + "name": "start_date", + "spreadsheet_id": self.spreadsheet.id, + "cell_ref": "B3", + } + defaults.update(kwargs) + return self.env["spreadsheet.input_param"].create(defaults) + + # ── Basic creation ──────────────────────────────────────────────────────── + + def test_create_param(self): + p = self._make_param() + self.assertEqual(p.name, "start_date") + self.assertEqual(p.spreadsheet_id, self.spreadsheet) + self.assertEqual(p.cell_ref, "B3") + self.assertTrue(p.active) + self.assertFalse(p.current_value) + self.assertFalse(p.last_synced) + + # ── Constraint: cell_ref ───────────────────────────────────────────────── + + def test_invalid_cell_ref_raises(self): + with self.assertRaises(ValidationError): + self._make_param(cell_ref="ZZZ") # letters only, no row number + + def test_valid_qualified_cell_ref(self): + p = self._make_param(name="q_ref", cell_ref="Assumptions!C5") + self.assertEqual(p.cell_ref, "Assumptions!C5") + + # ── Constraint: name ───────────────────────────────────────────────────── + + def test_invalid_name_raises_uppercase(self): + with self.assertRaises(ValidationError): + self._make_param(name="StartDate") # uppercase forbidden + + def test_invalid_name_raises_spaces(self): + with self.assertRaises(ValidationError): + self._make_param(name="start date") # spaces forbidden + + def test_invalid_name_raises_leading_digit(self): + with self.assertRaises(ValidationError): + self._make_param(name="1param") # must start with letter + + # ── Constraint: unique name per spreadsheet ─────────────────────────────── + + @mute_logger("odoo.sql_db") + def test_duplicate_name_raises(self): + self._make_param(name="dup_param") + # SQL UNIQUE constraint propagates as psycopg2.errors.UniqueViolation + # (IntegrityError subclass). 
Catch broadly to stay decoupled from + # psycopg2 internals and compatible across Odoo versions. + with self.assertRaises(Exception): # noqa: B017 + self._make_param(name="dup_param") + + # ── Sync from spreadsheet ───────────────────────────────────────────────── + + def test_sync_reads_cell(self): + self.spreadsheet.write({"spreadsheet_raw": self.raw_with_b3}) + p = self._make_param(name="sd") + p._sync_from_spreadsheet() + self.assertEqual(p.current_value, "2026-01-01") + + def test_sync_updates_last_synced(self): + self.spreadsheet.write({"spreadsheet_raw": self.raw_with_b3}) + p = self._make_param(name="sd2") + self.assertFalse(p.last_synced) + p._sync_from_spreadsheet() + self.assertTrue(p.last_synced) + + def test_sync_missing_cell_no_error(self): + """Syncing a cell that doesn't exist: no error, current_value stays None.""" + self.spreadsheet.write( + { + "spreadsheet_raw": { + "sheets": [{"id": "s1", "name": "Sheet1", "cells": {}}] + } + } + ) + p = self._make_param(name="sd3", cell_ref="Z99") + p._sync_from_spreadsheet() + self.assertFalse(p.current_value) + self.assertTrue(p.last_synced) + + def test_action_sync_now(self): + """action_sync_now() is a thin wrapper that calls _sync_from_spreadsheet.""" + self.spreadsheet.write({"spreadsheet_raw": self.raw_with_b3}) + p = self._make_param(name="sd_action") + p.action_sync_now() + self.assertEqual(p.current_value, "2026-01-01") + + def test_sync_all_for_spreadsheet(self): + """_sync_all_for_spreadsheet syncs all active params for a given spreadsheet.""" + self.spreadsheet.write({"spreadsheet_raw": self.raw_with_b3}) + p1 = self._make_param(name="sa_p1") + p2 = self._make_param(name="sa_p2", cell_ref="B3") + p3 = self._make_param( + name="sa_p3", active=False + ) # archived — should be skipped + self.env["spreadsheet.input_param"]._sync_all_for_spreadsheet( + self.spreadsheet.id + ) + p1.invalidate_recordset() + p2.invalidate_recordset() + p3.invalidate_recordset() + self.assertEqual(p1.current_value, 
"2026-01-01") + self.assertEqual(p2.current_value, "2026-01-01") + self.assertFalse(p3.current_value) # archived param not synced + + # ── Domain template substitution ───────────────────────────────────────── + + def test_param_substitution_simple(self): + domain = [("date", ">=", "%(start_date)s")] + params = {"start_date": "2026-01-01"} + result = _apply_param_substitution(domain, params) + self.assertEqual(result, [("date", ">=", "2026-01-01")]) + + def test_param_substitution_no_match(self): + """Domain without tokens is returned unchanged.""" + domain = [("state", "=", "done")] + result = _apply_param_substitution(domain, {"start_date": "2026-01-01"}) + self.assertEqual(result, [("state", "=", "done")]) + + def test_param_substitution_nested(self): + """Operator strings and nested lists are handled correctly.""" + domain = ["&", ("date", ">=", "%(start)s"), ("date", "<=", "%(end)s")] + params = {"start": "2026-01-01", "end": "2026-12-31"} + result = _apply_param_substitution(domain, params) + self.assertEqual( + result, + ["&", ("date", ">=", "2026-01-01"), ("date", "<=", "2026-12-31")], + ) + + @mute_logger("odoo.addons.spreadsheet_oca.models.spreadsheet_refresh_schedule") + def test_param_substitution_unknown_param_no_error(self): + """Unknown param name logs a warning but does not raise.""" + domain = [("date", ">=", "%(missing)s")] + # Should not raise — logs a warning and leaves token in place. + result = _apply_param_substitution(domain, {}) + self.assertEqual(result, [("date", ">=", "%(missing)s")]) + + # ── Smart button count ──────────────────────────────────────────────────── + + def test_smart_button_count(self): + other_ss = self.env["spreadsheet.spreadsheet"].create({"name": "Other SS"}) + self._make_param(name="cnt_a") + p2 = self._make_param(name="cnt_b") + self.spreadsheet.invalidate_recordset() + self.assertEqual(self.spreadsheet.input_param_count, 2) + # Archiving reduces count. 
+ p2.write({"active": False}) + self.spreadsheet.invalidate_recordset() + self.assertEqual(self.spreadsheet.input_param_count, 1) + # Other spreadsheet count unaffected. + self.assertEqual(other_ss.input_param_count, 0) + + # ── Action open input params ────────────────────────────────────────────── + + def test_action_open_input_params(self): + action = self.spreadsheet.action_open_input_params() + self.assertEqual(action["type"], "ir.actions.act_window") + self.assertEqual(action["res_model"], "spreadsheet.input_param") + self.assertIn(("spreadsheet_id", "=", self.spreadsheet.id), action["domain"]) + + # ── Full refresh-cycle integration ──────────────────────────────────────── + + def test_refresh_uses_params(self): + """Param is synced and substituted in the domain during a refresh cycle.""" + self.spreadsheet.write({"spreadsheet_raw": self.raw_with_b3}) + param = self._make_param(name="filter_name") + # Pivot domain references the param. + raw = { + "version": 1, + "sheets": [ + { + "id": "s1", + "name": "Sheet1", + "cells": {"B3": {"content": "Administrator"}}, + } + ], + "pivots": { + "1": { + "type": "ODOO", + "model": "res.partner", + "name": "Partners", + "domain": [("name", "=", "%(filter_name)s")], + "rows": [], + "columns": [], + "measures": [{"fieldName": "id", "aggregator": "count"}], + } + }, + } + self.spreadsheet.write({"spreadsheet_raw": raw}) + schedule = self.env["spreadsheet.refresh.schedule"].create( + { + "name": "Integration Test", + "spreadsheet_id": self.spreadsheet.id, + } + ) + schedule._run_refresh() + # Param should have been synced. 
+ param.invalidate_recordset() + self.assertEqual(param.current_value, "Administrator") + self.assertTrue(schedule.last_run) + + # ── Controller endpoint logic ───────────────────────────────────────────── + + def test_controller_returns_param_dict(self): + """The endpoint returns {name: current_value} for active params.""" + other_ss = self.env["spreadsheet.spreadsheet"].create({"name": "Ctrl SS"}) + p = self.env["spreadsheet.input_param"].create( + { + "name": "ctrl_param", + "spreadsheet_id": other_ss.id, + "cell_ref": "A1", + } + ) + p.write({"current_value": "test_value"}) + # Simulate what the controller does (model-level, no HTTP layer). + params = self.env["spreadsheet.input_param"].search( + [ + ("spreadsheet_id", "=", other_ss.id), + ("active", "=", True), + ] + ) + result = {q.name: q.current_value for q in params} + self.assertEqual(result, {"ctrl_param": "test_value"}) diff --git a/spreadsheet_oca/tests/test_pivot_data.py b/spreadsheet_oca/tests/test_pivot_data.py new file mode 100644 index 00000000..c569ec5e --- /dev/null +++ b/spreadsheet_oca/tests/test_pivot_data.py @@ -0,0 +1,179 @@ +# Copyright 2025 Ledo Enterprises LLC +# License AGPL-3.0 or later (https://www.gnu.org/licenses/agpl). +""" +Tests for server-side pivot data computation. + +These tests verify that _get_pivot_data() produces the same grouping +structure that the Odoo JS PivotModel would produce via read_group. 
+ +Run against odoo_test (which has sale, account installed): + docker exec -i odoo-prod odoo test -d odoo_test \ + --test-tags spreadsheet_oca.TestPivotData --stop-after-init +""" + +from odoo.tests import TransactionCase + +from ..models.pivot_data import _dimension_to_groupby, _get_pivot_data, _sections + + +class TestPivotDataHelpers(TransactionCase): + """Unit tests for the pure-Python helpers (no DB needed).""" + + def test_sections_empty(self): + self.assertEqual(_sections([]), [[]]) + + def test_sections_one(self): + self.assertEqual(_sections(["a"]), [[], ["a"]]) + + def test_sections_two(self): + self.assertEqual(_sections(["a", "b"]), [[], ["a"], ["a", "b"]]) + + def test_dimension_no_granularity(self): + self.assertEqual( + _dimension_to_groupby({"fieldName": "partner_id"}), "partner_id" + ) + + def test_dimension_with_granularity(self): + self.assertEqual( + _dimension_to_groupby({"fieldName": "date_order", "granularity": "month"}), + "date_order:month", + ) + + +class TestPivotData(TransactionCase): + """Integration tests using res.partner (always available, no demo needed).""" + + @classmethod + def setUpClass(cls): + super().setUpClass() + # Create a handful of partners in different countries to group by + cls.country_be = cls.env.ref("base.be") + cls.country_us = cls.env.ref("base.us") + cls.partners = cls.env["res.partner"].create( + [ + {"name": "Alpha", "country_id": cls.country_be.id, "is_company": True}, + {"name": "Beta", "country_id": cls.country_be.id, "is_company": True}, + {"name": "Gamma", "country_id": cls.country_us.id, "is_company": True}, + {"name": "Delta", "country_id": cls.country_us.id, "is_company": False}, + ] + ) + cls.domain = [("id", "in", cls.partners.ids)] + + # ── Helpers ───────────────────────────────────────────────────────────── + + def _run(self, row_dims, col_dims, measures): + return _get_pivot_data( + self.env, + "res.partner", + self.domain, + {}, + row_dims, + col_dims, + measures, + ) + + def _groups_for(self, 
result, row_prefix, col_prefix): + """Return groups matching the given row/col groupby prefix.""" + return [ + g + for g in result["groups"] + if g["rowGroupBy"] == row_prefix and g["colGroupBy"] == col_prefix + ] + + # ── Grand-total (no groupby) ──────────────────────────────────────────── + + def test_grand_total_count(self): + """With no dims, one group with count = number of partners.""" + result = self._run([], [], [{"fieldName": "__count"}]) + groups = self._groups_for(result, [], []) + self.assertEqual(len(groups), 1) + self.assertEqual(groups[0]["count"], 4) + + # ── Single row groupby ────────────────────────────────────────────────── + + def test_row_groupby_country(self): + """Row groupby country_id → one group per country + grand total.""" + row_dims = [{"fieldName": "country_id"}] + result = self._run(row_dims, [], [{"fieldName": "__count"}]) + + # Grand total (rowGroupBy=[], colGroupBy=[]) + totals = self._groups_for(result, [], []) + self.assertEqual(len(totals), 1) + self.assertEqual(totals[0]["count"], 4) + + # Per-country groups (rowGroupBy=["country_id"], colGroupBy=[]) + country_groups = self._groups_for(result, ["country_id"], []) + self.assertEqual(len(country_groups), 2) + counts_by_country = {g["rowValues"][0]: g["count"] for g in country_groups} + self.assertEqual(counts_by_country[self.country_be.id], 2) + self.assertEqual(counts_by_country[self.country_us.id], 2) + + # ── Row + col groupby ─────────────────────────────────────────────────── + + def test_row_and_col_groupby(self): + """Row=country_id, Col=is_company → 2×2 cell values.""" + row_dims = [{"fieldName": "country_id"}] + col_dims = [{"fieldName": "is_company"}] + result = self._run(row_dims, col_dims, [{"fieldName": "__count"}]) + + # Divisors: ([], []) ([], [is_company]) + # ([country_id], []) ([country_id], [is_company]) + # → 4 divisors, each producing N read_group rows + divisor_keys = { + (tuple(g["rowGroupBy"]), tuple(g["colGroupBy"])) for g in result["groups"] + } + 
self.assertIn(((), ()), divisor_keys) + self.assertIn(((), ("is_company",)), divisor_keys) + self.assertIn((("country_id",), ()), divisor_keys) + self.assertIn((("country_id",), ("is_company",)), divisor_keys) + + # BE / is_company=True → Alpha + Beta = 2 + cell_groups = self._groups_for(result, ["country_id"], ["is_company"]) + be_company = [ + g + for g in cell_groups + if g["rowValues"] == [self.country_be.id] and g["colValues"] == [True] + ] + self.assertEqual(len(be_company), 1) + self.assertEqual(be_company[0]["count"], 2) + + # US / is_company=False → Delta = 1 + us_individual = [ + g + for g in cell_groups + if g["rowValues"] == [self.country_us.id] and g["colValues"] == [False] + ] + self.assertEqual(len(us_individual), 1) + self.assertEqual(us_individual[0]["count"], 1) + + # ── Return structure ──────────────────────────────────────────────────── + + def test_return_fields_metadata(self): + """Result includes fields metadata for all used fields.""" + row_dims = [{"fieldName": "country_id"}] + result = self._run(row_dims, [], [{"fieldName": "__count"}]) + self.assertIn("country_id", result["fields"]) + self.assertEqual(result["fields"]["country_id"]["type"], "many2one") + + def test_return_dimensions_and_specs(self): + """Result echoes back row/col dims and measure specs.""" + row_dims = [{"fieldName": "country_id"}] + measures = [{"fieldName": "__count"}] + result = self._run(row_dims, [], measures) + self.assertEqual(result["rowDimensions"], row_dims) + self.assertEqual(result["colDimensions"], []) + self.assertEqual(result["measureSpecs"], ["__count"]) + + # ── Domain filtering ──────────────────────────────────────────────────── + + def test_domain_filters_correctly(self): + """Domain restricts records — only BE partners.""" + be_domain = [ + ("id", "in", self.partners.ids), + ("country_id", "=", self.country_be.id), + ] + result = _get_pivot_data( + self.env, "res.partner", be_domain, {}, [], [], [{"fieldName": "__count"}] + ) + totals = 
self._groups_for(result, [], []) + self.assertEqual(totals[0]["count"], 2) diff --git a/spreadsheet_oca/tests/test_refresh_schedule.py b/spreadsheet_oca/tests/test_refresh_schedule.py new file mode 100644 index 00000000..ff354aa1 --- /dev/null +++ b/spreadsheet_oca/tests/test_refresh_schedule.py @@ -0,0 +1,233 @@ +# Copyright 2025 Ledo Enterprises LLC +# License AGPL-3.0 or later (https://www.gnu.org/licenses/agpl). +""" +Tests for spreadsheet.refresh.schedule (scheduled cron refresh). + +These tests verify: + - Schedule creation and cron lifecycle (activate / deactivate / run now). + - _run_refresh() correctly reads pivot defs from spreadsheet_raw, + calls _get_pivot_data(), and posts a Chatter message. + - Graceful handling of edge cases (no pivots, unknown model). + - HTML renderer produces non-empty output. + - Smart button count on spreadsheet.spreadsheet. + +Run: + docker exec -i odoo-prod odoo test -d odoo_test \\ + --test-tags spreadsheet_oca.TestRefreshSchedule --stop-after-init +""" + +from odoo.tests import TransactionCase +from odoo.tools import mute_logger + + +class TestRefreshSchedule(TransactionCase): + @classmethod + def setUpClass(cls): + super().setUpClass() + cls.spreadsheet = cls.env["spreadsheet.spreadsheet"].create( + {"name": "Test Spreadsheet"} + ) + cls.partner = cls.env["res.partner"].create({"name": "Test Subscriber"}) + + # ── Schedule creation ──────────────────────────────────────────────────── + + def _make_schedule(self, **kwargs): + defaults = { + "name": "Weekly Refresh", + "spreadsheet_id": self.spreadsheet.id, + "interval_number": 1, + "interval_type": "weeks", + } + defaults.update(kwargs) + return self.env["spreadsheet.refresh.schedule"].create(defaults) + + def test_create_schedule(self): + schedule = self._make_schedule() + self.assertEqual(schedule.spreadsheet_id, self.spreadsheet) + self.assertFalse(schedule.cron_id) + self.assertFalse(schedule.last_run) + + def test_activate_creates_cron(self): + schedule = 
self._make_schedule() + schedule.action_activate() + self.assertTrue(schedule.cron_id) + self.assertTrue(schedule.cron_id.active) + self.assertEqual(schedule.cron_id.interval_number, 1) + self.assertEqual(schedule.cron_id.interval_type, "weeks") + + def test_activate_twice_reuses_cron(self): + schedule = self._make_schedule() + schedule.action_activate() + cron_id_first = schedule.cron_id.id + schedule.action_deactivate() + schedule.action_activate() + self.assertEqual(schedule.cron_id.id, cron_id_first) + + def test_deactivate_pauses_cron(self): + schedule = self._make_schedule() + schedule.action_activate() + schedule.action_deactivate() + self.assertFalse(schedule.cron_id.active) + + def test_unlink_removes_cron(self): + schedule = self._make_schedule() + schedule.action_activate() + cron = schedule.cron_id + schedule.unlink() + self.assertFalse(cron.exists()) + + # ── _run_refresh — no pivots ───────────────────────────────────────────── + + def test_run_refresh_no_pivots(self): + """Spreadsheet with no pivots: last_run is still updated.""" + schedule = self._make_schedule() + before = self.env["mail.message"].search_count( + [ + ("res_id", "=", self.spreadsheet.id), + ("model", "=", "spreadsheet.spreadsheet"), + ] + ) + schedule._run_refresh() + self.assertTrue(schedule.last_run) + # A chatter note is NOT posted when there are no pivots + after = self.env["mail.message"].search_count( + [ + ("res_id", "=", self.spreadsheet.id), + ("model", "=", "spreadsheet.spreadsheet"), + ] + ) + self.assertEqual(before, after) + + # ── _run_refresh — with pivot data ─────────────────────────────────────── + + def _set_pivot_raw(self, pivot_def): + """Write a minimal spreadsheet_raw JSON with one ODOO pivot.""" + raw = { + "version": 1, + "sheets": [{"id": "sheet1", "name": "Sheet1"}], + "pivots": { + "1": pivot_def, + }, + } + self.spreadsheet.write({"spreadsheet_raw": raw}) + + def test_run_refresh_with_valid_pivot(self): + """Valid res.partner pivot → posts a Chatter note 
with HTML body.""" + country_be = self.env.ref("base.be") + partners = self.env["res.partner"].create( + [ + {"name": "A", "country_id": country_be.id, "is_company": True}, + {"name": "B", "country_id": country_be.id, "is_company": True}, + ] + ) + self._set_pivot_raw( + { + "type": "ODOO", + "model": "res.partner", + "domain": [("id", "in", partners.ids)], + "context": {}, + "rows": [{"fieldName": "country_id"}], + "columns": [], + "measures": [{"fieldName": "__count"}], + "name": "Partner Pivot", + } + ) + schedule = self._make_schedule(notify_partner_ids=[self.partner.id]) + msg_count_before = self.env["mail.message"].search_count( + [ + ("res_id", "=", self.spreadsheet.id), + ("model", "=", "spreadsheet.spreadsheet"), + ] + ) + schedule._run_refresh() + self.assertTrue(schedule.last_run) + msg_count_after = self.env["mail.message"].search_count( + [ + ("res_id", "=", self.spreadsheet.id), + ("model", "=", "spreadsheet.spreadsheet"), + ] + ) + self.assertGreater( + msg_count_after, msg_count_before, "Expected a Chatter message" + ) + # Verify HTML content + msg = self.env["mail.message"].search( + [ + ("res_id", "=", self.spreadsheet.id), + ("model", "=", "spreadsheet.spreadsheet"), + ], + order="id desc", + limit=1, + ) + self.assertIn("Partner Pivot", msg.body) + self.assertIn("res.partner", msg.body) + + @mute_logger("odoo.addons.spreadsheet_oca.models.pivot_data") + def test_run_refresh_unknown_model_skipped(self): + """Pivot with an unknown model is skipped; last_run still set.""" + self._set_pivot_raw( + { + "type": "ODOO", + "model": "nonexistent.model.xyz", + "domain": [], + "context": {}, + "rows": [], + "columns": [], + "measures": [{"fieldName": "__count"}], + "name": "Bad Pivot", + } + ) + schedule = self._make_schedule() + schedule._run_refresh() + self.assertTrue(schedule.last_run) + + def test_run_refresh_non_odoo_pivot_skipped(self): + """Non-ODOO type pivot is ignored.""" + raw = { + "version": 1, + "sheets": [{"id": "sheet1", "name": 
"Sheet1"}], + "pivots": { + "1": {"type": "STATIC", "model": "res.partner"}, + }, + } + self.spreadsheet.write({"spreadsheet_raw": raw}) + schedule = self._make_schedule() + schedule._run_refresh() + self.assertTrue(schedule.last_run) + + # ── HTML renderer ──────────────────────────────────────────────────────── + + def test_render_refresh_html_empty(self): + html = self.env["spreadsheet.refresh.schedule"]._render_refresh_html([]) + self.assertIn("No ODOO pivot", html) + + def test_render_refresh_html_with_data(self): + result = { + "groups": [ + { + "rowGroupBy": [], + "colGroupBy": [], + "rowValues": [], + "colValues": [], + "count": 42, + "measures": {"__count": 42}, + } + ], + "rowDimensions": [], + "colDimensions": [], + "measureSpecs": ["__count"], + } + summaries = [{"name": "My Pivot", "model": "res.partner", "result": result}] + html = self.env["spreadsheet.refresh.schedule"]._render_refresh_html(summaries) + self.assertIn("My Pivot", html) + self.assertIn("res.partner", html) + self.assertIn("42", html) + + # ── Smart button count ─────────────────────────────────────────────────── + + def test_smart_button_count(self): + self.assertEqual(self.spreadsheet.refresh_schedule_count, 0) + self._make_schedule() + self._make_schedule(name="Daily", interval_type="days") + self.spreadsheet.invalidate_recordset() + self.assertEqual(self.spreadsheet.refresh_schedule_count, 2) diff --git a/spreadsheet_oca/tests/test_scenario.py b/spreadsheet_oca/tests/test_scenario.py new file mode 100644 index 00000000..80e9c25a --- /dev/null +++ b/spreadsheet_oca/tests/test_scenario.py @@ -0,0 +1,301 @@ +# Copyright 2025 Ledo Enterprises LLC +# License AGPL-3.0 or later (https://www.gnu.org/licenses/agpl). +""" +Tests for spreadsheet.scenario (named scenarios / what-if manager). 
+""" + +import json + +from odoo.exceptions import ValidationError +from odoo.tests import TransactionCase + + +class TestSpreadsheetScenario(TransactionCase): + @classmethod + def setUpClass(cls): + super().setUpClass() + cls.spreadsheet = cls.env["spreadsheet.spreadsheet"].create( + {"name": "Scenario Test Spreadsheet"} + ) + # Minimal valid o-spreadsheet raw JSON with two sheets. + cls.raw_with_sheets = { + "version": 1, + "sheets": [ + { + "id": "sheet1", + "name": "Sheet1", + "cells": { + "B3": {"content": "50000"}, + }, + }, + { + "id": "sheet2", + "name": "Assumptions", + "cells": { + "C5": {"content": "0.08"}, + }, + }, + ], + } + + def _make_scenario(self, **kwargs): + defaults = { + "name": "Base Case", + "spreadsheet_id": self.spreadsheet.id, + } + defaults.update(kwargs) + return self.env["spreadsheet.scenario"].create(defaults) + + # ── Basic create ────────────────────────────────────────────────────────── + + def test_create_scenario(self): + scenario = self._make_scenario(name="Optimistic") + self.assertEqual(scenario.name, "Optimistic") + self.assertEqual(scenario.spreadsheet_id, self.spreadsheet) + self.assertFalse(scenario.is_base) + self.assertTrue(scenario.active) + self.assertFalse(scenario.cell_overrides) + + # ── override_count ──────────────────────────────────────────────────────── + + def test_override_count(self): + overrides = json.dumps({"B3": 125000, "C5": 0.15, "D7": "hello"}) + scenario = self._make_scenario(cell_overrides=overrides) + self.assertEqual(scenario.override_count, 3) + + def test_override_count_empty(self): + scenario = self._make_scenario() + self.assertEqual(scenario.override_count, 0) + + def test_override_count_blank_string(self): + scenario = self._make_scenario(cell_overrides=" ") + self.assertEqual(scenario.override_count, 0) + + # ── Validation: JSON format ─────────────────────────────────────────────── + + def test_invalid_json_raises(self): + with self.assertRaises(ValidationError): + 
self._make_scenario(cell_overrides="this is not json!@#") + + def test_valid_json_non_dict_raises(self): + """Top-level JSON array should be rejected.""" + with self.assertRaises(ValidationError): + self._make_scenario(cell_overrides='["B3", "C5"]') + + def test_valid_json_accepted(self): + overrides = json.dumps({"B3": 100, "C5": 0.2, "D7": None, "E9": True}) + scenario = self._make_scenario(cell_overrides=overrides) + self.assertEqual(scenario.override_count, 4) + + # ── Validation: cell ref format ─────────────────────────────────────────── + + def test_invalid_cell_ref_raises(self): + """Keys that don't match the cell reference pattern must be rejected.""" + with self.assertRaises(ValidationError): + self._make_scenario(cell_overrides=json.dumps({"NOTACELL": 1})) + + def test_invalid_cell_ref_row_zero_raises(self): + """Row numbers must be >= 1.""" + with self.assertRaises(ValidationError): + self._make_scenario(cell_overrides=json.dumps({"B0": 1})) + + def test_invalid_cell_ref_no_letters_raises(self): + """Pure digit keys like '123' must be rejected.""" + with self.assertRaises(ValidationError): + self._make_scenario(cell_overrides=json.dumps({"123": 1})) + + def test_bare_cell_ref_accepted(self): + """'B3' without sheet prefix is valid.""" + scenario = self._make_scenario(cell_overrides=json.dumps({"B3": 99})) + self.assertEqual(scenario.override_count, 1) + + def test_sheet_prefixed_cell_ref_accepted(self): + """'Sheet1!B3' format is valid.""" + scenario = self._make_scenario( + cell_overrides=json.dumps({"Sheet1!B3": 125000, "Assumptions!C5": 0.12}) + ) + self.assertEqual(scenario.override_count, 2) + + def test_invalid_value_type_raises(self): + """Dict values must be int/float/str/bool/None. 
A list is not allowed.""" + with self.assertRaises(ValidationError): + self._make_scenario(cell_overrides=json.dumps({"B3": [1, 2, 3]})) + + # ── Constraint: single base per spreadsheet ─────────────────────────────── + + def test_duplicate_base_raises(self): + """Two is_base=True scenarios on the same spreadsheet must raise.""" + self._make_scenario(name="Base v1", is_base=True) + with self.assertRaises(ValidationError): + self._make_scenario(name="Base v2", is_base=True) + + def test_two_non_base_scenarios_allowed(self): + """Multiple non-base scenarios on the same spreadsheet are fine.""" + s1 = self._make_scenario(name="Optimistic") + s2 = self._make_scenario(name="Pessimistic") + self.assertTrue(s1.id) + self.assertTrue(s2.id) + + def test_base_on_different_spreadsheets_allowed(self): + """Each spreadsheet can have its own base scenario.""" + other = self.env["spreadsheet.spreadsheet"].create({"name": "Other Sheet"}) + s1 = self._make_scenario(name="Base for main", is_base=True) + s2 = self.env["spreadsheet.scenario"].create( + { + "name": "Base for other", + "spreadsheet_id": other.id, + "is_base": True, + } + ) + self.assertTrue(s1.is_base) + self.assertTrue(s2.is_base) + + # ── action_apply_to_copy ────────────────────────────────────────────────── + + def test_apply_to_copy_creates_new_spreadsheet(self): + """action_apply_to_copy returns an act_window and creates a new record.""" + self.spreadsheet.write({"spreadsheet_raw": self.raw_with_sheets}) + scenario = self._make_scenario( + name="High Revenue", + cell_overrides=json.dumps({"B3": 200000}), + ) + count_before = self.env["spreadsheet.spreadsheet"].search_count([]) + action = scenario.action_apply_to_copy() + count_after = self.env["spreadsheet.spreadsheet"].search_count([]) + + self.assertEqual(action["type"], "ir.actions.act_window") + self.assertEqual(count_after, count_before + 1) + + def test_apply_to_copy_correct_name(self): + """The new spreadsheet should include both parent and scenario 
names.""" + self.spreadsheet.write({"spreadsheet_raw": self.raw_with_sheets}) + scenario = self._make_scenario(name="Pessimistic") + action = scenario.action_apply_to_copy() + new_id = action["res_id"] + new_sheet = self.env["spreadsheet.spreadsheet"].browse(new_id) + self.assertIn("Scenario Test Spreadsheet", new_sheet.name) + self.assertIn("Pessimistic", new_sheet.name) + + def test_apply_to_copy_writes_cell(self): + """The new spreadsheet raw has the correct cell value from the override.""" + self.spreadsheet.write({"spreadsheet_raw": self.raw_with_sheets}) + # B3 → col_idx=1, row_idx=2 + scenario = self._make_scenario( + name="Override B3", + cell_overrides=json.dumps({"B3": 999999}), + ) + action = scenario.action_apply_to_copy() + new_id = action["res_id"] + new_sheet = self.env["spreadsheet.spreadsheet"].browse(new_id) + new_raw = new_sheet.spreadsheet_raw + cell_content = new_raw["sheets"][0]["cells"]["B3"]["content"] + self.assertEqual(cell_content, "999999") + + def test_apply_to_copy_with_sheet_prefix(self): + """'Sheet1!B3' prefix routes the override to the correct sheet.""" + self.spreadsheet.write({"spreadsheet_raw": self.raw_with_sheets}) + # Assumptions!C5 → sheet "Assumptions", col_idx=2, row_idx=4 + scenario = self._make_scenario( + name="Rate Change", + cell_overrides=json.dumps({"Assumptions!C5": 0.12}), + ) + action = scenario.action_apply_to_copy() + new_id = action["res_id"] + new_sheet = self.env["spreadsheet.spreadsheet"].browse(new_id) + new_raw = new_sheet.spreadsheet_raw + # Assumptions is sheets[1] + cell_content = new_raw["sheets"][1]["cells"]["C5"]["content"] + self.assertEqual(cell_content, "0.12") + + def test_apply_does_not_modify_original(self): + """action_apply_to_copy must leave the source spreadsheet_raw unchanged.""" + self.spreadsheet.write({"spreadsheet_raw": self.raw_with_sheets}) + original_raw = dict(self.spreadsheet.spreadsheet_raw) + original_cell = self.spreadsheet.spreadsheet_raw["sheets"][0]["cells"]["B3"][ + 
"content" + ] + scenario = self._make_scenario( + name="Change B3", + cell_overrides=json.dumps({"B3": 777}), + ) + scenario.action_apply_to_copy() + self.spreadsheet.invalidate_recordset() + after_cell = self.spreadsheet.spreadsheet_raw["sheets"][0]["cells"]["B3"][ + "content" + ] + self.assertEqual(original_cell, after_cell) + # Sheet count must not have changed + self.assertEqual( + len(original_raw.get("sheets", [])), + len(self.spreadsheet.spreadsheet_raw.get("sheets", [])), + ) + + def test_apply_to_copy_creates_cell_if_missing(self): + """Override of a cell not in the raw JSON should create it.""" + self.spreadsheet.write({"spreadsheet_raw": self.raw_with_sheets}) + # Z99 does not exist in raw_with_sheets + scenario = self._make_scenario( + name="New Cell", + cell_overrides=json.dumps({"Z99": 42}), + ) + action = scenario.action_apply_to_copy() + new_id = action["res_id"] + new_sheet = self.env["spreadsheet.spreadsheet"].browse(new_id) + new_raw = new_sheet.spreadsheet_raw + cell_content = new_raw["sheets"][0]["cells"]["Z99"]["content"] + self.assertEqual(cell_content, "42") + + def test_apply_to_copy_no_overrides(self): + """A scenario with no overrides still creates a copy successfully.""" + self.spreadsheet.write({"spreadsheet_raw": self.raw_with_sheets}) + scenario = self._make_scenario(name="Empty Scenario") + count_before = self.env["spreadsheet.spreadsheet"].search_count([]) + action = scenario.action_apply_to_copy() + count_after = self.env["spreadsheet.spreadsheet"].search_count([]) + self.assertEqual(action["type"], "ir.actions.act_window") + self.assertEqual(count_after, count_before + 1) + + # ── action_export_comparison ────────────────────────────────────────────── + + def test_export_comparison_returns_notification(self): + """action_export_comparison returns a display_notification action.""" + self.spreadsheet.write({"spreadsheet_raw": self.raw_with_sheets}) + scenario = self._make_scenario( + name="Compare Me", + 
cell_overrides=json.dumps({"B3": 150000}), + ) + action = scenario.action_export_comparison() + self.assertEqual(action["type"], "ir.actions.client") + self.assertEqual(action["tag"], "display_notification") + self.assertIn("params", action) + + def test_export_comparison_no_overrides(self): + """Comparison with no overrides returns an informational notification.""" + scenario = self._make_scenario(name="Empty") + action = scenario.action_export_comparison() + self.assertEqual(action["type"], "ir.actions.client") + self.assertEqual(action["tag"], "display_notification") + self.assertEqual(action["params"]["type"], "info") + + # ── Smart button / scenario_count ───────────────────────────────────────── + + def test_smart_button_count(self): + """scenario_count on spreadsheet reflects active scenario count.""" + self.assertEqual(self.spreadsheet.scenario_count, 0) + self._make_scenario(name="S1") + self._make_scenario(name="S2") + self._make_scenario(name="S3 Archived", active=False) + self.spreadsheet.invalidate_recordset() + self.assertEqual(self.spreadsheet.scenario_count, 2) + + # ── action_open_scenarios ───────────────────────────────────────────────── + + def test_action_open_scenarios(self): + """action_open_scenarios returns an act_window with correct domain.""" + action = self.spreadsheet.action_open_scenarios() + self.assertEqual(action["type"], "ir.actions.act_window") + self.assertEqual(action["res_model"], "spreadsheet.scenario") + self.assertIn(("spreadsheet_id", "=", self.spreadsheet.id), action["domain"]) + self.assertEqual( + action["context"].get("default_spreadsheet_id"), self.spreadsheet.id + ) diff --git a/spreadsheet_oca/tests/test_subscription.py b/spreadsheet_oca/tests/test_subscription.py new file mode 100644 index 00000000..1c3b96b2 --- /dev/null +++ b/spreadsheet_oca/tests/test_subscription.py @@ -0,0 +1,216 @@ +# Copyright 2025 Ledo Enterprises LLC +# License AGPL-3.0 or later (https://www.gnu.org/licenses/agpl). 
+""" +Tests for spreadsheet.subscription (dashboard subscriptions). +""" + +from datetime import timedelta + +from psycopg2 import IntegrityError + +from odoo import fields +from odoo.tests import TransactionCase +from odoo.tools import mute_logger + + +class TestSpreadsheetSubscription(TransactionCase): + @classmethod + def setUpClass(cls): + super().setUpClass() + # Archive any pre-existing demo subscriptions so they don't + # interfere with cron-based mail-count assertions. + cls.env["spreadsheet.subscription"].search([]).write({"active": False}) + cls.spreadsheet = cls.env["spreadsheet.spreadsheet"].create( + {"name": "Sub Test Spreadsheet"} + ) + cls.partner = cls.env["res.partner"].create( + {"name": "Digest Subscriber", "email": "sub@example.com"} + ) + cls.partner2 = cls.env["res.partner"].create( + {"name": "Second Subscriber", "email": "sub2@example.com"} + ) + + def _make_subscription(self, **kwargs): + defaults = { + "spreadsheet_id": self.spreadsheet.id, + "partner_id": self.partner.id, + "frequency": "weekly", + "include_pivot_data": False, + } + defaults.update(kwargs) + return self.env["spreadsheet.subscription"].create(defaults) + + def _make_raw_with_pivot(self): + """Return a spreadsheet_raw dict containing one ODOO pivot.""" + return { + "version": 1, + "pivots": { + "1": { + "type": "ODOO", + "model": "res.partner", + "name": "Partner Count", + "domain": [], + "context": {}, + "rows": [], + "columns": [], + "measures": [{"fieldName": "__count"}], + } + }, + } + + # ── Creation ────────────────────────────────────────────────────────────── + + def test_create_subscription(self): + sub = self._make_subscription(frequency="daily", include_pivot_data=True) + self.assertEqual(sub.spreadsheet_id, self.spreadsheet) + self.assertEqual(sub.partner_id, self.partner) + self.assertEqual(sub.frequency, "daily") + self.assertTrue(sub.include_pivot_data) + self.assertTrue(sub.active) + self.assertFalse(sub.last_sent) + # Name is auto-computed from spreadsheet 
+ partner + self.assertIn("Sub Test Spreadsheet", sub.name) + self.assertIn("Digest Subscriber", sub.name) + + def test_unique_constraint(self): + self._make_subscription() + with self.assertRaises(IntegrityError), mute_logger("odoo.sql_db"): + # Creating a second subscription for the same spreadsheet + partner + # must raise a DB-level unique constraint violation. + self.env["spreadsheet.subscription"].create( + { + "spreadsheet_id": self.spreadsheet.id, + "partner_id": self.partner.id, + "frequency": "monthly", + } + ) + + # ── _send_digest: mail.mail creation ───────────────────────────────────── + + def test_send_digest_no_pivots(self): + """Digest is sent (last_sent updated) even when there are no pivot sources.""" + sub = self._make_subscription(include_pivot_data=False) + self.assertFalse(sub.last_sent) + sub._send_digest() + self.assertTrue(sub.last_sent) + + def test_send_digest_checks_subject(self): + """Digest email subject contains the spreadsheet name.""" + sub = self._make_subscription(include_pivot_data=False) + # Check rendered HTML directly instead of querying mail.mail + body = sub._render_digest_html(self.spreadsheet.sudo(), []) + self.assertIn(self.spreadsheet.name, body) + + def test_send_digest_with_pivot_data(self): + """Digest with include_pivot_data=True processes pivot data.""" + self.spreadsheet.write({"spreadsheet_raw": self._make_raw_with_pivot()}) + sub = self._make_subscription(include_pivot_data=True) + self.assertFalse(sub.last_sent) + sub._send_digest() + self.assertTrue(sub.last_sent) + + def test_send_digest_include_false(self): + """When include_pivot_data=False, pivot rendering is skipped.""" + self.spreadsheet.write({"spreadsheet_raw": self._make_raw_with_pivot()}) + sub = self._make_subscription(include_pivot_data=False) + # Check rendered HTML directly + body = sub._render_digest_html(self.spreadsheet.sudo(), []) + self.assertNotIn("'}. 
+ """ + if env is None: + env = self.env + + log_vals_base = { + "spreadsheet_id": spreadsheet.id, + "res_model": model, + "record_id": record_id, + "field_name": field_name, + "new_value": str(new_value), + } + + try: + if not spreadsheet.exists(): + return {"error": "Spreadsheet not found."} + + if not spreadsheet.writeback_enabled: + return {"error": "Writeback not enabled for this spreadsheet."} + + try: + spreadsheet.check_access("read") + except AccessError: + return {"error": "Access denied to spreadsheet."} + + if model not in env: + return {"error": f"Model {model!r} is not available."} + + record = env[model].browse(record_id) + if not record.exists(): + return {"error": f"Record {model}({record_id}) not found."} + + try: + record.check_access("write") + except AccessError: + return { + "error": "Access denied: you do not have" + " write access on this record." + } + + old_value = record[field_name] + old_value_str = str(old_value) + + record.write({field_name: new_value}) + + log = ( + env["spreadsheet.writeback.log"] + .sudo() + .create( + dict( + log_vals_base, + old_value=old_value_str, + status="ok", + ) + ) + ) + + spreadsheet.sudo().message_post( + body=( + f"Writeback: field {field_name} on " + f"{model} #{record_id} changed " + f"from {old_value_str}" + f" to {new_value}." 
+ ), + subtype_xmlid="mail.mt_note", + ) + + return { + "success": True, + "old_value": old_value_str, + "new_value": str(new_value), + "log_id": log.id, + } + + except Exception as exc: + try: + env["spreadsheet.writeback.log"].sudo().create( + dict( + log_vals_base, + status="error", + error_message=str(exc)[:255], + ) + ) + except Exception: + _logger.debug("Failed to create writeback error log entry") + return {"error": str(exc)} + + # ── test_writeback_disabled_returns_error ────────────────────────────────── + + def test_writeback_disabled_returns_error(self): + """Controller returns an error dict when writeback_enabled is False.""" + result = self._simulate_writeback( + self.spreadsheet_off, + "res.partner", + self.partner.id, + "name", + "Should Not Happen", + ) + self.assertIn("error", result) + self.assertNotIn("success", result) + self.assertIn("not enabled", result["error"].lower()) + + # ── test_writeback_creates_log ───────────────────────────────────────────── + + def test_writeback_creates_log(self): + """A successful writeback creates a spreadsheet.writeback.log record.""" + log_count_before = self.env["spreadsheet.writeback.log"].search_count( + [("spreadsheet_id", "=", self.spreadsheet.id)] + ) + result = self._simulate_writeback( + self.spreadsheet, + "res.partner", + self.partner.id, + "name", + "Writeback Log Test", + ) + self.assertTrue(result.get("success"), result) + log_count_after = self.env["spreadsheet.writeback.log"].search_count( + [("spreadsheet_id", "=", self.spreadsheet.id)] + ) + self.assertGreater(log_count_after, log_count_before) + + # ── test_writeback_updates_record ────────────────────────────────────────── + + def test_writeback_updates_record(self): + """The target record's field is actually changed after a writeback.""" + new_name = "Updated By Writeback" + result = self._simulate_writeback( + self.spreadsheet, + "res.partner", + self.partner.id, + "name", + new_name, + ) + self.assertTrue(result.get("success"), result) + 
self.partner.invalidate_recordset() + self.assertEqual(self.partner.name, new_name) + + # ── test_writeback_log_contains_old_value ───────────────────────────────── + + def test_writeback_log_contains_old_value(self): + """The log entry captures the old value before the write.""" + # Set a known starting name + self.partner.write({"name": "Known Old Name"}) + result = self._simulate_writeback( + self.spreadsheet, + "res.partner", + self.partner.id, + "name", + "New Name After Writeback", + ) + self.assertTrue(result.get("success"), result) + self.assertEqual(result["old_value"], "Known Old Name") + + log = self.env["spreadsheet.writeback.log"].browse(result["log_id"]) + self.assertEqual(log.old_value, "Known Old Name") + self.assertEqual(log.field_name, "name") + self.assertEqual(log.status, "ok") + + # ── test_writeback_access_denied ────────────────────────────────────────── + + def test_writeback_access_denied(self): + """ + A user without write access on the target record gets an error dict. + + We patch check_access on the record to raise AccessError, simulating + a restricted user without going through the full ir.rule machinery. 
+ """ + # Create the record as admin + protected_partner = ( + self.env["res.partner"].sudo().create({"name": "Protected Partner"}) + ) + + # Patch check_access to always raise AccessError for this test + with patch.object( + type(protected_partner), + "check_access", + side_effect=AccessError("Access denied"), + ): + result = self._simulate_writeback( + self.spreadsheet, + "res.partner", + protected_partner.id, + "name", + "Should Fail", + ) + + self.assertIn("error", result) + self.assertIn("access denied", result["error"].lower()) + + # ── test_rollback_restores_value ─────────────────────────────────────────── + + def test_rollback_restores_value(self): + """action_rollback_writeback restores the old field value.""" + original_name = "Pre-Rollback Name" + self.partner.write({"name": original_name}) + + result = self._simulate_writeback( + self.spreadsheet, + "res.partner", + self.partner.id, + "name", + "Changed Name", + ) + self.assertTrue(result.get("success"), result) + self.partner.invalidate_recordset() + self.assertEqual(self.partner.name, "Changed Name") + + # Roll back via the spreadsheet model method + self.spreadsheet.action_rollback_writeback(result["log_id"]) + + self.partner.invalidate_recordset() + self.assertEqual(self.partner.name, original_name) + + # ── test_rollback_marks_log_rolled_back ─────────────────────────────────── + + def test_rollback_marks_log_rolled_back(self): + """The log entry status changes to 'rolled_back' after a rollback.""" + self.partner.write({"name": "Before Rollback Mark"}) + result = self._simulate_writeback( + self.spreadsheet, + "res.partner", + self.partner.id, + "name", + "After Write", + ) + self.assertTrue(result.get("success"), result) + + log = self.env["spreadsheet.writeback.log"].browse(result["log_id"]) + self.assertEqual(log.status, "ok") + + self.spreadsheet.action_rollback_writeback(result["log_id"]) + log.invalidate_recordset() + self.assertEqual(log.status, "rolled_back") + + # ── 
test_smart_button_count ──────────────────────────────────────────────── + + def test_smart_button_count(self): + """writeback_log_count reflects successful log entries.""" + self.spreadsheet.invalidate_recordset() + count_before = self.spreadsheet.writeback_log_count + + # Write once + self.partner.write({"name": "Count Test 1"}) + r1 = self._simulate_writeback( + self.spreadsheet, + "res.partner", + self.partner.id, + "name", + "Count Test 2", + ) + self.assertTrue(r1.get("success"), r1) + + # Write again + r2 = self._simulate_writeback( + self.spreadsheet, + "res.partner", + self.partner.id, + "name", + "Count Test 3", + ) + self.assertTrue(r2.get("success"), r2) + + self.spreadsheet.invalidate_recordset() + self.assertEqual(self.spreadsheet.writeback_log_count, count_before + 2) + + # ── test_writeback_posts_chatter_message ────────────────────────────────── + + def test_writeback_posts_chatter_message(self): + """A chatter note appears on the spreadsheet after a writeback.""" + self.partner.write({"name": "Chatter Test Start"}) + msg_count_before = self.env["mail.message"].search_count( + [ + ("res_id", "=", self.spreadsheet.id), + ("model", "=", "spreadsheet.spreadsheet"), + ] + ) + result = self._simulate_writeback( + self.spreadsheet, + "res.partner", + self.partner.id, + "name", + "Chatter Test End", + ) + self.assertTrue(result.get("success"), result) + + msg_count_after = self.env["mail.message"].search_count( + [ + ("res_id", "=", self.spreadsheet.id), + ("model", "=", "spreadsheet.spreadsheet"), + ] + ) + self.assertGreater(msg_count_after, msg_count_before) + + +# TODO: Add HttpCase tests for the /spreadsheet/writeback JSON-RPC endpoint. +# +# An HttpCase would POST to the controller route directly, verifying the full +# HTTP stack (routing, CSRF, JSON-RPC serialisation, auth). However, this +# requires: +# 1. A logged-in browser session (HttpCase.authenticate + url_open), or +# manually crafting a JSON-RPC request with session cookies. +# 2. 
The Odoo test HTTP server running (HttpCase spins one up, but it uses +# a separate transaction — test data created in setUpClass is not visible +# unless using ``@tagged("post_install", "-at_install")``) . +# 3. Careful handling of the JSON-RPC envelope ({"jsonrpc": "2.0", "method": +# "call", "params": {...}}) which Odoo's ``type="json"`` routes expect. +# +# For now the model-level tests above cover the business logic; the controller +# is a thin wrapper. A proper HttpCase should be added when the module gets +# integration / end-to-end test infrastructure. diff --git a/spreadsheet_oca/tests/test_xlsx_export.py b/spreadsheet_oca/tests/test_xlsx_export.py new file mode 100644 index 00000000..a8db69b5 --- /dev/null +++ b/spreadsheet_oca/tests/test_xlsx_export.py @@ -0,0 +1,314 @@ +# Copyright 2025 Ledo Enterprises LLC +# License AGPL-3.0 or later (https://www.gnu.org/licenses/agpl). +""" +Tests for headless XLSX export (spreadsheet_xlsx_export). +""" + +import base64 +import io + +import openpyxl + +from odoo.tests import TransactionCase +from odoo.tools import mute_logger + +from ..models.spreadsheet_xlsx_export import ( + SpreadsheetXlsxExporter, + _coerce_value, + _format_measure_name, +) + + +class TestHelpers(TransactionCase): + """Unit tests for pure helper functions.""" + + def test_coerce_value_int(self): + self.assertEqual(_coerce_value("42"), 42) + + def test_coerce_value_float(self): + self.assertAlmostEqual(_coerce_value("3.14"), 3.14) + + def test_coerce_value_float_comma(self): + # Commas stripped as thousands separators: "1,234.56" → 1234.56 + self.assertAlmostEqual(_coerce_value("1,234.56"), 1234.56) + + def test_coerce_value_string(self): + self.assertEqual(_coerce_value("hello"), "hello") + + def test_coerce_value_bool_passthrough(self): + self.assertTrue(_coerce_value(True)) + self.assertFalse(_coerce_value(False)) + + def test_format_measure_name_with_colon(self): + self.assertEqual( + _format_measure_name("amount_total:sum"), + "Amount Total 
(Sum)", + ) + + def test_format_measure_name_plain(self): + self.assertEqual(_format_measure_name("amount_total"), "Amount Total") + + +class TestXlsxExport(TransactionCase): + @classmethod + def setUpClass(cls): + super().setUpClass() + cls.spreadsheet = cls.env["spreadsheet.spreadsheet"].create( + {"name": "Test Export Sheet"} + ) + + def _load_wb(self, xlsx_bytes): + """Open xlsx bytes as an openpyxl workbook.""" + return openpyxl.load_workbook(io.BytesIO(xlsx_bytes)) + + # ── render() basics ─────────────────────────────────────────────────────── + + def test_render_returns_bytes(self): + xlsx = SpreadsheetXlsxExporter(self.env, self.spreadsheet).render() + self.assertIsInstance(xlsx, bytes) + self.assertGreater(len(xlsx), 0) + + def test_render_empty_spreadsheet_creates_fallback_sheet(self): + self.spreadsheet.write({"spreadsheet_raw": {}}) + wb = self._load_wb(SpreadsheetXlsxExporter(self.env, self.spreadsheet).render()) + self.assertEqual(len(wb.sheetnames), 1) + self.assertEqual(wb.sheetnames[0], "Empty") + + def test_render_static_sheet_values(self): + raw = { + "sheets": [ + { + "id": "s1", + "name": "Summary", + "cells": { + "A1": {"content": "Revenue"}, + "B1": {"content": "12345"}, + "A2": {"content": "Cost"}, + "B2": {"content": "7000"}, + }, + } + ] + } + self.spreadsheet.write({"spreadsheet_raw": raw}) + wb = self._load_wb(SpreadsheetXlsxExporter(self.env, self.spreadsheet).render()) + + self.assertIn("Summary", wb.sheetnames) + ws = wb["Summary"] + self.assertEqual(ws.cell(1, 1).value, "Revenue") + self.assertEqual(ws.cell(1, 2).value, 12345) # numeric coercion + self.assertEqual(ws.cell(2, 1).value, "Cost") + self.assertEqual(ws.cell(2, 2).value, 7000) + + def test_render_formula_cells_written_verbatim(self): + raw = { + "sheets": [ + { + "id": "s1", + "name": "Formulas", + "cells": { + "A1": {"content": "=SUM(B1:B10)"}, + }, + } + ] + } + self.spreadsheet.write({"spreadsheet_raw": raw}) + wb = self._load_wb(SpreadsheetXlsxExporter(self.env, 
self.spreadsheet).render()) + ws = wb["Formulas"] + # Formula preserved as a string (server can't evaluate it) + self.assertEqual(ws.cell(1, 1).value, "=SUM(B1:B10)") + + def test_render_multiple_static_sheets(self): + raw = { + "sheets": [ + {"id": "s1", "name": "Sheet1", "cells": {"A1": {"content": "A"}}}, + {"id": "s2", "name": "Sheet2", "cells": {"A1": {"content": "B"}}}, + ] + } + self.spreadsheet.write({"spreadsheet_raw": raw}) + wb = self._load_wb(SpreadsheetXlsxExporter(self.env, self.spreadsheet).render()) + self.assertEqual(wb.sheetnames, ["Sheet1", "Sheet2"]) + + @mute_logger("odoo.addons.spreadsheet_oca.models.pivot_data") + def test_render_with_unknown_pivot_model_graceful(self): + """Unknown model in pivot definition should not crash; pivot is skipped.""" + raw = { + "sheets": [{"id": "s1", "name": "Data", "cells": {}}], + "pivots": { + "1": { + "type": "ODOO", + "model": "nonexistent.model.xyz", + "domain": [], + "context": {}, + "rows": [], + "columns": [], + "measures": [], + "name": "Bad Pivot", + } + }, + } + self.spreadsheet.write({"spreadsheet_raw": raw}) + # Should not raise — unknown model is silently skipped + wb = self._load_wb(SpreadsheetXlsxExporter(self.env, self.spreadsheet).render()) + # Only the static sheet, no pivot sheet for the invalid model + self.assertNotIn("Bad Pivot", wb.sheetnames) + self.assertIn("Data", wb.sheetnames) + + def test_render_pivot_non_odoo_type_skipped(self): + """Non-ODOO pivot types are skipped (no extra worksheet).""" + raw = { + "sheets": [{"id": "s1", "name": "Data", "cells": {}}], + "pivots": { + "1": {"type": "CUSTOM", "name": "Custom Pivot"}, + }, + } + self.spreadsheet.write({"spreadsheet_raw": raw}) + wb = self._load_wb(SpreadsheetXlsxExporter(self.env, self.spreadsheet).render()) + # Only the static sheet, no pivot sheet for CUSTOM type + self.assertEqual(wb.sheetnames, ["Data"]) + + def test_render_pivot_grand_total_only(self): + """ + A pivot with no row/col dimensions produces a grand-total 
table. + Use res.partner (always present) with count measure. + """ + raw = { + "sheets": [{"id": "s1", "name": "Data", "cells": {}}], + "pivots": { + "1": { + "type": "ODOO", + "model": "res.partner", + "domain": [["id", "=", 1]], # narrow domain for speed + "context": {}, + "rows": [], + "columns": [], + "measures": [], + "name": "Partner Count", + } + }, + } + self.spreadsheet.write({"spreadsheet_raw": raw}) + wb = self._load_wb(SpreadsheetXlsxExporter(self.env, self.spreadsheet).render()) + self.assertIn("Partner Count", wb.sheetnames) + + def test_render_pivot_row_breakdown(self): + """Row-only pivot creates a table with header + data rows.""" + raw = { + "sheets": [{"id": "s1", "name": "Data", "cells": {}}], + "pivots": { + "1": { + "type": "ODOO", + "model": "res.partner", + "domain": [["id", "in", [1, 3]]], + "context": {}, + "rows": [{"fieldName": "id", "order": "asc", "type": "integer"}], + "columns": [], + "measures": [], + "name": "By ID", + } + }, + } + self.spreadsheet.write({"spreadsheet_raw": raw}) + # Should not raise; pivot sheet created + wb = self._load_wb(SpreadsheetXlsxExporter(self.env, self.spreadsheet).render()) + self.assertIn("By ID", wb.sheetnames) + + def test_render_pivot_crosstab(self): + """Cross-tab pivot (both row and column groupby) produces correct layout. + + Uses res.partner with row=is_company and col=type, exercising the + _write_crosstab code path. 
+ """ + raw = { + "sheets": [{"id": "s1", "name": "Data", "cells": {}}], + "pivots": { + "1": { + "type": "ODOO", + "model": "res.partner", + "domain": [], + "context": {}, + "rows": [ + {"fieldName": "is_company", "order": "asc", "type": "boolean"} + ], + "columns": [ + {"fieldName": "type", "order": "asc", "type": "selection"} + ], + "measures": [], + "name": "Cross-Tab Test", + } + }, + } + self.spreadsheet.write({"spreadsheet_raw": raw}) + wb = self._load_wb(SpreadsheetXlsxExporter(self.env, self.spreadsheet).render()) + self.assertIn("Cross-Tab Test", wb.sheetnames) + ws = wb["Cross-Tab Test"] + # Row 1 is the title row with the pivot name + self.assertEqual(ws.cell(1, 1).value, "Cross-Tab Test") + # Row 4+ should contain column headers — at minimum the row dimension + # header ("is_company") should appear somewhere in the header area. + header_values = [ws.cell(4, c).value for c in range(1, ws.max_column + 1)] + self.assertIn("is_company", header_values) + + # ── action_export_xlsx ──────────────────────────────────────────────────── + + def test_action_export_xlsx_returns_act_url(self): + self.spreadsheet.write({"spreadsheet_raw": {}}) + action = self.spreadsheet.action_export_xlsx() + self.assertEqual(action["type"], "ir.actions.act_url") + self.assertIn("/web/content/", action["url"]) + self.assertIn("download=true", action["url"]) + + def test_action_export_xlsx_creates_attachment(self): + self.spreadsheet.write({"spreadsheet_raw": {}}) + before = self.env["ir.attachment"].search_count( + [ + ("res_model", "=", "spreadsheet.spreadsheet"), + ("res_id", "=", self.spreadsheet.id), + ] + ) + self.spreadsheet.action_export_xlsx() + after = self.env["ir.attachment"].search_count( + [ + ("res_model", "=", "spreadsheet.spreadsheet"), + ("res_id", "=", self.spreadsheet.id), + ] + ) + self.assertEqual(after, before + 1) + + def test_action_export_xlsx_filename(self): + self.spreadsheet.write({"spreadsheet_raw": {}, "name": "Sales KPI"}) + 
self.spreadsheet.action_export_xlsx() + att = self.env["ir.attachment"].search( + [ + ("res_model", "=", "spreadsheet.spreadsheet"), + ("res_id", "=", self.spreadsheet.id), + ], + order="id desc", + limit=1, + ) + self.assertEqual(att.name, "Sales KPI.xlsx") + + def test_action_export_xlsx_attachment_is_valid_xlsx(self): + self.spreadsheet.write({"spreadsheet_raw": {}}) + self.spreadsheet.action_export_xlsx() + att = self.env["ir.attachment"].search( + [ + ("res_model", "=", "spreadsheet.spreadsheet"), + ("res_id", "=", self.spreadsheet.id), + ], + order="id desc", + limit=1, + ) + xlsx_bytes = base64.b64decode(att.datas) + # Should open as a valid workbook + wb = self._load_wb(xlsx_bytes) + self.assertGreater(len(wb.sheetnames), 0) + + # ── get_xlsx_bytes ──────────────────────────────────────────────────────── + + def test_get_xlsx_bytes_returns_base64(self): + self.spreadsheet.write({"spreadsheet_raw": {}}) + b64 = self.env["spreadsheet.spreadsheet"].get_xlsx_bytes(self.spreadsheet.id) + self.assertIsInstance(b64, str) + # Must be valid base64 + decoded = base64.b64decode(b64) + self._load_wb(decoded) # valid xlsx diff --git a/spreadsheet_oca/views/spreadsheet_alert_views.xml b/spreadsheet_oca/views/spreadsheet_alert_views.xml new file mode 100644 index 00000000..3373cfe5 --- /dev/null +++ b/spreadsheet_oca/views/spreadsheet_alert_views.xml @@ -0,0 +1,266 @@ + + + + + + spreadsheet.alert.search + spreadsheet.alert + + + + + + + + + + + + + + + + + + + spreadsheet.alert.list + spreadsheet.alert + + + + + + + + + + + + + + + + + diff --git a/spreadsheet_oca/views/spreadsheet_input_param_views.xml b/spreadsheet_oca/views/spreadsheet_input_param_views.xml new file mode 100644 index 00000000..37861d28 --- /dev/null +++ b/spreadsheet_oca/views/spreadsheet_input_param_views.xml @@ -0,0 +1,160 @@ + + + + + + spreadsheet.input_param.search + spreadsheet.input_param + + + + + + + + + + + + + + + + + spreadsheet.input_param.list + spreadsheet.input_param + + + + + + + + + 
+ + + + + diff --git a/spreadsheet_oca/views/spreadsheet_refresh_schedule_views.xml b/spreadsheet_oca/views/spreadsheet_refresh_schedule_views.xml new file mode 100644 index 00000000..7791f6ce --- /dev/null +++ b/spreadsheet_oca/views/spreadsheet_refresh_schedule_views.xml @@ -0,0 +1,177 @@ + + + + + + spreadsheet.refresh.schedule.search + spreadsheet.refresh.schedule + + + + + + + + + + + + + + + + + + + spreadsheet.refresh.schedule.list + spreadsheet.refresh.schedule + + + + + + + + + + + + + + + + Refresh Schedules + + + + + + diff --git a/spreadsheet_oca/views/spreadsheet_scenario_views.xml b/spreadsheet_oca/views/spreadsheet_scenario_views.xml new file mode 100644 index 00000000..b10bc3ca --- /dev/null +++ b/spreadsheet_oca/views/spreadsheet_scenario_views.xml @@ -0,0 +1,168 @@ + + + + + + spreadsheet.scenario.search + spreadsheet.scenario + + + + + + + + + + + + + + + + + + spreadsheet.scenario.list + spreadsheet.scenario + + + + + + + + + + + + + + + spreadsheet.scenario.form + spreadsheet.scenario + +
+
+
+ + + + + + + + + + + + + + + + + + + + + +
+
+ + + + What-If Scenarios + spreadsheet.scenario + list,form + {'search_default_active': 1} + + + + + What-If Scenarios + + + + + + + + + spreadsheet.spreadsheet.form.scenario + spreadsheet.spreadsheet + + + + + + + +
diff --git a/spreadsheet_oca/views/spreadsheet_spreadsheet.xml b/spreadsheet_oca/views/spreadsheet_spreadsheet.xml index b2684e67..429ad506 100644 --- a/spreadsheet_oca/views/spreadsheet_spreadsheet.xml +++ b/spreadsheet_oca/views/spreadsheet_spreadsheet.xml @@ -47,7 +47,7 @@ - spreadsheet.spreadsheet.tree (in spreadsheet_oca) + spreadsheet.spreadsheet.form (in spreadsheet_oca) spreadsheet.spreadsheet
@@ -158,7 +158,7 @@ Spreasheet Image diff --git a/spreadsheet_oca/views/spreadsheet_subscription_views.xml b/spreadsheet_oca/views/spreadsheet_subscription_views.xml new file mode 100644 index 00000000..f1780506 --- /dev/null +++ b/spreadsheet_oca/views/spreadsheet_subscription_views.xml @@ -0,0 +1,221 @@ + + + + + + spreadsheet.subscription.search + spreadsheet.subscription + + + + + + + + + + + + + + + + + + + + + + + + spreadsheet.subscription.list + spreadsheet.subscription + + + + + + + + + + + + +
diff --git a/spreadsheet_oca/views/spreadsheet_writeback_views.xml b/spreadsheet_oca/views/spreadsheet_writeback_views.xml new file mode 100644 index 00000000..6529a7c9 --- /dev/null +++ b/spreadsheet_oca/views/spreadsheet_writeback_views.xml @@ -0,0 +1,127 @@ + + + + + + spreadsheet.writeback.log.list + spreadsheet.writeback.log + + + + + + + + + + + + + + + + + spreadsheet.writeback.log.form + spreadsheet.writeback.log + +
+
+
+ + + + + + + + + + + + + + + + + + + +
+
+
+ + + + Writeback Log + spreadsheet.writeback.log + list,form + {} + + + + + spreadsheet.spreadsheet.form.writeback + spreadsheet.spreadsheet + + + + + + + + + + + spreadsheet.spreadsheet.form.writeback_enabled + spreadsheet.spreadsheet + + + + + + + + + +
diff --git a/spreadsheet_oca/views/spreadsheet_xlsx_export_views.xml b/spreadsheet_oca/views/spreadsheet_xlsx_export_views.xml new file mode 100644 index 00000000..a1449729 --- /dev/null +++ b/spreadsheet_oca/views/spreadsheet_xlsx_export_views.xml @@ -0,0 +1,28 @@ + + + + + spreadsheet.spreadsheet.xlsx.export.form (in spreadsheet_oca) + spreadsheet.spreadsheet + + + +