diff --git a/spreadsheet_oca/README.rst b/spreadsheet_oca/README.rst index 5babde5d..0f9338d2 100644 --- a/spreadsheet_oca/README.rst +++ b/spreadsheet_oca/README.rst @@ -1,7 +1,3 @@ -.. image:: https://odoo-community.org/readme-banner-image - :target: https://odoo-community.org/get-involved?utm_source=readme - :alt: Odoo Community Association - =============== Spreadsheet Oca =============== @@ -17,7 +13,7 @@ Spreadsheet Oca .. |badge1| image:: https://img.shields.io/badge/maturity-Beta-yellow.png :target: https://odoo-community.org/page/development-status :alt: Beta -.. |badge2| image:: https://img.shields.io/badge/license-AGPL--3-blue.png +.. |badge2| image:: https://img.shields.io/badge/licence-AGPL--3-blue.png :target: http://www.gnu.org/licenses/agpl-3.0-standalone.html :alt: License: AGPL-3 .. |badge3| image:: https://img.shields.io/badge/github-OCA%2Fspreadsheet-lightgray.png?logo=github diff --git a/spreadsheet_oca/__manifest__.py b/spreadsheet_oca/__manifest__.py index be5be297..6497b63f 100644 --- a/spreadsheet_oca/__manifest__.py +++ b/spreadsheet_oca/__manifest__.py @@ -5,7 +5,7 @@ "name": "Spreadsheet Oca", "summary": """ Allow to edit spreadsheets""", - "version": "18.0.1.2.3", + "version": "18.0.2.0.0", "license": "AGPL-3", "author": "CreuBlanca,Odoo Community Association (OCA)", "website": "https://github.com/OCA/spreadsheet", @@ -14,6 +14,9 @@ "security/security.xml", "security/ir.model.access.csv", "views/spreadsheet_spreadsheet.xml", + "views/spreadsheet_refresh_schedule_views.xml", + "views/spreadsheet_input_param_views.xml", + "data/mail_templates.xml", "data/spreadsheet_spreadsheet_import_mode.xml", "wizards/spreadsheet_select_row_number.xml", "wizards/spreadsheet_spreadsheet_import.xml", diff --git a/spreadsheet_oca/controllers/__init__.py b/spreadsheet_oca/controllers/__init__.py index 12a7e529..7a129da5 100644 --- a/spreadsheet_oca/controllers/__init__.py +++ b/spreadsheet_oca/controllers/__init__.py @@ -1 +1,2 @@ from . 
import main +from . import spreadsheet_input_params diff --git a/spreadsheet_oca/controllers/spreadsheet_input_params.py b/spreadsheet_oca/controllers/spreadsheet_input_params.py new file mode 100644 index 00000000..fbea9415 --- /dev/null +++ b/spreadsheet_oca/controllers/spreadsheet_input_params.py @@ -0,0 +1,31 @@ +# Copyright 2025 Ledo Enterprises LLC +# License AGPL-3.0 or later (https://www.gnu.org/licenses/agpl). +""" +JSON endpoint for spreadsheet input parameters. + +Provides a lightweight API so that a future JS plugin can read the current +named-parameter values without having to parse spreadsheet_raw itself. +""" + +from odoo.http import Controller, request, route + + +class SpreadsheetInputParamsController(Controller): + @route( + "/spreadsheet/input_params/", + type="json", + auth="user", + ) + def get_input_params(self, spreadsheet_id): + """Return {name: current_value} for all active parameters.""" + spreadsheet = request.env["spreadsheet.spreadsheet"].browse(spreadsheet_id) + if not spreadsheet.exists(): + return {"error": "Spreadsheet not found."} + spreadsheet.check_access("read") + params = request.env["spreadsheet.input_param"].search( + [ + ("spreadsheet_id", "=", spreadsheet_id), + ("active", "=", True), + ] + ) + return {p.name: p.current_value for p in params} diff --git a/spreadsheet_oca/data/mail_templates.xml b/spreadsheet_oca/data/mail_templates.xml new file mode 100644 index 00000000..6929b935 --- /dev/null +++ b/spreadsheet_oca/data/mail_templates.xml @@ -0,0 +1,56 @@ + + + + + + spreadsheet.refresh.notification + qweb + + +
+ +

No ODOO pivot data sources found in this spreadsheet.

+
+ + + +
+
+
+
+
+
+
+ + + + spreadsheet.refresh.warning + qweb + + +

+ [Warning] Refresh schedule + + + could not load pivot(s): + + + + + , + . + Check server logs for details. +

+
+
+
+
diff --git a/spreadsheet_oca/demo/demo_pivot_dashboard.json b/spreadsheet_oca/demo/demo_pivot_dashboard.json new file mode 100644 index 00000000..2ed2f20e --- /dev/null +++ b/spreadsheet_oca/demo/demo_pivot_dashboard.json @@ -0,0 +1,98 @@ +{ + "version": 21, + "sheets": [ + { + "id": "sheet_partners", + "name": "Partners by Country", + "colNumber": 26, + "rowNumber": 100, + "rows": {}, + "cols": { + "0": {"size": 220}, + "1": {"size": 140}, + "2": {"size": 140}, + "3": {"size": 140} + }, + "merges": [], + "cells": { + "A1": {"content": "=PIVOT(1)"} + }, + "conditionalFormats": [], + "figures": [], + "filterTables": [], + "tables": [], + "dataValidationRules": [], + "comments": {}, + "headerGroups": {"ROW": [], "COL": []}, + "areGridLinesVisible": true, + "isVisible": true + }, + { + "id": "sheet_regions", + "name": "Regions per Country", + "colNumber": 26, + "rowNumber": 100, + "rows": {}, + "cols": { + "0": {"size": 220}, + "1": {"size": 140} + }, + "merges": [], + "cells": { + "A1": {"content": "=PIVOT(2)"} + }, + "conditionalFormats": [], + "figures": [], + "filterTables": [], + "tables": [], + "dataValidationRules": [], + "comments": {}, + "headerGroups": {"ROW": [], "COL": []}, + "areGridLinesVisible": true, + "isVisible": true + } + ], + "settings": {}, + "customTableStyles": {}, + "styles": {}, + "formats": {}, + "borders": {}, + "revisionId": "START_REVISION", + "uniqueFigureIds": true, + "odooVersion": 12, + "globalFilters": [], + "pivots": { + "1": { + "type": "ODOO", + "id": "1", + "formulaId": "1", + "name": "Partners by Country & Type", + "model": "res.partner", + "domain": [["active", "=", true]], + "context": {}, + "measures": [{"id": "__count", "fieldName": "__count"}], + "rows": [{"fieldName": "country_id", "order": "desc"}], + "columns": [{"fieldName": "is_company"}], + "sortedColumn": null, + "fieldMatching": {} + }, + "2": { + "type": "ODOO", + "id": "2", + "formulaId": "2", + "name": "Regions per Country", + "model": "res.country.state", + 
"domain": [], + "context": {}, + "measures": [{"id": "__count", "fieldName": "__count"}], + "rows": [{"fieldName": "country_id", "order": "desc"}], + "columns": [], + "sortedColumn": null, + "fieldMatching": {} + } + }, + "pivotNextId": 3, + "lists": {}, + "listNextId": 1, + "chartOdooMenusReferences": {} +} diff --git a/spreadsheet_oca/demo/spreadsheet_spreadsheet.xml b/spreadsheet_oca/demo/spreadsheet_spreadsheet.xml index 11222ed5..7ea6fb5b 100644 --- a/spreadsheet_oca/demo/spreadsheet_spreadsheet.xml +++ b/spreadsheet_oca/demo/spreadsheet_spreadsheet.xml @@ -1,11 +1,130 @@ + + + + Müller GmbH + + + + + Hans Weber + + + + + Dupont SA + + + + + Marie Leclerc + + + + + British Solutions Ltd + + + + + James Clarke + + + + + Tanaka Industries + + + + + Silva Comércio Ltda + + + + + Ana Costa + + + + Patel Technologies Pvt Ltd + + + + + Priya Sharma + + + + + Outback Systems Pty Ltd + + + + + + - Demo spreadsheet + Sales Pipeline Summary + + + + Partner Pivot Dashboard + + + + + + + + Weekly Pipeline Refresh + + 1 + weeks + + + + + + + start_date + + Parameters!B2 + Start of reporting period + 2026-01-01 + + + + end_date + + Parameters!B3 + End of reporting period + 2026-12-31 + + + + growth_rate + + Parameters!B4 + Expected annual growth rate + 0.15 + diff --git a/spreadsheet_oca/models/__init__.py b/spreadsheet_oca/models/__init__.py index c5ec2360..8c45ed0c 100644 --- a/spreadsheet_oca/models/__init__.py +++ b/spreadsheet_oca/models/__init__.py @@ -1,6 +1,10 @@ +from . import cell_ref # noqa: F401 — shared helpers; must be first from . import spreadsheet_abstract from . import spreadsheet_spreadsheet_tag from . import spreadsheet_spreadsheet from . import spreadsheet_oca_revision from . import ir_websocket from . import spreadsheet_spreadsheet_import_mode +from . import pivot_data +from . import spreadsheet_refresh_schedule +from . 
import spreadsheet_input_param diff --git a/spreadsheet_oca/models/cell_ref.py b/spreadsheet_oca/models/cell_ref.py new file mode 100644 index 00000000..0e258ff1 --- /dev/null +++ b/spreadsheet_oca/models/cell_ref.py @@ -0,0 +1,140 @@ +# Copyright 2025 Ledo Enterprises LLC +# License AGPL-3.0 or later (https://www.gnu.org/licenses/agpl). +""" +Shared cell-reference helpers for spreadsheet_oca. + +Used by spreadsheet_alert, spreadsheet_scenario, and spreadsheet_input_param +to avoid duplicating cell-address parsing and raw-JSON access logic. +""" + +import re + +# Pre-compiled pattern: column letters + row number (1-based, no zero row). +_CELL_REF_RE = re.compile(r"^([A-Za-z]+)([1-9][0-9]*)$") + + +def _idx_to_cell_address(col_idx, row_idx): + """Convert 0-based (col, row) to cell address like 'A1', 'B3', 'AA12'.""" + col_str = "" + c = col_idx + while True: + col_str = chr(ord("A") + c % 26) + col_str + c = c // 26 - 1 + if c < 0: + break + return f"{col_str}{row_idx + 1}" + + +def parse_cell_ref(ref): + """ + Parse a bare cell reference like 'B3' or 'AA12' into (col_index, row_index). + + Both indices are 0-based to match the o-spreadsheet JSON cell-map format. + Returns (None, None) on invalid input (empty string, zero row, etc.). + """ + m = _CELL_REF_RE.match(ref.strip()) + if not m: + return None, None + col_str, row_str = m.group(1).upper(), m.group(2) + col_idx = 0 + for ch in col_str: + col_idx = col_idx * 26 + (ord(ch) - ord("A") + 1) + col_idx -= 1 # convert to 0-based + row_idx = int(row_str) - 1 # convert to 0-based + return col_idx, row_idx + + +def parse_cell_key(key): + """ + Parse a possibly-qualified cell key into (sheet_name_or_None, col_idx, row_idx). + + Supported formats: + - ``"B3"`` — no sheet qualifier; sheet_name = None + - ``"Sheet1!B3"`` — explicit sheet qualifier + """ + key = key.strip() + if "!" 
in key: + sheet_part, addr_part = key.split("!", 1) + sheet_name = sheet_part.strip() + else: + sheet_name = None + addr_part = key + col_idx, row_idx = parse_cell_ref(addr_part) + return sheet_name, col_idx, row_idx + + +def _resolve_sheet(sheets, sheet_name=None): + """Return the target sheet dict from a list of sheets. + + If *sheet_name* is given, searches case-insensitively; falls back to the + first sheet if not found. Returns None when *sheets* is empty. + """ + if not sheets: + return None + if sheet_name: + for s in sheets: + if s.get("name", "").lower() == sheet_name.lower(): + return s + return sheets[0] + + +def read_cell_value(spreadsheet_raw, cell_ref, sheet_name=None): + """ + Read the value of a cell from a spreadsheet_raw JSON dict. + + *cell_ref* may be bare (``"B3"``) or sheet-qualified (``"Sheet1!B3"``). + *sheet_name*, when provided, overrides any sheet qualifier embedded in + *cell_ref* and forces lookup in the named sheet (falling back to sheet 0). + + Return value priority: + 1. The cell's evaluated ``"value"`` key (set by o-spreadsheet when the + workbook is saved after formula evaluation in the browser). + 2. The cell's ``"content"`` string (for static / hand-typed cells). + 3. ``None`` when the cell, sheet, or raw JSON is absent. 
+ """ + sheets = (spreadsheet_raw or {}).get("sheets", []) + ref_sheet, col_idx, row_idx = parse_cell_key(cell_ref) + if col_idx is None: + return None + + target_name = sheet_name or ref_sheet + target_sheet = _resolve_sheet(sheets, target_name) + if target_sheet is None: + return None + + cells = target_sheet.get("cells", {}) + cell_addr = _idx_to_cell_address(col_idx, row_idx) + cell_data = cells.get(cell_addr, {}) + if not cell_data: + return None + + value = cell_data.get("value") + if value is None: + value = cell_data.get("content") + return value if value != "" else None + + +def write_cell_content(spreadsheet_raw, cell_ref, value, sheet_name=None): + """ + Write a value into ``cells[row][col]["content"]`` of *spreadsheet_raw* in-place. + + Creates nested dicts as needed. *cell_ref* and *sheet_name* follow the + same conventions as :func:`read_cell_value`. + + Returns the (mutated) *spreadsheet_raw* dict. + """ + sheets = (spreadsheet_raw or {}).get("sheets", []) + ref_sheet, col_idx, row_idx = parse_cell_key(cell_ref) + if col_idx is None: + return spreadsheet_raw + + target_name = sheet_name or ref_sheet + target_sheet = _resolve_sheet(sheets, target_name) + if target_sheet is None: + return spreadsheet_raw + + cells = target_sheet.setdefault("cells", {}) + cell_addr = _idx_to_cell_address(col_idx, row_idx) + cell_data = cells.setdefault(cell_addr, {}) + cell_data["content"] = str(value) if value is not None else "" + return spreadsheet_raw diff --git a/spreadsheet_oca/models/pivot_data.py b/spreadsheet_oca/models/pivot_data.py new file mode 100644 index 00000000..0233cc69 --- /dev/null +++ b/spreadsheet_oca/models/pivot_data.py @@ -0,0 +1,366 @@ +# Copyright 2025 Ledo Enterprises LLC +# License AGPL-3.0 or later (https://www.gnu.org/licenses/agpl). +""" +Server-side pivot data helper. 
+ +Replicates the read_group strategy used by the Odoo web PivotModel +(addons/web/static/src/views/pivot/pivot_model.js) to produce pivot table +data server-side, without executing any JavaScript. + +The JS pivot loads data by: + 1. Computing all row-groupby prefixes ("sections"): + rows=["partner_id","date:month"] → [[], ["partner_id"], + ["partner_id","date:month"]] + 2. Computing all col-groupby prefixes ("sections"): + cols=["stage_id"] → [[], ["stage_id"]] + 3. Taking the cartesian product (row_prefix × col_prefix) for "divisors". + 4. For each divisor [rowPrefix, colPrefix], calling: + read_group(domain, fields=measureSpecs, + groupby=rowPrefix+colPrefix, lazy=False) + +This module replicates that strategy in Python and exposes: + - ``get_pivot_data(model, domain, context, rows, columns, measures)`` + +Rows / columns are lists of dimension dicts: + {"fieldName": "date_order", "granularity": "month"} + {"fieldName": "partner_id"} (no granularity) + +Measures are lists of measure dicts: + {"fieldName": "amount_total", "aggregator": "sum"} + {"fieldName": "__count"} +""" + +import itertools +import logging + +_logger = logging.getLogger(__name__) + +# --------------------------------------------------------------------------- +# Helpers mirroring the JS helpers in pivot_model.js +# --------------------------------------------------------------------------- + +DATE_GRANULARITIES = {"day", "week", "month", "quarter", "year"} + + +def _dimension_to_groupby(dim): + """Convert a dimension dict to an Odoo read_group groupby string. + + {"fieldName": "date_order", "granularity": "month"} → "date_order:month" + {"fieldName": "partner_id"} → "partner_id" + """ + name = dim["fieldName"] + gran = dim.get("granularity") + return f"{name}:{gran}" if gran else name + + +def _sections(lst): + """Return all prefixes of lst including the empty prefix. + + sections(["a", "b", "c"]) → [[], ["a"], ["a", "b"], ["a", "b", "c"]] + + Mirrors the JS ``sections()`` helper. 
+ """ + return [lst[:i] for i in range(len(lst) + 1)] + + +def _measure_to_field_spec(measure): + """Convert a measure dict to a read_group ``fields`` element. + + {"fieldName": "amount_total", "aggregator": "sum"} → "amount_total:sum" + {"fieldName": "__count"} → "__count" + """ + if measure["fieldName"] == "__count": + return "__count" + agg = measure.get("aggregator") or "sum" + return f"{measure['fieldName']}:{agg}" + + +# --------------------------------------------------------------------------- +# Main computation +# --------------------------------------------------------------------------- + + +def _get_pivot_data(env, model_name, domain, context, row_dims, col_dims, measures): + """Compute pivot table data using the same read_group strategy as the JS. + + Returns a dict: + { + "fields": {fieldName: {type, string, ...}}, + "groups": [ + { + "rowValues": ["2026-01", ...], # normalised group key values + "colValues": ["Confirmed", ...], + "rowGroupBy": ["date_order:month"], + "colGroupBy": ["stage_id"], + "count": 12, + "measures": {"amount_total:sum": 9800.0, ...}, + }, + ... + ], + "rowDimensions": [{"fieldName": ..., "granularity": ...}, ...], + "colDimensions": [{"fieldName": ..., "granularity": ...}, ...], + "measureSpecs": ["amount_total:sum", ...], + } + """ + Model = env[model_name].with_context(**(context or {})) + + # ── 1. Fields metadata (needed for label resolution) ───────────────── + all_field_names = [d["fieldName"] for d in row_dims + col_dims] + [ + m["fieldName"] for m in measures if m["fieldName"] != "__count" + ] + # fields_get returns {fieldName: {type, string, selection, ...}} + fields_meta = Model.fields_get( + all_field_names, attributes=["type", "string", "selection"] + ) + + # ── 2. 
Build groupby strings ────────────────────────────────────────── + row_groupbys = [_dimension_to_groupby(d) for d in row_dims] + col_groupbys = [_dimension_to_groupby(d) for d in col_dims] + measure_specs = [_measure_to_field_spec(m) for m in measures] + + # Ensure count is always fetched (JS always adds __count implicitly) + field_specs_with_count = measure_specs + ( + [] if "__count" in measure_specs else ["__count"] + ) + + # ── 3. Compute divisors (cartesian product of all prefixes) ────────── + row_sections = _sections(row_groupbys) + col_sections = _sections(col_groupbys) + divisors = list(itertools.product(row_sections, col_sections)) + + # ── 4. Fire read_group for each divisor ────────────────────────────── + groups = [] + for row_prefix, col_prefix in divisors: + groupby = row_prefix + col_prefix + try: + results = Model.read_group( + domain=domain or [], + fields=field_specs_with_count, + groupby=groupby, + lazy=False, + ) + except Exception: + _logger.exception( + "read_group failed for model=%s groupby=%s", model_name, groupby + ) + continue + + for rg in results: + group_entry = { + "rowGroupBy": row_prefix, + "colGroupBy": col_prefix, + "rowValues": _extract_group_values(rg, row_prefix, fields_meta), + "colValues": _extract_group_values(rg, col_prefix, fields_meta), + "count": rg.get("__count", 0), + "measures": _extract_measures(rg, measures, fields_meta), + "domain": rg.get("__domain", []), + } + groups.append(group_entry) + + return { + "fields": fields_meta, + "groups": groups, + "rowDimensions": row_dims, + "colDimensions": col_dims, + "measureSpecs": measure_specs, + } + + +def _extract_group_values(rg_row, groupby_list, fields_meta): + """Extract normalised group values from a read_group result row. + + Many2one fields return (id, display_name) — we normalise to the id (int). + Date/datetime fields return a formatted string (Odoo already handles + granularity in the groupby key). 
+ """ + values = [] + for gb_spec in groupby_list: + field_name = gb_spec.split(":")[0] + raw = rg_row.get(gb_spec) or rg_row.get(field_name) + if raw is False or raw is None: + values.append(False) + elif isinstance(raw, list | tuple) and len(raw) == 2: + # Many2one: (id, display_name) — store id; JS uses id for grouping + values.append(raw[0]) + else: + values.append(raw) + return values + + +def _extract_measures(rg_row, measures, fields_meta): + """Extract measure values from a read_group result row.""" + result = {} + for measure in measures: + fname = measure["fieldName"] + agg = measure.get("aggregator") + if fname == "__count": + result["__count"] = rg_row.get("__count", 0) + continue + # read_group key: field_name (no aggregator suffix in result keys) + raw = rg_row.get(fname, 0) + if isinstance(raw, list | tuple): + # Many2one used as measure — count distinct occurrences + raw = 1 if raw else 0 + if raw is False: + raw = 0 + spec_key = f"{fname}:{agg}" if agg else fname + result[spec_key] = raw + return result + + +# --------------------------------------------------------------------------- +# Shared helpers for pivot iteration and HTML rendering +# --------------------------------------------------------------------------- + + +def collect_pivot_summaries(env, spreadsheet_raw, domain_transform=None): + """Iterate over ODOO-type pivots and return fresh data for each. + + Args: + env: Odoo environment. + spreadsheet_raw: dict — the spreadsheet's raw JSON data. + domain_transform: optional callable(domain) -> domain, applied to each + pivot's domain before querying (e.g. parameter substitution). + + Returns: + A tuple ``(summaries, failed_names)`` where *summaries* is a list of + ``{"name": ..., "model": ..., "result": ...}`` dicts, and + *failed_names* is a list of pivot display names that could not be loaded. 
+ """ + pivots = spreadsheet_raw.get("pivots", {}) + summaries = [] + failed_names = [] + for pivot_id, pivot_def in pivots.items(): + if pivot_def.get("type") != "ODOO": + continue + model_name = pivot_def.get("model") + pivot_name = pivot_def.get("name") or f"Pivot #{pivot_id}" + if not model_name or model_name not in env: + _logger.warning( + "collect_pivot_summaries: unknown model %r — skipping pivot %s", + model_name, + pivot_id, + ) + failed_names.append(pivot_name) + continue + try: + domain = pivot_def.get("domain", []) + if domain_transform: + domain = domain_transform(domain) + result = _get_pivot_data( + env, + model_name, + domain, + pivot_def.get("context", {}), + pivot_def.get("rows", []), + pivot_def.get("columns", []), + pivot_def.get("measures", []), + ) + summaries.append( + { + "name": pivot_name, + "model": model_name, + "result": result, + } + ) + except Exception: + _logger.exception( + "collect_pivot_summaries: failed to compute pivot %s", + pivot_id, + ) + failed_names.append(pivot_name) + return summaries, failed_names + + +def render_pivot_table_html(summary, max_rows=10): + """Render a single pivot summary as an HTML table string. + + Args: + summary: dict with keys ``"name"``, ``"model"``, ``"result"`` + (as returned by ``collect_pivot_summaries``). + max_rows: maximum number of detail rows to include before truncating. + + Returns: + str — HTML fragment for the pivot table. + """ + result = summary["result"] + name = summary["name"] + model = summary["model"] + parts = [] + + parts.append( + f'

{name}' + f' ({model})

' + ) + + row_dims = result.get("rowDimensions", []) + groups = result.get("groups", []) + + # Grand total row + grand_totals = [ + g for g in groups if g["rowGroupBy"] == [] and g["colGroupBy"] == [] + ] + if grand_totals: + gt = grand_totals[0] + count = gt.get("count", 0) + parts.append( + f'

Total records: {count}

' + ) + for key, val in gt.get("measures", {}).items(): + if key != "__count" and val is not None: + parts.append( + f'

{key}: {val}

' + ) + + # Row breakdown table + if row_dims: + row_gb = [d["fieldName"] for d in row_dims] + row_groups = [ + g for g in groups if g["rowGroupBy"] == row_gb and g["colGroupBy"] == [] + ] + if row_groups: + measure_keys = [ + k for k in (row_groups[0].get("measures") or {}) if k != "__count" + ] + headers = ["Group"] + measure_keys + ["Count"] + parts.append( + '' + ) + parts.append("") + for h in headers: + parts.append( + '' + ) + parts.append("") + for g in row_groups[:max_rows]: + label = ", ".join(str(v) for v in g["rowValues"]) + parts.append("") + parts.append( + f'' + ) + for mk in measure_keys: + val = g.get("measures", {}).get(mk, "") + parts.append( + '' + ) + parts.append( + ''.format(g.get("count", "")) + ) + parts.append("") + if len(row_groups) > max_rows: + colspan = len(headers) + extra = len(row_groups) - max_rows + more_text = f"and {extra} more rows" + parts.append( + f'" + ) + parts.append("
{h}
{label}{val}{}
' + f"… {more_text}
") + + return "".join(parts) diff --git a/spreadsheet_oca/models/spreadsheet_input_param.py b/spreadsheet_oca/models/spreadsheet_input_param.py new file mode 100644 index 00000000..c6c2e34e --- /dev/null +++ b/spreadsheet_oca/models/spreadsheet_input_param.py @@ -0,0 +1,166 @@ +# Copyright 2025 Ledo Enterprises LLC +# License AGPL-3.0 or later (https://www.gnu.org/licenses/agpl). +""" +Named input parameters. + +Users mark specific cells as named "input parameters." A parameter stores +the cell's current value so that: + + 1. Server-side domain substitution can reference it during scheduled refresh + (e.g. ``[("date", ">=", "%(start_date)s")]``). + 2. A future JS side-panel can re-query pivot data sources in real time when + an input cell changes (the JSON endpoint is already wired up). + +This is the Python backend layer only. The o-spreadsheet JS plugin work +required for live client-side re-query is a planned follow-on. +""" + +import logging +import re + +from odoo import _, api, fields, models +from odoo.exceptions import ValidationError + +from .cell_ref import parse_cell_key, read_cell_value + +_logger = logging.getLogger(__name__) + +# Valid parameter names: start with a lowercase letter, then lowercase letters, +# digits, or underscores. Mirrors Python %(name)s identifier conventions. +_NAME_RE = re.compile(r"^[a-z][a-z0-9_]*$") + + +class SpreadsheetInputParam(models.Model): + _name = "spreadsheet.input_param" + _description = "Spreadsheet Input Parameter" + _inherit = ["mail.thread"] + _order = "spreadsheet_id, name" + + name = fields.Char( + required=True, + tracking=True, + help=( + "Identifier used in domain templates as %(name)s.\n" + "Must start with a lowercase letter and contain only lowercase letters, " + "digits, and underscores." 
+ ), + ) + spreadsheet_id = fields.Many2one( + "spreadsheet.spreadsheet", + required=True, + ondelete="cascade", + index=True, + ) + cell_ref = fields.Char( + string="Cell Reference", + required=True, + tracking=True, + help=( + "Cell to read the parameter value from.\n" + "Use a bare reference (e.g. B3) or include the sheet name " + "(e.g. Sheet1!B3)." + ), + ) + description = fields.Char( + help="Optional human note explaining what this parameter controls.", + ) + active = fields.Boolean(default=True, tracking=True) + + # ── Synced value ────────────────────────────────────────────────────────── + current_value = fields.Char( + readonly=True, + copy=False, + help="Last value read from the spreadsheet cell.", + ) + last_synced = fields.Datetime( + readonly=True, + copy=False, + ) + + # ── Unique name per spreadsheet ─────────────────────────────────────────── + _sql_constraints = [ + ( + "unique_name_per_spreadsheet", + "UNIQUE(spreadsheet_id, name)", + "A parameter with this name already exists for this spreadsheet.", + ), + ] + + # ── Constraints ─────────────────────────────────────────────────────────── + + @api.constrains("cell_ref") + def _check_cell_ref(self): + for rec in self: + if not rec.cell_ref: + continue + _sheet, col, row = parse_cell_key(rec.cell_ref.strip()) + if col is None: + raise ValidationError( + _("Cell reference %(ref)r is not valid. Use 'B3' or 'Sheet1!B3'.") + % {"ref": rec.cell_ref} + ) + + @api.constrains("name") + def _check_name(self): + for rec in self: + if rec.name and not _NAME_RE.match(rec.name): + raise ValidationError( + _( + "Parameter name %(name)r is not valid. " + "It must start with a lowercase letter and contain only " + "lowercase letters, digits, and underscores." 
+ ) + % {"name": rec.name} + ) + + # ── Sync logic ──────────────────────────────────────────────────────────── + + def _sync_from_spreadsheet(self): + """Read this parameter's cell from spreadsheet_raw and store the value.""" + self.ensure_one() + raw = self.spreadsheet_id.sudo().spreadsheet_raw or {} + value = read_cell_value(raw, self.cell_ref.strip()) + now = fields.Datetime.now() + if value is not None: + self.write({"current_value": str(value), "last_synced": now}) + else: + self.write({"last_synced": now}) + + def action_sync_now(self): + """Manually trigger a sync for this parameter.""" + self.ensure_one() + self._sync_from_spreadsheet() + + @api.model + def _sync_all_for_spreadsheet(self, spreadsheet_id): + """Sync all active input parameters for the given spreadsheet record ID.""" + params = self.search( + [ + ("spreadsheet_id", "=", spreadsheet_id), + ("active", "=", True), + ] + ) + for param in params: + try: + param._sync_from_spreadsheet() + except Exception: + _logger.exception( + "Failed to sync input param %s (%s) for spreadsheet %s", + param.id, + param.name, + spreadsheet_id, + ) + + +class SpreadsheetRefreshScheduleInputParams(models.Model): + """Override _get_param_dict to provide input parameter values.""" + + _inherit = "spreadsheet.refresh.schedule" + + def _get_param_dict(self, spreadsheet): + """Sync all input params and return ``{name: value}`` for substitution.""" + self.env["spreadsheet.input_param"]._sync_all_for_spreadsheet(spreadsheet.id) + input_params = self.env["spreadsheet.input_param"].search( + [("spreadsheet_id", "=", spreadsheet.id), ("active", "=", True)] + ) + return {p.name: p.current_value or "" for p in input_params} diff --git a/spreadsheet_oca/models/spreadsheet_refresh_schedule.py b/spreadsheet_oca/models/spreadsheet_refresh_schedule.py new file mode 100644 index 00000000..8654f4fa --- /dev/null +++ b/spreadsheet_oca/models/spreadsheet_refresh_schedule.py @@ -0,0 +1,256 @@ +# Copyright 2025 Ledo Enterprises LLC +# 
License AGPL-3.0 or later (https://www.gnu.org/licenses/agpl). +""" +Scheduled data refresh. + +Allows users to configure a cron-based schedule that periodically: + 1. Reads all ODOO-type pivot definitions from a spreadsheet's JSON. + 2. Fetches fresh aggregate data via _get_pivot_data(). + 3. Posts a Chatter summary on the spreadsheet record and emails + subscribed partners. + +This fills a gap that neither Odoo CE nor Enterprise address: +auto-refresh without a user opening the browser. +""" + +import logging + +from markupsafe import Markup + +from odoo import _, api, fields, models +from odoo.exceptions import ValidationError + +from .pivot_data import collect_pivot_summaries, render_pivot_table_html + +_logger = logging.getLogger(__name__) + + +def _apply_param_substitution(domain, params): + """ + Replace ``%(name)s`` tokens in string leaf values of an Odoo domain list. + + Only the *value* position (index 2) of ``(field, operator, value)`` tuples + is touched — field names and operators are never modified. Nested domain + lists (e.g. ``["&", cond1, cond2]``) are handled recursively. + + *params* is a ``{name: value}`` dict (values are already strings). + Safe: substituted values come from the DB, not from user input at runtime. 
+ + Example:: + + domain = [("date", ">=", "%(start_date)s")] + params = {"start_date": "2026-01-01"} + → [("date", ">=", "2026-01-01")] + """ + if not isinstance(domain, list): + return domain + result = [] + for item in domain: + if isinstance(item, tuple | list) and len(item) == 3: + field, op, value = item + if isinstance(value, str): + try: + value = value % params + except KeyError as exc: + _logger.warning( + "Domain substitution: unknown param %s — token left as-is", exc + ) + except TypeError: + _logger.debug( + "Domain substitution: value %r has lone %%", + value, + ) + result.append((field, op, value)) + elif isinstance(item, list): + result.append(_apply_param_substitution(item, params)) + else: + result.append(item) + return result + + +_INTERVAL_TYPES = [ + ("hours", "Hour(s)"), + ("days", "Day(s)"), + ("weeks", "Week(s)"), + ("months", "Month(s)"), +] + + +class SpreadsheetRefreshSchedule(models.Model): + _name = "spreadsheet.refresh.schedule" + _description = "Spreadsheet Scheduled Data Refresh" + _inherit = ["mail.thread"] + _order = "spreadsheet_id, name" + + name = fields.Char(required=True, tracking=True) + spreadsheet_id = fields.Many2one( + "spreadsheet.spreadsheet", + required=True, + ondelete="cascade", + index=True, + ) + active = fields.Boolean(default=True, tracking=True) + cron_id = fields.Many2one( + "ir.cron", + string="Cron Job", + ondelete="set null", + readonly=True, + copy=False, + ) + last_run = fields.Datetime(readonly=True, copy=False) + notify_partner_ids = fields.Many2many( + "res.partner", + string="Notify Partners", + help="These partners receive an email summary after each refresh.", + ) + interval_number = fields.Integer( + default=1, + string="Every", + tracking=True, + ) + interval_type = fields.Selection( + _INTERVAL_TYPES, + default="weeks", + string="Interval", + tracking=True, + required=True, + ) + + @api.constrains("interval_number") + def _check_interval_number(self): + for rec in self: + if rec.interval_number < 
1: + raise ValidationError(_("Interval must be at least 1.")) + + # ── Cron lifecycle ──────────────────────────────────────────────────────── + + def action_activate(self): + """Create or reactivate the cron job for this schedule.""" + for rec in self: + if rec.cron_id: + rec.cron_id.sudo().write( + { + "active": True, + "interval_number": rec.interval_number, + "interval_type": rec.interval_type, + } + ) + else: + model_id = self.env["ir.model"].sudo()._get(self._name).id + cron = ( + self.env["ir.cron"] + .sudo() + .create( + { + "name": _( + "Spreadsheet Refresh: %(name)s", + name=rec.spreadsheet_id.name, + ), + "model_id": model_id, + "state": "code", + "code": f"model.browse({rec.id})._run_refresh()", + "interval_number": rec.interval_number, + "interval_type": rec.interval_type, + "active": True, + } + ) + ) + rec.cron_id = cron + + def action_deactivate(self): + """Pause (deactivate) the cron job without deleting it.""" + for rec in self: + if rec.cron_id: + rec.cron_id.sudo().write({"active": False}) + + def action_run_now(self): + """Manually trigger a refresh immediately.""" + self.ensure_one() + self._run_refresh() + + def unlink(self): + crons = self.mapped("cron_id").sudo() + result = super().unlink() + crons.unlink() + return result + + def _get_param_dict(self, spreadsheet): + """Return ``{name: value}`` dict for domain-template substitution. + + Default implementation returns an empty dict. The ``input_params`` + feature overrides this to sync named parameters from the spreadsheet + and return their current values. + """ + return {} + + # ── Refresh execution ──────────────────────────────────────────────────── + + def _run_refresh(self): + """ + Execute one refresh cycle: + - Read pivot definitions from spreadsheet_raw JSON. + - Compute fresh data for each ODOO pivot. + - Post Chatter summary; email notify_partner_ids. + - Record last_run timestamp. 
+ """ + self.ensure_one() + spreadsheet = self.spreadsheet_id + + param_dict = self._get_param_dict(spreadsheet) + + raw = spreadsheet.sudo().spreadsheet_raw or {} + + if not raw.get("pivots"): + _logger.info( + "Spreadsheet refresh %s: no pivots found in spreadsheet %s", + self.id, + spreadsheet.id, + ) + self.sudo().write({"last_run": fields.Datetime.now()}) + return + + summaries, failed_pivot_names = collect_pivot_summaries( + self.env, + raw, + domain_transform=lambda d: _apply_param_substitution(d, param_dict), + ) + + body = self._render_refresh_html(summaries) + partner_ids = self.notify_partner_ids.ids + + spreadsheet.sudo().message_post( + body=body, + subject=_("Data refresh: %(name)s", name=spreadsheet.name), + partner_ids=partner_ids, + subtype_xmlid="mail.mt_comment" if partner_ids else "mail.mt_note", + ) + + if failed_pivot_names: + warning_body = self.env["ir.qweb"]._render( + "spreadsheet_oca.spreadsheet_refresh_warning_template", + { + "schedule_name": self.name, + "failed_names": failed_pivot_names, + }, + ) + spreadsheet.sudo().message_post( + body=warning_body, + subtype_xmlid="mail.mt_note", + ) + + self.sudo().write({"last_run": fields.Datetime.now()}) + + # ── HTML rendering ──────────────────────────────────────────────────────── + + @api.model + def _render_refresh_html(self, summaries): + """Render a compact HTML summary of fresh pivot data. + + Uses the QWeb template ``spreadsheet_refresh_notification_template`` + which can be customised via Settings > Technical > Views. 
+ """ + pivot_html_list = [Markup(render_pivot_table_html(s)) for s in summaries] + return self.env["ir.qweb"]._render( + "spreadsheet_oca.spreadsheet_refresh_notification_template", + {"pivot_html_list": pivot_html_list}, + ) diff --git a/spreadsheet_oca/models/spreadsheet_spreadsheet.py b/spreadsheet_oca/models/spreadsheet_spreadsheet.py index 55a9ae9f..c1d2254a 100644 --- a/spreadsheet_oca/models/spreadsheet_spreadsheet.py +++ b/spreadsheet_oca/models/spreadsheet_spreadsheet.py @@ -56,10 +56,83 @@ class SpreadsheetSpreadsheet(models.Model): string="Tags", comodel_name="spreadsheet.spreadsheet.tag" ) + # ── DRY helper for read_group-based count fields ───────────────────────── + + def _compute_related_count(self, comodel, field_name, extra_domain=None): + """Compute a count field by grouping *comodel* on ``spreadsheet_id``. + + By default the domain filters on ``active=True``; pass *extra_domain* + to override (e.g. ``[("status", "!=", "error")]`` for writeback logs). + """ + domain = [("spreadsheet_id", "in", self.ids)] + if extra_domain is not None: + domain += extra_domain + else: + domain.append(("active", "=", True)) + counts = self.env[comodel].read_group( + domain, ["spreadsheet_id"], ["spreadsheet_id"] + ) + count_map = {c["spreadsheet_id"][0]: c["spreadsheet_id_count"] for c in counts} + for rec in self: + rec[field_name] = count_map.get(rec.id, 0) + + # ── Input Parameters ──────────────────────────────────────────────────── + input_param_count = fields.Integer( + compute="_compute_input_param_count", string="Input Parameters" + ) + + def _compute_input_param_count(self): + self._compute_related_count("spreadsheet.input_param", "input_param_count") + + def action_open_input_params(self): + self.ensure_one() + return { + "type": "ir.actions.act_window", + "name": _("Input Parameters"), + "res_model": "spreadsheet.input_param", + "view_mode": "list,form", + "domain": [("spreadsheet_id", "=", self.id)], + "context": { + "default_spreadsheet_id": self.id, + 
"search_default_active": 1, + }, + } + @api.depends("name") def _compute_filename(self): for record in self: - record.filename = "%s.json" % (self.name or _("Unnamed")) + record.filename = f"{record.name or _('Unnamed')}.json" + + # ── Refresh Schedules ─────────────────────────────────────────────────── + refresh_schedule_count = fields.Integer( + compute="_compute_refresh_schedule_count", string="Refresh Schedules" + ) + + def _compute_refresh_schedule_count(self): + self._compute_related_count( + "spreadsheet.refresh.schedule", "refresh_schedule_count" + ) + + def action_open_refresh_schedules(self): + self.ensure_one() + return { + "type": "ir.actions.act_window", + "name": _("Refresh Schedules"), + "res_model": "spreadsheet.refresh.schedule", + "view_mode": "list,form", + "domain": [("spreadsheet_id", "=", self.id)], + "context": {"default_spreadsheet_id": self.id}, + } + + # ── Pivot Data ──────────────────────────────────────────────────────────── + @api.model + def get_pivot_data(self, model_name, domain, context, row_dims, col_dims, measures): + """Return pivot table data computed server-side (JSON-RPC entry point).""" + from .pivot_data import _get_pivot_data + + return _get_pivot_data( + self.env, model_name, domain, context, row_dims, col_dims, measures + ) def create_document_from_attachment(self, attachment_ids): attachments = self.env["ir.attachment"].browse(attachment_ids) diff --git a/spreadsheet_oca/security/ir.model.access.csv b/spreadsheet_oca/security/ir.model.access.csv index 1898b166..c0bf0ee9 100644 --- a/spreadsheet_oca/security/ir.model.access.csv +++ b/spreadsheet_oca/security/ir.model.access.csv @@ -6,3 +6,7 @@ access_spreadsheet_import_mode,access_spreadsheet_oca_revision,model_spreadsheet access_spreadsheet_select_row_number,access_spreadsheet_select_row_number,model_spreadsheet_select_row_number,base.group_user,1,1,1,1 
access_spreadsheet_spreadsheet_tag,access_spreadsheet_spreadsheet_tag,model_spreadsheet_spreadsheet_tag,spreadsheet_oca.group_user,1,0,0,0 access_spreadsheet_spreadsheet_manager_tag,access_spreadsheet_spreadsheet_manager_tag,model_spreadsheet_spreadsheet_tag,spreadsheet_oca.group_manager,1,1,1,1 +access_spreadsheet_refresh_schedule_user,access_spreadsheet_refresh_schedule_user,model_spreadsheet_refresh_schedule,base.group_user,1,0,0,0 +access_spreadsheet_refresh_schedule_manager,access_spreadsheet_refresh_schedule_manager,model_spreadsheet_refresh_schedule,spreadsheet_oca.group_manager,1,1,1,1 +access_spreadsheet_input_param_user,access_spreadsheet_input_param_user,model_spreadsheet_input_param,base.group_user,1,0,0,0 +access_spreadsheet_input_param_manager,access_spreadsheet_input_param_manager,model_spreadsheet_input_param,spreadsheet_oca.group_manager,1,1,1,1 diff --git a/spreadsheet_oca/security/security.xml b/spreadsheet_oca/security/security.xml index aa94100a..edea2998 100644 --- a/spreadsheet_oca/security/security.xml +++ b/spreadsheet_oca/security/security.xml @@ -62,4 +62,46 @@ [('group_ids','in', user.groups_id.ids)] + + + + + Refresh Schedule: follow spreadsheet access + + + [ + '|', '|', '|', + ('spreadsheet_id.owner_id', '=', user.id), + ('spreadsheet_id.contributor_ids', '=', user.id), + ('spreadsheet_id.contributor_group_ids', 'in', user.groups_id.ids), + ('spreadsheet_id.reader_ids', '=', user.id), + ] + + + Refresh Schedule: manager full access + + + [(1, '=', 1)] + + + + Input Param: follow spreadsheet access + + + [ + '|', '|', '|', + ('spreadsheet_id.owner_id', '=', user.id), + ('spreadsheet_id.contributor_ids', '=', user.id), + ('spreadsheet_id.contributor_group_ids', 'in', user.groups_id.ids), + ('spreadsheet_id.reader_ids', '=', user.id), + ] + + + Input Param: manager full access + + + [(1, '=', 1)] + diff --git a/spreadsheet_oca/static/description/index.html b/spreadsheet_oca/static/description/index.html index 69ccb105..c6303f28 100644 
--- a/spreadsheet_oca/static/description/index.html +++ b/spreadsheet_oca/static/description/index.html @@ -3,7 +3,7 @@ -README.rst +Spreadsheet Oca -
+
+

Spreadsheet Oca

- - -Odoo Community Association - -
-

Spreadsheet Oca

-

Beta License: AGPL-3 OCA/spreadsheet Translate me on Weblate Try me on Runboat

+

Beta License: AGPL-3 OCA/spreadsheet Translate me on Weblate Try me on Runboat

This module adds functionality for adding and editing Spreadsheets using Odoo CE.

It is an alternative to the proprietary module spreadsheet_edition @@ -397,9 +392,9 @@

Spreadsheet Oca

-

Usage

+

Usage

-

Create a new spreadsheet

+

Create a new spreadsheet

-

Development

+

Development

If you want to develop custom business functions, you can add your own, based on the file https://github.com/odoo/odoo/blob/16.0/addons/spreadsheet_account/static/src/accounting_functions.js

-

Bug Tracker

+

Bug Tracker

Bugs are tracked on GitHub Issues. In case of trouble, please check there if your issue has already been reported. If you spotted it first, help us to smash it by providing a detailed and welcomed @@ -461,15 +456,15 @@

Bug Tracker

Do not contact contributors directly about support or help with technical issues.

-

Credits

+

Credits

-

Authors

+

Authors

  • CreuBlanca
-

Contributors

+

Contributors

-

Maintainers

+

Maintainers

This module is maintained by the OCA.

Odoo Community Association @@ -500,6 +495,5 @@

Maintainers

-
diff --git a/spreadsheet_oca/tests/__init__.py b/spreadsheet_oca/tests/__init__.py new file mode 100644 index 00000000..16ad3d23 --- /dev/null +++ b/spreadsheet_oca/tests/__init__.py @@ -0,0 +1,3 @@ +from . import test_pivot_data +from . import test_refresh_schedule +from . import test_input_param diff --git a/spreadsheet_oca/tests/test_input_param.py b/spreadsheet_oca/tests/test_input_param.py new file mode 100644 index 00000000..14319f24 --- /dev/null +++ b/spreadsheet_oca/tests/test_input_param.py @@ -0,0 +1,379 @@ +# Copyright 2025 Ledo Enterprises LLC +# License AGPL-3.0 or later (https://www.gnu.org/licenses/agpl). +""" +Tests for named input parameters. + +Covers: + - cell_ref.py helper functions (pure logic, no DB). + - spreadsheet.input_param model creation, constraints, and sync. + - _apply_param_substitution domain helper. + - Smart button count and action. + - Full refresh-cycle integration (param sync + domain substitution). + - Controller endpoint logic (model-level). +""" + +from odoo.exceptions import ValidationError +from odoo.tests import TransactionCase +from odoo.tools import mute_logger + +from ..models.cell_ref import ( + parse_cell_key, + parse_cell_ref, + read_cell_value, + write_cell_content, +) +from ..models.spreadsheet_refresh_schedule import _apply_param_substitution + + +class TestCellRefHelpers(TransactionCase): + """Unit tests for cell_ref.py — pure logic, no DB required.""" + + # ── parse_cell_ref ──────────────────────────────────────────────────────── + + def test_parse_cell_ref_simple(self): + self.assertEqual(parse_cell_ref("A1"), (0, 0)) + self.assertEqual(parse_cell_ref("B3"), (1, 2)) + self.assertEqual(parse_cell_ref("Z1"), (25, 0)) + + def test_parse_cell_ref_multi_letter(self): + col, row = parse_cell_ref("AA1") + self.assertEqual(col, 26) + self.assertEqual(row, 0) + col2, row2 = parse_cell_ref("AB12") + self.assertEqual(col2, 27) + self.assertEqual(row2, 11) + + def test_parse_cell_ref_invalid(self): + 
self.assertEqual(parse_cell_ref(""), (None, None)) + self.assertEqual(parse_cell_ref("A0"), (None, None)) # zero row forbidden + self.assertEqual(parse_cell_ref("12"), (None, None)) # no letters + self.assertEqual(parse_cell_ref("ZZZ"), (None, None)) # no row number + + def test_parse_cell_ref_case_insensitive(self): + self.assertEqual(parse_cell_ref("b3"), parse_cell_ref("B3")) + + # ── parse_cell_key ──────────────────────────────────────────────────────── + + def test_parse_cell_key_bare(self): + sheet, col, row = parse_cell_key("B3") + self.assertIsNone(sheet) + self.assertEqual(col, 1) + self.assertEqual(row, 2) + + def test_parse_cell_key_with_sheet(self): + sheet, col, row = parse_cell_key("Sheet1!B3") + self.assertEqual(sheet, "Sheet1") + self.assertEqual(col, 1) + self.assertEqual(row, 2) + + def test_parse_cell_key_invalid(self): + sheet, col, row = parse_cell_key("ZZZ") + self.assertIsNone(col) + + # ── read_cell_value ─────────────────────────────────────────────────────── + + def _make_raw(self, row, col, content=None, value=None): + cell = {} + if content is not None: + cell["content"] = content + if value is not None: + cell["value"] = value + # Convert 0-based (col, row) to "A1" format cell address + col_str = chr(ord("A") + col) + cell_addr = f"{col_str}{row + 1}" + return { + "sheets": [ + { + "id": "s1", + "name": "Sheet1", + "cells": {cell_addr: cell}, + } + ] + } + + def test_read_cell_value_numeric(self): + raw = self._make_raw(2, 1, content="42") # B3 + val = read_cell_value(raw, "B3") + self.assertEqual(val, "42") + + def test_read_cell_value_uses_evaluated_value(self): + raw = self._make_raw(2, 1, content="=1+1", value=2) + val = read_cell_value(raw, "B3") + self.assertEqual(val, 2) + + def test_read_cell_value_string(self): + raw = self._make_raw(0, 0, content="hello") # A1 + val = read_cell_value(raw, "A1") + self.assertEqual(val, "hello") + + def test_read_cell_value_missing(self): + raw = {"sheets": [{"id": "s1", "name": "Sheet1", 
"cells": {}}]} + val = read_cell_value(raw, "B3") + self.assertIsNone(val) + + def test_read_cell_value_empty_raw(self): + self.assertIsNone(read_cell_value({}, "B3")) + self.assertIsNone(read_cell_value(None, "B3")) + + # ── write_cell_content ──────────────────────────────────────────────────── + + def test_write_cell_content(self): + raw = {"sheets": [{"id": "s1", "name": "Sheet1", "cells": {}}]} + result = write_cell_content(raw, "B3", "hello") + cell = result["sheets"][0]["cells"]["B3"] + self.assertEqual(cell["content"], "hello") + + def test_write_cell_content_creates_nested_dicts(self): + raw = {"sheets": [{"id": "s1", "name": "Sheet1"}]} + result = write_cell_content(raw, "A1", 99) + self.assertIn("cells", result["sheets"][0]) + + +class TestInputParam(TransactionCase): + @classmethod + def setUpClass(cls): + super().setUpClass() + cls.spreadsheet = cls.env["spreadsheet.spreadsheet"].create( + {"name": "Input Param Test Spreadsheet"} + ) + # Minimal spreadsheet raw with a known cell value at B3. 
+ cls.raw_with_b3 = { + "version": 1, + "sheets": [ + { + "id": "s1", + "name": "Sheet1", + "cells": { + "B3": {"content": "2026-01-01"}, + }, + } + ], + } + + def _make_param(self, **kwargs): + defaults = { + "name": "start_date", + "spreadsheet_id": self.spreadsheet.id, + "cell_ref": "B3", + } + defaults.update(kwargs) + return self.env["spreadsheet.input_param"].create(defaults) + + # ── Basic creation ──────────────────────────────────────────────────────── + + def test_create_param(self): + p = self._make_param() + self.assertEqual(p.name, "start_date") + self.assertEqual(p.spreadsheet_id, self.spreadsheet) + self.assertEqual(p.cell_ref, "B3") + self.assertTrue(p.active) + self.assertFalse(p.current_value) + self.assertFalse(p.last_synced) + + # ── Constraint: cell_ref ───────────────────────────────────────────────── + + def test_invalid_cell_ref_raises(self): + with self.assertRaises(ValidationError): + self._make_param(cell_ref="ZZZ") # letters only, no row number + + def test_valid_qualified_cell_ref(self): + p = self._make_param(name="q_ref", cell_ref="Assumptions!C5") + self.assertEqual(p.cell_ref, "Assumptions!C5") + + # ── Constraint: name ───────────────────────────────────────────────────── + + def test_invalid_name_raises_uppercase(self): + with self.assertRaises(ValidationError): + self._make_param(name="StartDate") # uppercase forbidden + + def test_invalid_name_raises_spaces(self): + with self.assertRaises(ValidationError): + self._make_param(name="start date") # spaces forbidden + + def test_invalid_name_raises_leading_digit(self): + with self.assertRaises(ValidationError): + self._make_param(name="1param") # must start with letter + + # ── Constraint: unique name per spreadsheet ─────────────────────────────── + + @mute_logger("odoo.sql_db") + def test_duplicate_name_raises(self): + self._make_param(name="dup_param") + # SQL UNIQUE constraint propagates as psycopg2.errors.UniqueViolation + # (IntegrityError subclass). 
Catch broadly to stay decoupled from + # psycopg2 internals and compatible across Odoo versions. + with self.assertRaises(Exception): # noqa: B017 + self._make_param(name="dup_param") + + # ── Sync from spreadsheet ───────────────────────────────────────────────── + + def test_sync_reads_cell(self): + self.spreadsheet.write({"spreadsheet_raw": self.raw_with_b3}) + p = self._make_param(name="sd") + p._sync_from_spreadsheet() + self.assertEqual(p.current_value, "2026-01-01") + + def test_sync_updates_last_synced(self): + self.spreadsheet.write({"spreadsheet_raw": self.raw_with_b3}) + p = self._make_param(name="sd2") + self.assertFalse(p.last_synced) + p._sync_from_spreadsheet() + self.assertTrue(p.last_synced) + + def test_sync_missing_cell_no_error(self): + """Syncing a cell that doesn't exist: no error, current_value stays None.""" + self.spreadsheet.write( + { + "spreadsheet_raw": { + "sheets": [{"id": "s1", "name": "Sheet1", "cells": {}}] + } + } + ) + p = self._make_param(name="sd3", cell_ref="Z99") + p._sync_from_spreadsheet() + self.assertFalse(p.current_value) + self.assertTrue(p.last_synced) + + def test_action_sync_now(self): + """action_sync_now() is a thin wrapper that calls _sync_from_spreadsheet.""" + self.spreadsheet.write({"spreadsheet_raw": self.raw_with_b3}) + p = self._make_param(name="sd_action") + p.action_sync_now() + self.assertEqual(p.current_value, "2026-01-01") + + def test_sync_all_for_spreadsheet(self): + """_sync_all_for_spreadsheet syncs all active params for a given spreadsheet.""" + self.spreadsheet.write({"spreadsheet_raw": self.raw_with_b3}) + p1 = self._make_param(name="sa_p1") + p2 = self._make_param(name="sa_p2", cell_ref="B3") + p3 = self._make_param( + name="sa_p3", active=False + ) # archived — should be skipped + self.env["spreadsheet.input_param"]._sync_all_for_spreadsheet( + self.spreadsheet.id + ) + p1.invalidate_recordset() + p2.invalidate_recordset() + p3.invalidate_recordset() + self.assertEqual(p1.current_value, 
"2026-01-01") + self.assertEqual(p2.current_value, "2026-01-01") + self.assertFalse(p3.current_value) # archived param not synced + + # ── Domain template substitution ───────────────────────────────────────── + + def test_param_substitution_simple(self): + domain = [("date", ">=", "%(start_date)s")] + params = {"start_date": "2026-01-01"} + result = _apply_param_substitution(domain, params) + self.assertEqual(result, [("date", ">=", "2026-01-01")]) + + def test_param_substitution_no_match(self): + """Domain without tokens is returned unchanged.""" + domain = [("state", "=", "done")] + result = _apply_param_substitution(domain, {"start_date": "2026-01-01"}) + self.assertEqual(result, [("state", "=", "done")]) + + def test_param_substitution_nested(self): + """Operator strings and nested lists are handled correctly.""" + domain = ["&", ("date", ">=", "%(start)s"), ("date", "<=", "%(end)s")] + params = {"start": "2026-01-01", "end": "2026-12-31"} + result = _apply_param_substitution(domain, params) + self.assertEqual( + result, + ["&", ("date", ">=", "2026-01-01"), ("date", "<=", "2026-12-31")], + ) + + @mute_logger("odoo.addons.spreadsheet_oca.models.spreadsheet_refresh_schedule") + def test_param_substitution_unknown_param_no_error(self): + """Unknown param name logs a warning but does not raise.""" + domain = [("date", ">=", "%(missing)s")] + # Should not raise — logs a warning and leaves token in place. + result = _apply_param_substitution(domain, {}) + self.assertEqual(result, [("date", ">=", "%(missing)s")]) + + # ── Smart button count ──────────────────────────────────────────────────── + + def test_smart_button_count(self): + other_ss = self.env["spreadsheet.spreadsheet"].create({"name": "Other SS"}) + self._make_param(name="cnt_a") + p2 = self._make_param(name="cnt_b") + self.spreadsheet.invalidate_recordset() + self.assertEqual(self.spreadsheet.input_param_count, 2) + # Archiving reduces count. 
+ p2.write({"active": False}) + self.spreadsheet.invalidate_recordset() + self.assertEqual(self.spreadsheet.input_param_count, 1) + # Other spreadsheet count unaffected. + self.assertEqual(other_ss.input_param_count, 0) + + # ── Action open input params ────────────────────────────────────────────── + + def test_action_open_input_params(self): + action = self.spreadsheet.action_open_input_params() + self.assertEqual(action["type"], "ir.actions.act_window") + self.assertEqual(action["res_model"], "spreadsheet.input_param") + self.assertIn(("spreadsheet_id", "=", self.spreadsheet.id), action["domain"]) + + # ── Full refresh-cycle integration ──────────────────────────────────────── + + def test_refresh_uses_params(self): + """Param is synced and substituted in the domain during a refresh cycle.""" + self.spreadsheet.write({"spreadsheet_raw": self.raw_with_b3}) + param = self._make_param(name="filter_name") + # Pivot domain references the param. + raw = { + "version": 1, + "sheets": [ + { + "id": "s1", + "name": "Sheet1", + "cells": {"B3": {"content": "Administrator"}}, + } + ], + "pivots": { + "1": { + "type": "ODOO", + "model": "res.partner", + "name": "Partners", + "domain": [("name", "=", "%(filter_name)s")], + "rows": [], + "columns": [], + "measures": [{"fieldName": "id", "aggregator": "count"}], + } + }, + } + self.spreadsheet.write({"spreadsheet_raw": raw}) + schedule = self.env["spreadsheet.refresh.schedule"].create( + { + "name": "Integration Test", + "spreadsheet_id": self.spreadsheet.id, + } + ) + schedule._run_refresh() + # Param should have been synced. 
+ param.invalidate_recordset() + self.assertEqual(param.current_value, "Administrator") + self.assertTrue(schedule.last_run) + + # ── Controller endpoint logic ───────────────────────────────────────────── + + def test_controller_returns_param_dict(self): + """The endpoint returns {name: current_value} for active params.""" + other_ss = self.env["spreadsheet.spreadsheet"].create({"name": "Ctrl SS"}) + p = self.env["spreadsheet.input_param"].create( + { + "name": "ctrl_param", + "spreadsheet_id": other_ss.id, + "cell_ref": "A1", + } + ) + p.write({"current_value": "test_value"}) + # Simulate what the controller does (model-level, no HTTP layer). + params = self.env["spreadsheet.input_param"].search( + [ + ("spreadsheet_id", "=", other_ss.id), + ("active", "=", True), + ] + ) + result = {q.name: q.current_value for q in params} + self.assertEqual(result, {"ctrl_param": "test_value"}) diff --git a/spreadsheet_oca/tests/test_pivot_data.py b/spreadsheet_oca/tests/test_pivot_data.py new file mode 100644 index 00000000..c569ec5e --- /dev/null +++ b/spreadsheet_oca/tests/test_pivot_data.py @@ -0,0 +1,179 @@ +# Copyright 2025 Ledo Enterprises LLC +# License AGPL-3.0 or later (https://www.gnu.org/licenses/agpl). +""" +Tests for server-side pivot data computation. + +These tests verify that _get_pivot_data() produces the same grouping +structure that the Odoo JS PivotModel would produce via read_group. 
+ +Run against odoo_test (which has sale, account installed): + docker exec -i odoo-prod odoo test -d odoo_test \ + --test-tags spreadsheet_oca.TestPivotData --stop-after-init +""" + +from odoo.tests import TransactionCase + +from ..models.pivot_data import _dimension_to_groupby, _get_pivot_data, _sections + + +class TestPivotDataHelpers(TransactionCase): + """Unit tests for the pure-Python helpers (no DB needed).""" + + def test_sections_empty(self): + self.assertEqual(_sections([]), [[]]) + + def test_sections_one(self): + self.assertEqual(_sections(["a"]), [[], ["a"]]) + + def test_sections_two(self): + self.assertEqual(_sections(["a", "b"]), [[], ["a"], ["a", "b"]]) + + def test_dimension_no_granularity(self): + self.assertEqual( + _dimension_to_groupby({"fieldName": "partner_id"}), "partner_id" + ) + + def test_dimension_with_granularity(self): + self.assertEqual( + _dimension_to_groupby({"fieldName": "date_order", "granularity": "month"}), + "date_order:month", + ) + + +class TestPivotData(TransactionCase): + """Integration tests using res.partner (always available, no demo needed).""" + + @classmethod + def setUpClass(cls): + super().setUpClass() + # Create a handful of partners in different countries to group by + cls.country_be = cls.env.ref("base.be") + cls.country_us = cls.env.ref("base.us") + cls.partners = cls.env["res.partner"].create( + [ + {"name": "Alpha", "country_id": cls.country_be.id, "is_company": True}, + {"name": "Beta", "country_id": cls.country_be.id, "is_company": True}, + {"name": "Gamma", "country_id": cls.country_us.id, "is_company": True}, + {"name": "Delta", "country_id": cls.country_us.id, "is_company": False}, + ] + ) + cls.domain = [("id", "in", cls.partners.ids)] + + # ── Helpers ───────────────────────────────────────────────────────────── + + def _run(self, row_dims, col_dims, measures): + return _get_pivot_data( + self.env, + "res.partner", + self.domain, + {}, + row_dims, + col_dims, + measures, + ) + + def _groups_for(self, 
result, row_prefix, col_prefix): + """Return groups matching the given row/col groupby prefix.""" + return [ + g + for g in result["groups"] + if g["rowGroupBy"] == row_prefix and g["colGroupBy"] == col_prefix + ] + + # ── Grand-total (no groupby) ──────────────────────────────────────────── + + def test_grand_total_count(self): + """With no dims, one group with count = number of partners.""" + result = self._run([], [], [{"fieldName": "__count"}]) + groups = self._groups_for(result, [], []) + self.assertEqual(len(groups), 1) + self.assertEqual(groups[0]["count"], 4) + + # ── Single row groupby ────────────────────────────────────────────────── + + def test_row_groupby_country(self): + """Row groupby country_id → one group per country + grand total.""" + row_dims = [{"fieldName": "country_id"}] + result = self._run(row_dims, [], [{"fieldName": "__count"}]) + + # Grand total (rowGroupBy=[], colGroupBy=[]) + totals = self._groups_for(result, [], []) + self.assertEqual(len(totals), 1) + self.assertEqual(totals[0]["count"], 4) + + # Per-country groups (rowGroupBy=["country_id"], colGroupBy=[]) + country_groups = self._groups_for(result, ["country_id"], []) + self.assertEqual(len(country_groups), 2) + counts_by_country = {g["rowValues"][0]: g["count"] for g in country_groups} + self.assertEqual(counts_by_country[self.country_be.id], 2) + self.assertEqual(counts_by_country[self.country_us.id], 2) + + # ── Row + col groupby ─────────────────────────────────────────────────── + + def test_row_and_col_groupby(self): + """Row=country_id, Col=is_company → 2×2 cell values.""" + row_dims = [{"fieldName": "country_id"}] + col_dims = [{"fieldName": "is_company"}] + result = self._run(row_dims, col_dims, [{"fieldName": "__count"}]) + + # Divisors: ([], []) ([], [is_company]) + # ([country_id], []) ([country_id], [is_company]) + # → 4 divisors, each producing N read_group rows + divisor_keys = { + (tuple(g["rowGroupBy"]), tuple(g["colGroupBy"])) for g in result["groups"] + } + 
self.assertIn(((), ()), divisor_keys) + self.assertIn(((), ("is_company",)), divisor_keys) + self.assertIn((("country_id",), ()), divisor_keys) + self.assertIn((("country_id",), ("is_company",)), divisor_keys) + + # BE / is_company=True → Alpha + Beta = 2 + cell_groups = self._groups_for(result, ["country_id"], ["is_company"]) + be_company = [ + g + for g in cell_groups + if g["rowValues"] == [self.country_be.id] and g["colValues"] == [True] + ] + self.assertEqual(len(be_company), 1) + self.assertEqual(be_company[0]["count"], 2) + + # US / is_company=False → Delta = 1 + us_individual = [ + g + for g in cell_groups + if g["rowValues"] == [self.country_us.id] and g["colValues"] == [False] + ] + self.assertEqual(len(us_individual), 1) + self.assertEqual(us_individual[0]["count"], 1) + + # ── Return structure ──────────────────────────────────────────────────── + + def test_return_fields_metadata(self): + """Result includes fields metadata for all used fields.""" + row_dims = [{"fieldName": "country_id"}] + result = self._run(row_dims, [], [{"fieldName": "__count"}]) + self.assertIn("country_id", result["fields"]) + self.assertEqual(result["fields"]["country_id"]["type"], "many2one") + + def test_return_dimensions_and_specs(self): + """Result echoes back row/col dims and measure specs.""" + row_dims = [{"fieldName": "country_id"}] + measures = [{"fieldName": "__count"}] + result = self._run(row_dims, [], measures) + self.assertEqual(result["rowDimensions"], row_dims) + self.assertEqual(result["colDimensions"], []) + self.assertEqual(result["measureSpecs"], ["__count"]) + + # ── Domain filtering ──────────────────────────────────────────────────── + + def test_domain_filters_correctly(self): + """Domain restricts records — only BE partners.""" + be_domain = [ + ("id", "in", self.partners.ids), + ("country_id", "=", self.country_be.id), + ] + result = _get_pivot_data( + self.env, "res.partner", be_domain, {}, [], [], [{"fieldName": "__count"}] + ) + totals = 
self._groups_for(result, [], []) + self.assertEqual(totals[0]["count"], 2) diff --git a/spreadsheet_oca/tests/test_refresh_schedule.py b/spreadsheet_oca/tests/test_refresh_schedule.py new file mode 100644 index 00000000..ff354aa1 --- /dev/null +++ b/spreadsheet_oca/tests/test_refresh_schedule.py @@ -0,0 +1,233 @@ +# Copyright 2025 Ledo Enterprises LLC +# License AGPL-3.0 or later (https://www.gnu.org/licenses/agpl). +""" +Tests for spreadsheet.refresh.schedule (scheduled cron refresh). + +These tests verify: + - Schedule creation and cron lifecycle (activate / deactivate / run now). + - _run_refresh() correctly reads pivot defs from spreadsheet_raw, + calls _get_pivot_data(), and posts a Chatter message. + - Graceful handling of edge cases (no pivots, unknown model). + - HTML renderer produces non-empty output. + - Smart button count on spreadsheet.spreadsheet. + +Run: + docker exec -i odoo-prod odoo test -d odoo_test \\ + --test-tags spreadsheet_oca.TestRefreshSchedule --stop-after-init +""" + +from odoo.tests import TransactionCase +from odoo.tools import mute_logger + + +class TestRefreshSchedule(TransactionCase): + @classmethod + def setUpClass(cls): + super().setUpClass() + cls.spreadsheet = cls.env["spreadsheet.spreadsheet"].create( + {"name": "Test Spreadsheet"} + ) + cls.partner = cls.env["res.partner"].create({"name": "Test Subscriber"}) + + # ── Schedule creation ──────────────────────────────────────────────────── + + def _make_schedule(self, **kwargs): + defaults = { + "name": "Weekly Refresh", + "spreadsheet_id": self.spreadsheet.id, + "interval_number": 1, + "interval_type": "weeks", + } + defaults.update(kwargs) + return self.env["spreadsheet.refresh.schedule"].create(defaults) + + def test_create_schedule(self): + schedule = self._make_schedule() + self.assertEqual(schedule.spreadsheet_id, self.spreadsheet) + self.assertFalse(schedule.cron_id) + self.assertFalse(schedule.last_run) + + def test_activate_creates_cron(self): + schedule = 
self._make_schedule() + schedule.action_activate() + self.assertTrue(schedule.cron_id) + self.assertTrue(schedule.cron_id.active) + self.assertEqual(schedule.cron_id.interval_number, 1) + self.assertEqual(schedule.cron_id.interval_type, "weeks") + + def test_activate_twice_reuses_cron(self): + schedule = self._make_schedule() + schedule.action_activate() + cron_id_first = schedule.cron_id.id + schedule.action_deactivate() + schedule.action_activate() + self.assertEqual(schedule.cron_id.id, cron_id_first) + + def test_deactivate_pauses_cron(self): + schedule = self._make_schedule() + schedule.action_activate() + schedule.action_deactivate() + self.assertFalse(schedule.cron_id.active) + + def test_unlink_removes_cron(self): + schedule = self._make_schedule() + schedule.action_activate() + cron = schedule.cron_id + schedule.unlink() + self.assertFalse(cron.exists()) + + # ── _run_refresh — no pivots ───────────────────────────────────────────── + + def test_run_refresh_no_pivots(self): + """Spreadsheet with no pivots: last_run is still updated.""" + schedule = self._make_schedule() + before = self.env["mail.message"].search_count( + [ + ("res_id", "=", self.spreadsheet.id), + ("model", "=", "spreadsheet.spreadsheet"), + ] + ) + schedule._run_refresh() + self.assertTrue(schedule.last_run) + # A chatter note is NOT posted when there are no pivots + after = self.env["mail.message"].search_count( + [ + ("res_id", "=", self.spreadsheet.id), + ("model", "=", "spreadsheet.spreadsheet"), + ] + ) + self.assertEqual(before, after) + + # ── _run_refresh — with pivot data ─────────────────────────────────────── + + def _set_pivot_raw(self, pivot_def): + """Write a minimal spreadsheet_raw JSON with one ODOO pivot.""" + raw = { + "version": 1, + "sheets": [{"id": "sheet1", "name": "Sheet1"}], + "pivots": { + "1": pivot_def, + }, + } + self.spreadsheet.write({"spreadsheet_raw": raw}) + + def test_run_refresh_with_valid_pivot(self): + """Valid res.partner pivot → posts a Chatter note 
with HTML body.""" + country_be = self.env.ref("base.be") + partners = self.env["res.partner"].create( + [ + {"name": "A", "country_id": country_be.id, "is_company": True}, + {"name": "B", "country_id": country_be.id, "is_company": True}, + ] + ) + self._set_pivot_raw( + { + "type": "ODOO", + "model": "res.partner", + "domain": [("id", "in", partners.ids)], + "context": {}, + "rows": [{"fieldName": "country_id"}], + "columns": [], + "measures": [{"fieldName": "__count"}], + "name": "Partner Pivot", + } + ) + schedule = self._make_schedule(notify_partner_ids=[self.partner.id]) + msg_count_before = self.env["mail.message"].search_count( + [ + ("res_id", "=", self.spreadsheet.id), + ("model", "=", "spreadsheet.spreadsheet"), + ] + ) + schedule._run_refresh() + self.assertTrue(schedule.last_run) + msg_count_after = self.env["mail.message"].search_count( + [ + ("res_id", "=", self.spreadsheet.id), + ("model", "=", "spreadsheet.spreadsheet"), + ] + ) + self.assertGreater( + msg_count_after, msg_count_before, "Expected a Chatter message" + ) + # Verify HTML content + msg = self.env["mail.message"].search( + [ + ("res_id", "=", self.spreadsheet.id), + ("model", "=", "spreadsheet.spreadsheet"), + ], + order="id desc", + limit=1, + ) + self.assertIn("Partner Pivot", msg.body) + self.assertIn("res.partner", msg.body) + + @mute_logger("odoo.addons.spreadsheet_oca.models.pivot_data") + def test_run_refresh_unknown_model_skipped(self): + """Pivot with an unknown model is skipped; last_run still set.""" + self._set_pivot_raw( + { + "type": "ODOO", + "model": "nonexistent.model.xyz", + "domain": [], + "context": {}, + "rows": [], + "columns": [], + "measures": [{"fieldName": "__count"}], + "name": "Bad Pivot", + } + ) + schedule = self._make_schedule() + schedule._run_refresh() + self.assertTrue(schedule.last_run) + + def test_run_refresh_non_odoo_pivot_skipped(self): + """Non-ODOO type pivot is ignored.""" + raw = { + "version": 1, + "sheets": [{"id": "sheet1", "name": 
"Sheet1"}], + "pivots": { + "1": {"type": "STATIC", "model": "res.partner"}, + }, + } + self.spreadsheet.write({"spreadsheet_raw": raw}) + schedule = self._make_schedule() + schedule._run_refresh() + self.assertTrue(schedule.last_run) + + # ── HTML renderer ──────────────────────────────────────────────────────── + + def test_render_refresh_html_empty(self): + html = self.env["spreadsheet.refresh.schedule"]._render_refresh_html([]) + self.assertIn("No ODOO pivot", html) + + def test_render_refresh_html_with_data(self): + result = { + "groups": [ + { + "rowGroupBy": [], + "colGroupBy": [], + "rowValues": [], + "colValues": [], + "count": 42, + "measures": {"__count": 42}, + } + ], + "rowDimensions": [], + "colDimensions": [], + "measureSpecs": ["__count"], + } + summaries = [{"name": "My Pivot", "model": "res.partner", "result": result}] + html = self.env["spreadsheet.refresh.schedule"]._render_refresh_html(summaries) + self.assertIn("My Pivot", html) + self.assertIn("res.partner", html) + self.assertIn("42", html) + + # ── Smart button count ─────────────────────────────────────────────────── + + def test_smart_button_count(self): + self.assertEqual(self.spreadsheet.refresh_schedule_count, 0) + self._make_schedule() + self._make_schedule(name="Daily", interval_type="days") + self.spreadsheet.invalidate_recordset() + self.assertEqual(self.spreadsheet.refresh_schedule_count, 2) diff --git a/spreadsheet_oca/views/spreadsheet_input_param_views.xml b/spreadsheet_oca/views/spreadsheet_input_param_views.xml new file mode 100644 index 00000000..37861d28 --- /dev/null +++ b/spreadsheet_oca/views/spreadsheet_input_param_views.xml @@ -0,0 +1,160 @@ + + + + + + spreadsheet.input_param.search + spreadsheet.input_param + + + + + + + + + + + + + + + + + spreadsheet.input_param.list + spreadsheet.input_param + + + + + + + + + + + + + + diff --git a/spreadsheet_oca/views/spreadsheet_refresh_schedule_views.xml b/spreadsheet_oca/views/spreadsheet_refresh_schedule_views.xml new file 
mode 100644 index 00000000..7791f6ce --- /dev/null +++ b/spreadsheet_oca/views/spreadsheet_refresh_schedule_views.xml @@ -0,0 +1,177 @@ + + + + + + spreadsheet.refresh.schedule.search + spreadsheet.refresh.schedule + + + + + + + + + + + + + + + + + + + spreadsheet.refresh.schedule.list + spreadsheet.refresh.schedule + + + + + + + + + + + + + + + + Refresh Schedules + + + + + +