Skip to content
Open
Show file tree
Hide file tree
Changes from all commits
Commits
File filter

Filter by extension

Filter by extension

Conversations
Failed to load comments.
Loading
Jump to
Jump to file
Failed to load files.
Loading
Diff view
Diff view
68 changes: 48 additions & 20 deletions OpenOrchestrator/database/db_util.py
Original file line number Diff line number Diff line change
Expand Up @@ -200,9 +200,13 @@ def delete_trigger(trigger_id: UUID | str) -> None:
session.commit()


# pylint: disable=too-many-positional-arguments
def get_logs(offset: int, limit: int,
from_date: datetime | None = None, to_date: datetime | None = None,
process_name: str | None = None, log_level: LogLevel | None = None, job_id: str | UUID | None = None) -> tuple[Log, ...]:
process_name: str | None = None, log_level: LogLevel | None = None,
job_id: str | UUID | None = None,
order_by: str | None = None, order_desc: bool = True,
include_count: bool = False) -> tuple[Log, ...] | tuple[tuple[Log, ...], int]:
"""Get the logs from the database using filters and pagination.

Args:
Expand All @@ -213,38 +217,62 @@ def get_logs(offset: int, limit: int,
process_name: The process name to filter on. If none the filter is disabled.
log_level: The log level to filter on. If none the filter is disabled.
job_id: The job ID to filter on. If none the filter is disabled.
order_by: Column to order the result by. If None, will use log_time.
order_desc: Should result be in descending order. Defaults to True.
include_count: Return a tuple with results as well as the total count of logs without limit applied.

Returns:
A list of logs matching the given filters.
"""
if isinstance(job_id, str):
job_id = UUID(job_id)

query = (
select(Log)
.order_by(desc(Log.log_time))
.offset(offset)
.limit(limit)
)
def _apply_filters(_query):
Copy link
Member

Choose a reason for hiding this comment

The reason will be displayed to describe this comment to others. Learn more.

Kinda weird, man

Copy link
Contributor Author

Choose a reason for hiding this comment

The reason will be displayed to describe this comment to others. Learn more.

Not weird! We reuse _apply_filters to build an optional count query when the caller requests the total count (include_count=True).

if from_date:
_query = _query.where(Log.log_time >= from_date)
if to_date:
_query = _query.where(Log.log_time <= to_date)
if process_name:
_query = _query.where(Log.process_name == process_name)
if log_level:
_query = _query.where(Log.log_level == log_level)
if job_id:
_query = _query.where(Log.job_id == job_id)
return _query

if from_date:
query = query.where(Log.log_time >= from_date)
with _get_session() as session:
query = _apply_filters(select(Log))

if to_date:
query = query.where(Log.log_time <= to_date)
# Sort mapping
if order_by:
key = order_by.lower().replace(" ", "_")
if key == 'level':
sort_col = Log.log_level
elif key == 'message':
sort_col = Log.log_message
elif key in ('short_job_id', 'job_id', 'full_job_id'):
sort_col = Log.job_id
elif key == 'process_name':
sort_col = Log.process_name
else:
sort_col = Log.log_time
else:
sort_col = Log.log_time

if process_name:
query = query.where(Log.process_name == process_name)
query = query.order_by(desc(sort_col) if order_desc else sort_col)
query = query.offset(offset).select_from(Log)
if limit > 0:
query = query.limit(limit)

if log_level:
query = query.where(Log.log_level == log_level)
result = session.scalars(query).all()
logs_tuple = tuple(result)

if job_id:
query = query.where(Log.job_id == job_id)
if include_count:
count_query = _apply_filters(select(alc_func.count()).select_from(Log)) # pylint: disable=not-callable
total_count = session.scalar(count_query)
return logs_tuple, total_count

with _get_session() as session:
result = session.scalars(query).all()
return tuple(result)
return logs_tuple


def create_log(process_name: str, level: LogLevel, job_id: str | UUID | None, message: str) -> None:
Expand Down
63 changes: 51 additions & 12 deletions OpenOrchestrator/orchestrator/tabs/logging_tab.py
Original file line number Diff line number Diff line change
Expand Up @@ -20,28 +20,37 @@
]


# pylint: disable-next=too-few-public-methods
# pylint: disable-next=too-few-public-methods, too-many-instance-attributes
class LoggingTab():
"""The 'Logs' tab object."""
def __init__(self, tab_name: str) -> None:
self.current_job_id: str | None = None
# TODO: Serverside pagination like queue tab maybe?
self.order_by = "Log Time"
self.order_descending = True
self.page = 1
self.rows_per_page = 25
self.log_count = 0

with ui.tab_panel(tab_name):
with ui.row().classes("w-full justify-between"):
with ui.row():
self.from_input = DatetimeInput("From Date", on_change=self.update, allow_empty=True)
self.to_input = DatetimeInput("To Date", on_change=self.update, allow_empty=True)
self.level_input = ui.select(["All", "Trace", "Info", "Error"], value="All", label="Level", on_change=self.update).classes("w-48")
self.process_input = ui.select(["All"], label="Process Name", value="All", on_change=self.update).classes("w-48")
self.limit_input = ui.select([100, 200, 500, 1000], value=100, label="Limit", on_change=self.update).classes("w-24")
with ui.column().classes("items-end") as self.job_filter_container:
self.job_filter_label = ui.label("")
self.all_jobs_button = ui.button("Show all jobs", on_click=self._show_all_jobs)
with ui.column().classes("items-end") as self.job_filter_container:
self.job_filter_label = ui.label("")
self.all_jobs_button = ui.button("Show all jobs", on_click=self._show_all_jobs)

self.logs_table = ui.table(title="Logs", columns=COLUMNS, rows=[], row_key='ID', pagination=50).classes("w-full")
self.logs_table.on("rowClick", self._row_click)
self.logs_table = ui.table(title="Logs", columns=COLUMNS, rows=[], row_key='ID',
pagination={'rowsPerPage': self.rows_per_page,
'rowsNumber': self.log_count})
self.logs_table.classes("w-full sticky-header h-[calc(100vh-200px)] overflow-auto")
self.logs_table.props(":rows-per-page-options='[10, 25, 50, 100, 1000]' rows-per-page-label='Logs per page:'")
self.logs_table.on("rowClick", self._row_click)
self.logs_table.on('request', self._on_table_request)

test_helper.set_automation_ids(self, "logs_tab")
test_helper.set_automation_ids(self, "logs_tab")

def update(self):
"""Update the logs table and Process input list"""
Expand All @@ -63,10 +72,40 @@ def _update_table(self):
to_date = self.to_input.get_datetime()
level = LogLevel(self.level_input.value) if self.level_input.value != "All" else None
process_name = self.process_input.value if self.process_input.value != 'All' else None
limit = self.limit_input.value

logs = db_util.get_logs(0, limit=limit, from_date=from_date, to_date=to_date, log_level=level, process_name=process_name, job_id=self.current_job_id)
self.logs_table.rows = [log.to_row_dict() for log in logs]
offset = (self.page - 1) * self.rows_per_page
order_by = str(self.order_by).lower().replace(" ", "_")

logs, count = db_util.get_logs(offset, limit=self.rows_per_page, from_date=from_date, to_date=to_date, log_level=level, process_name=process_name, job_id=self.current_job_id, order_by=order_by, order_desc=self.order_descending, include_count=True)
self._update_pagination(count)
self.logs_table.update_rows([log.to_row_dict() for log in logs])

def _on_table_request(self, e):
"""Called when updating table pagination and sorting, to handle these manually and allow for server side pagination.

Args:
e: The event triggering the request.
"""
pagination = e.args['pagination']
self.page = pagination.get('page')
self.rows_per_page = pagination.get('rowsPerPage')
self.order_by = pagination.get('sortBy')
self.order_descending = pagination.get('descending', False)
self._update_table()

def _update_pagination(self, log_count):
"""Update pagination element.

Args:
log_count: The element count of the current filtered table.
"""
self.log_count = log_count
self.logs_table.pagination = {"rowsNumber": self.log_count,
"page": self.page,
"rowsPerPage": self.rows_per_page,
"sortBy": self.order_by,
"descending": self.order_descending}
self.logs_table.update()

def _update_process_input(self):
"""Update the process input with names from the database."""
Expand Down
5 changes: 4 additions & 1 deletion OpenOrchestrator/orchestrator/tabs/queue_tab.py
Original file line number Diff line number Diff line change
Expand Up @@ -95,8 +95,11 @@ def __init__(self, queue_name: str, update_callback):
ui.button(icon='refresh', on_click=self._update)
self.close_button = ui.button(icon="close", on_click=lambda: (dialog.close(), self.update_callback()))
with ui.scroll_area().classes("h-full"):
self.table = ui.table(columns=ELEMENT_COLUMNS, rows=[], row_key='ID', title=queue_name, pagination={'rowsPerPage': self.rows_per_page, 'rowsNumber': self.queue_count}).classes("w-full sticky-header h-[calc(100vh-200px)] overflow-auto")
self.table = ui.table(columns=ELEMENT_COLUMNS, rows=[], row_key='ID', title=queue_name, pagination={'rowsPerPage': self.rows_per_page, 'rowsNumber': self.queue_count})
self.table.classes("w-full sticky-header h-[calc(100vh-200px)] overflow-auto")
self.table.on('rowClick', lambda e: self._open_queue_element_popup(e.args[1]))
self.table.props(
":rows-per-page-options='[10, 25, 50, 100, 1000]' rows-per-page-label='Queue elements per page:'")
self.table.on('request', self._on_table_request)

with self.table.add_slot("top"):
Expand Down
7 changes: 1 addition & 6 deletions OpenOrchestrator/tests/test_scheduler_runner.py
Original file line number Diff line number Diff line change
Expand Up @@ -110,15 +110,10 @@ def test_run_trigger(self, mock_clone_git_repo: MagicMock, mock_find_main_file:
mock_clone_git_repo.assert_called_once_with(trigger.process_path, trigger.git_branch)
mock_find_main_file.assert_called_once_with("folder_path")
mock_isfile.assert_called_once_with("main.py")
mock_popen.assert_called_once_with(['python', "main.py", trigger.process_name, db_util.get_conn_string(), crypto_util.get_key(), trigger.process_args, str(trigger.id), scheduler_job.job.id],
mock_popen.assert_called_once_with(['python', "main.py", trigger.process_name, db_util.get_conn_string(), crypto_util.get_key(), trigger.process_args, str(trigger.id), str(scheduler_job.job.id)],
stderr=subprocess.PIPE, text=True)
mock_get_scheduler_name.assert_called_once()

# Check Popen was called with job ID as last argument
call_args = mock_popen.call_args[0][0]
job_id_arg = call_args[-1]
self.assertEqual(scheduler_job.job.id, job_id_arg)

# Check that trigger status was set
trigger = db_util.get_trigger(trigger_id)
self.assertEqual(trigger.process_status, TriggerStatus.RUNNING)
Expand Down
1 change: 1 addition & 0 deletions changelog.md
Original file line number Diff line number Diff line change
Expand Up @@ -25,6 +25,7 @@ and this project adheres to [Semantic Versioning](https://semver.org/spec/v2.0.0
- Queue element list pagination moved to server side.
- Reordered columns in Orchestrator app.
- Reordered columns in logs.
- Pagination of logs is now handled server side.

### Dev

Expand Down