From 28f094f80b8fe13ac6a8ecba5b074f90d9be56e3 Mon Sep 17 00:00:00 2001 From: Ivo Oskamp Date: Fri, 6 Feb 2026 13:41:08 +0100 Subject: [PATCH] Merge branches v20260203-01 through v20260205-13 into main This commit consolidates all development work from the following branch series: - v20260203-* (13 branches): Initial Autotask integration, graph config UI improvements - v20260204-* (3 branches): Dashboard redirect setting, additional refinements - v20260205-* (13 branches): Autotask resolution improvements, changelog restructuring Key features merged: - Autotask PSA integration with ticket creation, resolution, and search - Graph/mail configuration UI improvements with credential testing - Daily dashboard redirect setting (optional navigation control) - Changelog restructuring with improved Python structure - Various bug fixes and UI enhancements All functionality has been consolidated from the final state of branch v20260205-13-changelog-python-structure to preserve working features. Co-Authored-By: Claude Sonnet 4.5 --- .last-branch | 2 +- README.md | 121 --- .../backupchecks/src/backend/app/changelog.py | 870 ++++++++++++++++++ .../app/integrations/autotask/client.py | 247 +++-- .../src/backend/app/main/routes_changelog.py | 39 +- .../src/backend/app/main/routes_daily_jobs.py | 376 ++------ .../src/backend/app/main/routes_run_checks.py | 72 +- .../src/backend/app/main/routes_settings.py | 5 +- .../src/backend/app/main/routes_shared.py | 15 +- .../src/backend/app/migrations.py | 300 +++--- .../backupchecks/src/backend/app/models.py | 27 +- .../backupchecks/src/static/css/changelog.css | 212 +++++ .../src/templates/layout/base.html | 1 + .../src/templates/main/changelog.html | 109 ++- .../src/templates/main/customers.html | 121 +-- .../src/templates/main/run_checks.html | 3 +- docs/autotask_rest_api.md | 325 +++++++ docs/changelog-claude.md | 105 +++ docs/changelog.md | 625 ++----------- version.txt | 2 +- 20 files changed, 2204 insertions(+), 1373 deletions(-) 
create mode 100644 containers/backupchecks/src/backend/app/changelog.py create mode 100644 containers/backupchecks/src/static/css/changelog.css create mode 100644 docs/autotask_rest_api.md diff --git a/.last-branch b/.last-branch index f35a538..c1c0a1c 100644 --- a/.last-branch +++ b/.last-branch @@ -1 +1 @@ -v20260204-03-dashboard-redirect-setting +v20260205-13-changelog-python-structure diff --git a/README.md b/README.md index f2a781f..e69de29 100644 --- a/README.md +++ b/README.md @@ -1,121 +0,0 @@ -# BackupChecks - -A backup monitoring and compliance application designed for Managed Service Providers (MSPs) and IT departments. - -## Features - -### Mail Ingestion & Parsing -- Automated email import from Microsoft Graph API (Office 365) -- Supports 11 backup software platforms: - - Veeam (including SOBR capacity monitoring) - - Synology - - QNAP - - Nakivo - - Syncovery - - Boxafe - - R-Drive - - 3CX - - NTFS Auditing - - And more -- Intelligent email parsing extracts backup metadata -- Raw EML storage for debugging with configurable retention - -### Backup Job Tracking -- Dashboard with daily backup job status summaries -- Expected vs. 
actual job runs based on schedules -- Missed backup detection -- Status tracking: Success, Warning, Error, Missed -- Timezone-aware calculations (defaults to Europe/Amsterdam) - -### Run Checks & Review Workflow -- Manual review interface for backup failures -- Mark runs as "reviewed" -- Approval workflow for backup email processing -- Storage capacity monitoring -- Autotask PSA ticket integration - -### Override Rules -- Exception rules at global or job-level -- Matching criteria: status, error message patterns -- Validity windows for temporary overrides - -### Multi-Customer Management -- Organize jobs by customer -- Autotask company mapping -- Customer-scoped reporting and permissions - -### Tickets & Remarks -- Internal ticket system for backup issues -- Automatic linking to affected job runs -- Scope-based ticket resolution -- Feedback/feature request board - -### Reporting & Analytics -- Snapshot-based reporting with configurable periods -- Historical success rates and trend analysis -- CSV export functionality - -### Autotask Integration -- Create PSA tickets from failed backup runs -- Link internal tickets to Autotask -- Deep links back to BackupChecks - -### User Management -- Role-based access control (admin, viewer, custom roles) -- Theme preferences (light/dark/auto) -- In-app news/announcements - -## Technology Stack - -**Backend:** -- Flask 3.0.3 (Python) -- SQLAlchemy with PostgreSQL 16 -- Flask-Login for authentication -- Gunicorn server - -**Frontend:** -- Jinja2 templates (server-side rendering) -- Bootstrap-based responsive UI -- JavaScript/AJAX for dynamic interactions - -**Infrastructure:** -- Docker containerized -- PostgreSQL database -- Adminer for database management - -## Project Structure - -``` -backupchecks/ -├── containers/backupchecks/ -│ ├── Dockerfile -│ ├── requirements.txt -│ └── src/ -│ ├── backend/app/ -│ │ ├── main.py # Entry point -│ │ ├── models.py # Database models -│ │ ├── parsers/ # Backup software parsers -│ │ ├── 
integrations/ # Autotask integration -│ │ └── main/ # Route handlers -│ ├── templates/ # Jinja2 templates -│ └── static/ # CSS, JS, assets -├── deploy/ -│ └── backupchecks-stack.yml # Docker Compose -└── docs/ # Documentation -``` - -## Deployment - -The application runs as a Docker stack in Portainer. - -## Configuration - -Key settings are managed via the web interface under Settings: -- Mail import configuration (Microsoft Graph API) -- Autotask integration credentials -- Timezone settings -- User management - -## License - -See [LICENSE](LICENSE) file. diff --git a/containers/backupchecks/src/backend/app/changelog.py b/containers/backupchecks/src/backend/app/changelog.py new file mode 100644 index 0000000..bdb7518 --- /dev/null +++ b/containers/backupchecks/src/backend/app/changelog.py @@ -0,0 +1,870 @@ +""" +Changelog data structure for Backupchecks +""" + +CHANGELOG = [ + { + "version": "v0.1.22", + "date": "2026-02-05", + "summary": "This major release introduces comprehensive Autotask PSA integration, enabling seamless ticket management, customer company mapping, and automated ticket lifecycle handling directly from Backupchecks. 
The integration includes extensive settings configuration, robust API client implementation, intelligent ticket linking across job runs, and conditional ticket status updates based on time entries.", + "sections": [ + { + "title": "Autotask Integration Core Features", + "type": "feature", + "subsections": [ + { + "subtitle": "Settings and Configuration", + "changes": [ + "Complete Autotask integration settings in Settings → Integrations", + "Environment selection (Sandbox/Production) with automatic zone discovery", + "API authentication with fallback support for different tenant configurations", + "Tracking identifier (Integration Code) configuration for ticket attribution", + "Connection testing and diagnostics", + "Reference data synchronization (queues, sources, priorities, statuses)", + "Configurable ticket defaults (queue, source, status, priority)", + "Autotask integration and automatic mail import can now be properly disabled after being enabled (fixed unchecked checkbox processing)" + ] + }, + { + "subtitle": "Customer Company Mapping", + "changes": [ + "Explicit Autotask company mapping for customers using ID-based linkage", + "Company search with auto-suggestions when opening unmapped customers", + "Automatically populates search box with customer name and displays matching Autotask companies", + "Mapping status tracking (ok/renamed/missing/invalid)", + "Bulk mapping refresh for all customers", + "Clear search boxes when opening modals for better user experience" + ] + }, + { + "subtitle": "Ticket Creation and Management", + "changes": [ + "Create Autotask tickets directly from Run Checks page", + "Automatic ticket number assignment and storage", + "Link existing Autotask tickets to job runs", + "Cross-company ticket search for overarching infrastructure issues (search by ticket number finds tickets across all companies)", + "Ticket propagation to all active runs of the same job", + "Internal ticket registration for legacy compatibility (Tickets, 
Tickets/Remarks, Job Details)", + "Real-time ticket status polling and updates", + "Deleted ticket detection and audit tracking (deletion date/time and deleted-by resource information)" + ] + }, + { + "subtitle": "Ticket Resolution and Status Management", + "changes": [ + "Conditional ticket status updates based on time entries:", + " - Tickets without time entries: automatically closed (status 5 - Complete)", + " - Tickets with time entries: remain open for time tracking continuation", + "Dynamic confirmation messages indicating closure behavior based on time entry presence", + "Safe resolution updates preserving stabilizing fields (issueType, subIssueType, source)", + "Resolution field mirroring from internal ticket notes", + "Ticket notes created via `/Tickets/{id}/Notes` endpoint with timezone-aware timestamps", + "Deleted ticket handling with complete audit trail" + ] + }, + { + "subtitle": "Technical Implementation", + "changes": [ + "Full-featured Autotask REST API client (`integrations/autotask/client.py`)", + "Zone information discovery and endpoint resolution", + "Robust authentication handling with header-based fallback for sandbox environments", + "Picklist-based reference data retrieval (queues, sources, priorities, statuses)", + "Entity metadata parsing with tenant-specific field detection", + "Database migrations for Autotask linkage fields across SystemSettings, Customer, JobRun, and Ticket models", + "Ticketing utilities for internal/external ticket synchronization", + "Comprehensive API contract documentation (`docs/autotask_rest_api.md`)", + "Functional design living document for integration architecture" + ] + } + ] + }, + { + "title": "User Interface Improvements", + "type": "improvement", + "changes": [ + "Search boxes now clear automatically when opening modals (Run Checks Link existing, Customer mapping)", + "Auto-search for similar company names when mapping unmapped customers", + "Cross-company ticket search when using ticket numbers 
(e.g., \"T20260205.0001\")", + "Dynamic confirmation messages for ticket resolution based on time entries", + "Improved visibility of Autotask ticket information in Run Checks", + "Status labels displayed instead of numeric codes in ticket lists", + "\"Deleted in PSA\" status display with deletion audit information", + "\"Resolved by PSA (Autotask)\" differentiation from Backupchecks-driven resolution" + ] + }, + { + "title": "Bug Fixes and Stability", + "type": "fixed", + "changes": [ + "Fixed Autotask REST API base URL casing (ATServicesRest/V1.0)", + "Fixed reference data retrieval using correct picklist endpoints", + "Fixed authentication fallback for sandbox-specific behavior", + "Fixed company name display from nested API responses", + "Fixed ticket ID normalization and response unwrapping (itemId handling)", + "Fixed TicketJobRun linkage for legacy ticket behavior", + "Fixed unchecked checkbox processing for enable/disable toggles (Autotask integration, automatic mail import)", + "Fixed ticket resolution updates to preserve exact field values from GET response", + "Fixed picklist field detection for tenant-specific metadata", + "Fixed migration stability with idempotent column checks", + "Fixed settings page crash with local helper functions", + "Fixed Run Checks modal stacking and Bootstrap 4/5 compatibility", + "Fixed JavaScript errors (renderModal → renderRun)", + "Fixed indentation errors preventing application startup", + "Fixed ticket propagation to ensure all active runs receive ticket linkage", + "Fixed polling to use read-only operations without state mutation" + ] + }, + { + "title": "Documentation", + "type": "documentation", + "changes": [ + "Added comprehensive Autotask REST API contract documentation (`docs/autotask_rest_api.md`)", + "Created functional design living document for integration architecture", + "Documented ticket lifecycle, status management, and time entry considerations", + "Added changelog tracking for Claude Code changes 
(`docs/changelog-claude.md`)" + ] + } + ] + }, + { + "version": "v0.1.21", + "date": "2026-01-20", + "summary": "This release focuses on improving correctness, consistency, and access control across core application workflows, with particular attention to changelog rendering, browser-specific mail readability, Run Checks visibility, role-based access restrictions, override flexibility, and VSPC object linking reliability. The goal is to ensure predictable behavior, clearer diagnostics, and safer administration across both day-to-day operations and complex multi-entity reports.", + "sections": [ + { + "title": "Changelog Rendering and Documentation Accuracy", + "type": "improvement", + "changes": [ + "Updated the Changelog route to render remote Markdown content instead of plain text", + "Enabled full Markdown parsing so headings, lists, links, and code blocks are displayed correctly", + "Ensured the changelog always fetches the latest version directly from the source repository at request time", + "Removed legacy plain-text rendering to prevent loss of structure and formatting" + ] + }, + { + "title": "Mail Rendering and Browser Compatibility", + "type": "improvement", + "changes": [ + "Forced a light color scheme for embedded mail content to prevent Microsoft Edge from applying automatic dark mode styling", + "Added explicit `color-scheme` and `forced-color-adjust` rules so original mail CSS is respected", + "Ensured consistent mail readability across Edge and Firefox", + "Applied these fixes consistently across Inbox, Deleted Inbox, Job Details, Run Checks, Daily Jobs, and Admin All Mail views" + ] + }, + { + "title": "Run Checks Visibility and Consistency", + "type": "improvement", + "changes": [ + "Added support for displaying the overall remark (overall_message) directly on the Run Checks page", + "Ensured consistency between Run Checks and Job Details, where the overall remark was already available", + "Improved operator visibility of high-level run context 
without requiring navigation to job details" + ] + }, + { + "title": "Initial Setup and User Existence Safeguards", + "type": "fixed", + "changes": [ + "Fixed an incorrect redirect to the \"Initial admin setup\" page when users already exist", + "Changed setup detection logic from \"admin user exists\" to \"any user exists\"", + "Ensured existing environments always show the login page instead of allowing a new initial admin to be created", + "Prevented direct access to the initial setup route when at least one user is present" + ] + }, + { + "title": "Role-Based Access Control and Menu Restrictions", + "type": "improvement", + "changes": [ + "Restricted the Reporter role to only access Dashboard, Reports, Changelog, and Feedback", + "Updated menu rendering to fully hide unauthorized menu items for Reporter users", + "Adjusted route access to ensure Feedback pages remain accessible for the Reporter role", + "Improved overall consistency between visible navigation and backend access rules" + ] + }, + { + "title": "Override Matching Flexibility and Maintainability", + "type": "feature", + "changes": [ + "Added configurable error text matching modes for overrides: contains, exact, starts with, and ends with", + "Updated override evaluation logic to apply the selected match mode across run remarks and object error messages", + "Extended the overrides UI with a match type selector and improved edit support for existing overrides", + "Added a database migration to create and backfill the `overrides.match_error_mode` field for existing records" + ] + }, + { + "title": "Job Deletion Stability", + "type": "fixed", + "changes": [ + "Fixed an error that occurred during job deletion", + "Corrected backend deletion logic to prevent runtime exceptions", + "Ensured related records are handled safely to avoid constraint or reference errors during removal" + ] + }, + { + "title": "VSPC Object Linking and Normalization", + "type": "fixed", + "changes": [ + "Fixed VSPC company name 
normalization so detection and object prefixing behave consistently", + "Ensured filtered object persistence respects the UNIQUE(customer_id, object_name) constraint", + "Correctly update `last_seen` timestamps for existing objects", + "Added automatic object persistence routing for VSPC per-company runs, ensuring objects are linked to the correct customer and job", + "Improved auto-approval for VSPC Active Alarms summaries with per-company run creation and case-insensitive object matching", + "Added best-effort retroactive processing to automatically link older inbox messages once company mappings are approved" + ] + }, + { + "title": "VSPC Normalization Bug Fixes and Backward Compatibility", + "type": "fixed", + "changes": [ + "Removed duplicate definitions of VSPC Active Alarms company extraction logic that caused inconsistent normalization", + "Ensured a single, consistent normalization path is used when creating jobs and linking objects", + "Improved object linking so real objects (e.g. HV01, USB Disk) are reliably associated with their jobs", + "Restored automatic re-linking for both new and historical VSPC mails", + "Added backward-compatible matching to prevent existing VSPC jobs from breaking due to earlier inconsistent company naming" + ] + } + ] + }, + { + "version": "v0.1.20", + "date": "2026-01-15", + "summary": "This release delivers a comprehensive set of improvements focused on parser correctness, data consistency, and clearer operator workflows across Inbox handling, Run Checks, and administrative tooling. 
The main goal of these changes is to ensure that backup notifications are parsed reliably, presented consistently, and handled through predictable and auditable workflows, even for complex or multi-entity reports.", + "sections": [ + { + "title": "Mail Parsing and Data Integrity", + "type": "improvement", + "changes": [ + "Fixed Veeam Backup for Microsoft 365 parsing where the overall summary message was not consistently stored", + "Improved extraction of overall detail messages so permission and role warnings are reliably captured", + "Ensured the extracted overall message is always available across Job Details, Run Checks, and reporting views", + "Added decoding of HTML entities in parsed object fields (name, type, status, error message) before storage, ensuring characters such as ampersands are displayed correctly", + "Improved robustness of parsing logic to prevent partial or misleading data from being stored when mails contain mixed or malformed content" + ] + }, + { + "title": "Object Classification and Sorting", + "type": "improvement", + "changes": [ + "Updated object list sorting to improve readability and prioritization", + "Objects are now grouped by severity in a fixed order: Errors first, then Warnings, followed by all other statuses", + "Within each severity group, objects are sorted alphabetically (A–Z)", + "Applied the same sorting logic consistently across Inbox, Job Details, Run Checks, Daily Jobs, and the Admin All Mail view", + "Improved overall run status determination by reliably deriving the worst detected object state" + ] + }, + { + "title": "Parsers Overview and Maintainability", + "type": "improvement", + "changes": [ + "Refactored the Parsers overview page to use the central parser registry instead of a static, hardcoded list", + "All available parsers are now displayed automatically, ensuring the page stays in sync as parsers are added or removed", + "Removed hardcoded parser definitions from templates to improve long-term 
maintainability", + "Fixed a startup crash in the parsers route caused by an invalid absolute import by switching to a package-relative import", + "Prevented Gunicorn worker boot failures and Bad Gateway errors during application initialization" + ] + }, + { + "title": "User Management and Feedback Handling", + "type": "feature", + "changes": [ + "Added support for editing user roles directly from the User Management interface", + "Implemented backend logic to update existing role assignments without requiring user deletion", + "Ensured role changes are applied immediately and reflected correctly in permissions and access control", + "Updated feedback listings to show only Open items by default", + "Ensured Resolved items are always sorted to the bottom when viewing all feedback", + "Preserved existing filtering, searching, and user-controlled sorting behavior" + ] + }, + { + "title": "UI Improvements and Usability Enhancements", + "type": "improvement", + "changes": [ + "Introduced reusable ellipsis handling for long detail fields to prevent layout overlap", + "Added click-to-expand behavior for truncated fields, with double-click support to expand and select all text", + "Added automatic tooltips showing the full value when a field is truncated", + "Removed the redundant \"Objects\" heading above objects tables to reduce visual clutter", + "Applied truncation and expansion behavior consistently across Inbox, Deleted Mail, Run Checks, Daily Jobs, Job Detail views, and Admin All Mail", + "Reset expanded ellipsis fields when Bootstrap modals or offcanvas components are opened or closed to prevent state leakage", + "Fixed layout issues where the Objects table could overlap mail content in the Run Checks popup" + ] + }, + { + "title": "Veeam Cloud Connect and VSPC Parsing", + "type": "improvement", + "changes": [ + "Improved the Veeam Cloud Connect Report parser by combining User and Repository Name into a single object identifier", + "Excluded \"TOTAL\" rows from 
object processing", + "Correctly classified red rows as Errors and yellow/orange rows as Warnings", + "Ensured overall status is set to Error when one or more objects are in error state", + "Added support for Veeam Service Provider Console daily alarm summary emails", + "Implemented per-company object aggregation and derived overall status from the worst detected state", + "Improved detection of VSPC Active Alarms emails to prevent incorrect fallback to other Veeam parsers", + "Fixed a SyntaxError in the VSPC parser that caused application startup failures" + ] + }, + { + "title": "VSPC Company Mapping Workflow", + "type": "feature", + "changes": [ + "Introduced a dedicated company-mapping popup for VSPC Active Alarms summary reports", + "Enabled manual mapping of companies found in mails to existing customers", + "Implemented per-company job and run creation using the format \"Active alarms summary | <company>\"", + "Disabled the standard approval flow for this report type and replaced it with a dedicated mapping workflow", + "Required all detected companies to be mapped before full approval, while still allowing partial approvals", + "Prevented duplicate run creation on repeated approvals", + "Improved visibility and usability of the mapping popup with scroll support for large company lists", + "Ensured only alarms belonging to the selected company are attached to the corresponding run" + ] + }, + { + "title": "NTFS Auditing and Synology ABB Enhancements", + "type": "improvement", + "changes": [ + "Added full parser support for NTFS Auditing reports", + "Improved hostname and FQDN extraction from subject lines, supporting multiple subject formats and prefixes", + "Ensured consistent job name generation as \"<hostname> file audits\"", + "Set overall status to Warning when detected change counts are greater than zero", + "Improved Synology Active Backup for Business parsing to detect partially completed jobs as Warning", + "Added support for localized completion messages and subject 
variants", + "Improved per-device object extraction and ensured specific device statuses take precedence over generic listings" + ] + }, + { + "title": "Workflow Simplification and Cleanup", + "type": "improvement", + "changes": [ + "Removed the \"Mark success (override)\" button from the Run Checks popup", + "Prevented creation of unintended overrides when marking individual runs as successful", + "Simplified override handling so Run Checks actions no longer affect override administration", + "Ensured firmware update notifications (QNAP) are treated as informational warnings and excluded from missing-run detection and reporting" + ] + } + ] + }, + { + "version": "v0.1.19", + "date": "2026-01-10", + "summary": "This release delivers a broad set of improvements focused on reliability, transparency, and operational control across mail processing, administrative auditing, and Run Checks workflows.", + "sections": [ + { + "title": "Mail Import Reliability and Data Integrity", + "type": "improvement", + "changes": [ + "Updated the mail import flow so messages are only moved to the processed folder after a successful database store and commit", + "Prevented Graph emails from being moved when parsing, storing, or committing data fails", + "Added explicit commit and rollback handling to guarantee database consistency", + "Improved logging around import, commit, and rollback failures" + ] + }, + { + "title": "Administrative Mail Auditing and Visibility", + "type": "feature", + "changes": [ + "Introduced an admin-only \"All Mail\" audit page", + "Implemented pagination with a fixed page size of 50 items", + "Added always-visible search filters (From, Subject, Backup, Type, Job name, date range)", + "Added \"Only unlinked\" filter to identify messages not associated with any job" + ] + }, + { + "title": "Run Checks Usability and Control", + "type": "improvement", + "changes": [ + "Added copy-to-clipboard icon next to ticket numbers", + "Introduced manual \"Success 
(override)\" action for Operators and Admins", + "Updated UI indicators for overridden runs with blue success status", + "Improved mail rendering with fallback to text bodies and EML extraction" + ] + }, + { + "title": "Parser Enhancements", + "type": "improvement", + "changes": [ + "Added parser support for 3CX SSL Certificate notification emails", + "Added detection for Synology DSM automatic update cancellation messages" + ] + } + ] + }, + { + "version": "v0.1.18", + "date": "2026-01-05", + "summary": "This release focuses on improving ticket reuse, scoping, and visibility across jobs, runs, and history views.", + "sections": [ + { + "title": "Ticket Linking and Reuse", + "type": "improvement", + "changes": [ + "Updated ticket linking logic to allow the same ticket number across multiple jobs and runs", + "Prevented duplicate ticket creation errors when reusing existing ticket codes", + "Ensured existing tickets are consistently reused and linked" + ] + }, + { + "title": "Job History Enhancements", + "type": "feature", + "changes": [ + "Added Tickets and Remarks section to Job History mail popup", + "Enabled viewing and managing tickets/remarks directly from Job History", + "Aligned ticket handling with Run Checks popup behavior" + ] + } + ] + }, + { + "version": "v0.1.17", + "date": "2025-12-30", + "summary": "This release focuses on improving job normalization, ticket and remark handling, UI usability, and the robustness of run and object detection.", + "sections": [ + { + "title": "Job Normalization and Aggregation", + "type": "improvement", + "changes": [ + "Veeam job names now normalized to prevent duplicates (Combined/Full suffixes merged)", + "Added support for archiving inactive jobs" + ] + }, + { + "title": "Inbox and Bulk Operations", + "type": "feature", + "changes": [ + "Introduced multi-select inbox functionality for Operator and Admin roles", + "Added bulk \"Delete selected\" action with validation and audit logging" + ] + }, + { + "title": 
"Tickets and Remarks", + "type": "improvement", + "changes": [ + "Ticket creation now uses user-provided codes with strict validation", + "Editing of tickets/remarks disabled; must be resolved and recreated", + "Removed ticket description fields to prevent inconsistent data" + ] + } + ] + }, + { + "version": "v0.1.16", + "date": "2025-12-25", + "summary": "This release significantly expands and stabilizes the reporting functionality, focusing on configurability, correctness, and richer output formats.", + "sections": [ + { + "title": "Reporting Enhancements", + "type": "feature", + "changes": [ + "Reports now job-aggregated instead of object-level", + "Full report lifecycle management added", + "Advanced reporting foundations with configurable definitions", + "Multiple export formats: CSV, HTML, and PDF", + "Extensive column selection with drag-and-drop ordering", + "Job-level aggregated metrics and success rate charts" + ] + } + ] + }, + { + "version": "v0.1.15", + "date": "2025-12-20", + "summary": "This release focused on improving operational clarity and usability by strengthening dashboard guidance and introducing reporting foundation.", + "sections": [ + { + "title": "Dashboard and User Guidance", + "type": "improvement", + "changes": [ + "Added comprehensive explanatory section to Dashboard", + "Implemented automatic redirection to Dashboard on first daily visit", + "Refactored Settings area into clearly separated sections" + ] + }, + { + "title": "Dashboard News", + "type": "feature", + "changes": [ + "Added per-user Dashboard News section with read/unread tracking", + "Full admin management of news items" + ] + }, + { + "title": "Run Checks Multi-Select", + "type": "improvement", + "changes": [ + "Added Shift-click multi-selection for efficient bulk review", + "Fixed edge cases with selection and checkbox synchronization" + ] + } + ] + }, + { + "version": "v0.1.14", + "date": "2025-12-15", + "summary": "Focused on improving sorting, parsing, and override 
functionality.", + "sections": [ + { + "title": "Daily Jobs Sorting", + "type": "improvement", + "changes": [ + "Consistent multi-level sort: Customer → Backup Software → Type → Job Name", + "Fixed backend ordering to ensure server-side consistency" + ] + }, + { + "title": "Overrides Configuration", + "type": "improvement", + "changes": [ + "Replaced free-text inputs with dropdowns for Backup Software and Type", + "Made newly created overrides apply immediately and retroactively", + "Added full support for editing existing overrides" + ] + }, + { + "title": "Overrides UI Indicators", + "type": "feature", + "changes": [ + "Introduced blue status indicator for runs with overrides applied", + "Added persistent override reporting metadata to job runs" + ] + } + ] + }, + { + "version": "v0.1.13", + "date": "2025-12-10", + "summary": "Focused on improving visibility and consistency of Tickets and Remarks.", + "sections": [ + { + "title": "Tickets and Remarks Visibility", + "type": "improvement", + "changes": [ + "Added clear visual indicators for active Tickets and Remarks in Run Checks", + "Enhanced Job Details to display actual ticket numbers and remark messages", + "Improved navigation with direct \"Job page\" links" + ] + }, + { + "title": "Missed Run Detection", + "type": "improvement", + "changes": [ + "Now includes ±1 hour tolerance window", + "Respects configured UI timezone" + ] + } + ] + }, + { + "version": "v0.1.12", + "date": "2025-12-05", + "summary": "Dashboard improvements, inbox soft-delete, and enhanced parser support.", + "sections": [ + { + "title": "Dashboard and UI", + "type": "improvement", + "changes": [ + "Corrected dashboard counters for Expected, Missed, and Success (override) statuses", + "Fixed layout issues and improved label wrapping", + "Extended Job History with weekday labels and review metadata" + ] + }, + { + "title": "Inbox Soft-Delete", + "type": "feature", + "changes": [ + "Introduced soft-delete for Inbox messages", + "Added 
Admin-only \"Deleted mails\" page with audit details", + "Added popup previews for deleted mails" + ] + }, + { + "title": "Parser Enhancements", + "type": "improvement", + "changes": [ + "Improved Veeam parsing (Health Check, License Key)", + "Added Synology support (Active Backup, R-Sync, Account Protection)", + "Added R-Drive Image and Syncovery parsers" + ] + } + ] + }, + { + "version": "v0.1.11", + "date": "2025-11-30", + "summary": "Major stability fixes and introduction of Run Checks page.", + "sections": [ + { + "title": "Stability and Bug Fixes", + "type": "fixed", + "changes": [ + "Fixed multiple page crashes caused by missing imports", + "Resolved Jinja2 template errors and SQL/runtime issues" + ] + }, + { + "title": "Run Checks Page", + "type": "feature", + "changes": [ + "Introduced new Run Checks page to review job runs independently", + "Displays all unreviewed runs with no time-based filtering", + "Supports bulk review actions and per-job review via popups", + "Added admin-only features for audit and review management" + ] + }, + { + "title": "Timezone Support", + "type": "feature", + "changes": [ + "Added configurable timezone setting in Settings", + "Updated all frontend date/time rendering to use configured timezone" + ] + } + ] + }, + { + "version": "v0.1.10", + "date": "2025-11-25", + "summary": "Performance improvements and batch processing for large datasets.", + "sections": [ + { + "title": "Performance and Stability", + "type": "improvement", + "changes": [ + "Reworked Re-parse all to process in controlled batches", + "Added execution time guards to prevent timeouts", + "Optimized job-matching queries and database operations" + ] + }, + { + "title": "Job Matching and Parsing", + "type": "improvement", + "changes": [ + "Fixed approved job imports to persist from_address", + "Improved Veeam Backup Job parsing with multi-line warnings/errors", + "Fixed regressions in backup object detection and storage" + ] + }, + { + "title": "Tickets and 
Overrides", + "type": "improvement", + "changes": [ + "Introduced run-date scoped ticket activity", + "Implemented scoping for remarks", + "Improved override handling with immediate application" + ] + } + ] + }, + { + "version": "v0.1.9", + "date": "2025-11-20", + "summary": "Changelog system improvements and code refactoring.", + "sections": [ + { + "title": "Changelog System", + "type": "improvement", + "changes": [ + "Migrated to structured, non-markdown format", + "Simplified rendering logic", + "Standardized formatting across all versions" + ] + }, + { + "title": "Code Refactoring", + "type": "improvement", + "changes": [ + "Refactored large routes.py into multiple smaller modules", + "Introduced shared routes module for common imports", + "Fixed NameError issues after refactoring" + ] + } + ] + }, + { + "version": "v0.1.8", + "date": "2025-11-15", + "summary": "Consistent job matching and auto-approval across all mail processing flows.", + "sections": [ + { + "title": "Job Matching Improvements", + "type": "improvement", + "changes": [ + "Introduced single shared job-matching helper based on full unique key", + "Updated manual inbox approval to reuse existing jobs", + "Aligned inbox Re-parse all auto-approve logic", + "Fixed automatic mail import auto-approve" + ] + } + ] + }, + { + "version": "v0.1.7", + "date": "2025-11-10", + "summary": "Export/import functionality and parser enhancements.", + "sections": [ + { + "title": "Job Export and Import", + "type": "feature", + "changes": [ + "Introduced export and import functionality for approved jobs", + "Import process automatically creates missing customers", + "Updates existing jobs based on unique identity" + ] + }, + { + "title": "Parser Enhancements", + "type": "improvement", + "changes": [ + "Improved Boxafe parsing (Shared Drives, Domain Accounts)", + "Added Synology Hyper Backup Dutch support", + "Added Veeam SOBR and Health Check support" + ] + } + ] + }, + { + "version": "v0.1.6", + "date": 
"2025-11-05", + "summary": "Auto-approve fixes and centralized changelog.", + "sections": [ + { + "title": "Bug Fixes", + "type": "fixed", + "changes": [ + "Corrected auto-approve logic for automatic mail imports", + "Fixed Re-parse all to respect approved status", + "Fixed multiple Jinja2 template syntax errors" + ] + }, + { + "title": "Changelog Page", + "type": "feature", + "changes": [ + "Introduced centralized Changelog page", + "Added to main navigation" + ] + } + ] + }, + { + "version": "v0.1.5", + "date": "2025-10-30", + "summary": "Microsoft Graph restoration and application reset functionality.", + "sections": [ + { + "title": "Microsoft Graph", + "type": "fixed", + "changes": [ + "Restored Graph folder retrieval (fixed import error)", + "Fixed automatic mail importer signal-based timeout issues", + "Implemented missing backend logic for automatic imports" + ] + }, + { + "title": "Application Reset", + "type": "feature", + "changes": [ + "Added Application Reset option in Settings", + "Full backend support for complete data wipe", + "Confirmation step to prevent accidental resets" + ] + } + ] + }, + { + "version": "v0.1.4", + "date": "2025-10-25", + "summary": "Database migration stability and object parsing improvements.", + "sections": [ + { + "title": "Database Stability", + "type": "fixed", + "changes": [ + "Stabilized migrations by running in separate transaction scopes", + "Resolved backend startup 502 errors", + "Eliminated ResourceClosedError exceptions" + ] + }, + { + "title": "Object Parsing", + "type": "improvement", + "changes": [ + "Aligned manual imports with Re-parse all behavior", + "Ensured consistent object detection across all import paths", + "Hardened against Microsoft Graph timeouts" + ] + } + ] + }, + { + "version": "v0.1.3", + "date": "2025-10-20", + "summary": "Logging persistence and UI improvements.", + "sections": [ + { + "title": "Logging", + "type": "fixed", + "changes": [ + "Fixed logging persistence to database", + "Added 
pagination (20 entries per page)", + "Extended view to show all available log fields" + ] + }, + { + "title": "Jobs and Daily Jobs", + "type": "improvement", + "changes": [ + "Standardized default sorting", + "Persisted Daily Jobs start date setting", + "Improved table readability and layout" + ] + }, + { + "title": "Tickets and Remarks", + "type": "feature", + "changes": [ + "Added database schema for persistent tickets", + "Implemented Tickets page with tabbed navigation", + "Added indicators in Daily Jobs for active tickets/remarks" + ] + } + ] + }, + { + "version": "v0.1.2", + "date": "2025-10-15", + "summary": "Parser support expansion and in-app logging system.", + "sections": [ + { + "title": "Parser Support", + "type": "improvement", + "changes": [ + "Extended Synology Hyper Backup parser (Strato HiDrive support)", + "Improved handling of successful runs without objects" + ] + }, + { + "title": "Administration", + "type": "feature", + "changes": [ + "Introduced admin-only \"Delete all jobs\" action", + "Ensured related mails moved back to Inbox on job deletion", + "Fixed foreign key constraint issues" + ] + }, + { + "title": "Logging System", + "type": "feature", + "changes": [ + "Moved to in-app AdminLog-based logging", + "Detailed logging per imported/auto-approved email", + "Summary logging at end of import runs" + ] + }, + { + "title": "Object Persistence", + "type": "improvement", + "changes": [ + "Restored persistence after manual approval", + "Added maintenance action to backfill missing object links", + "Centralized object persistence logic" + ] + } + ] + } +] diff --git a/containers/backupchecks/src/backend/app/integrations/autotask/client.py b/containers/backupchecks/src/backend/app/integrations/autotask/client.py index b53115f..1e353b1 100644 --- a/containers/backupchecks/src/backend/app/integrations/autotask/client.py +++ b/containers/backupchecks/src/backend/app/integrations/autotask/client.py @@ -558,101 +558,127 @@ class AutotaskClient: 
return {"id": tid} + def update_ticket_resolution_safe(self, ticket_id: int, resolution_text: str) -> Dict[str, Any]: + """Safely update the Ticket 'resolution' field with conditional status update. + Autotask Tickets require a full PUT update; therefore we must: + - GET /Tickets/{id} to retrieve current stabilising fields (including classification/routing) + - Query time entries for the ticket + - PUT /Tickets with stabilising fields and conditional status -def update_ticket_resolution_safe(self, ticket_id: int, resolution_text: str) -> Dict[str, Any]: - """Safely update the Ticket 'resolution' field without changing status. + Status logic (per API contract section 9): + - If NO time entries exist: set status to 5 (Complete) + - If time entries exist: keep current status unchanged - Autotask Tickets require a full PUT update; therefore we must: - - GET /Tickets/{id} to retrieve current stabilising fields (including classification/routing) - - PUT /Tickets with those stabilising fields unchanged, and only update 'resolution' + IMPORTANT: + - GET /Tickets/{id} returns the ticket object under the 'item' envelope in most tenants. + - PUT payloads are not wrapped; fields are sent at the JSON root. + """ - IMPORTANT: - - GET /Tickets/{id} returns the ticket object under the 'item' envelope in most tenants. - - PUT payloads are not wrapped; fields are sent at the JSON root. 
- """ + try: + tid = int(ticket_id) + except Exception: + raise AutotaskError("Invalid ticket id.") - try: - tid = int(ticket_id) - except Exception: - raise AutotaskError("Invalid ticket id.") + if tid <= 0: + raise AutotaskError("Invalid ticket id.") - if tid <= 0: - raise AutotaskError("Invalid ticket id.") + data = self._request("GET", f"Tickets/{tid}") - data = self._request("GET", f"Tickets/{tid}") + ticket: Dict[str, Any] = {} + if isinstance(data, dict): + if "item" in data and isinstance(data.get("item"), dict): + ticket = data["item"] + elif "items" in data and isinstance(data.get("items"), list) and data.get("items"): + first = data.get("items")[0] + if isinstance(first, dict): + ticket = first + else: + ticket = data + elif isinstance(data, list) and data: + if isinstance(data[0], dict): + ticket = data[0] - ticket: Dict[str, Any] = {} - if isinstance(data, dict): - if "item" in data and isinstance(data.get("item"), dict): - ticket = data["item"] - elif "items" in data and isinstance(data.get("items"), list) and data.get("items"): - first = data.get("items")[0] - if isinstance(first, dict): - ticket = first - else: - ticket = data - elif isinstance(data, list) and data: - if isinstance(data[0], dict): - ticket = data[0] + if not isinstance(ticket, dict) or not ticket: + raise AutotaskError("Autotask did not return a ticket object.") - if not isinstance(ticket, dict) or not ticket: - raise AutotaskError("Autotask did not return a ticket object.") + def _pick(d: Dict[str, Any], keys: List[str]) -> tuple[bool, Any]: + """Pick first available field from possible field names. 
- def _pick(d: Dict[str, Any], keys: List[str]) -> Any: - for k in keys: - if k in d and d.get(k) not in (None, ""): - return d.get(k) - return None + Returns tuple: (found, value) + - found=True if field exists (even if value is None) + - found=False if field doesn't exist in dict - # Required stabilising fields for safe resolution updates (validated via Postman tests) - resolved_issue_type = _pick(ticket, ["issueType", "issueTypeID", "issueTypeId"]) - resolved_sub_issue_type = _pick(ticket, ["subIssueType", "subIssueTypeID", "subIssueTypeId"]) - resolved_source = _pick(ticket, ["source", "sourceID", "sourceId"]) - resolved_status = _pick(ticket, ["status", "statusID", "statusId"]) + This allows us to distinguish between "field missing" vs "field is null", + which is critical for Autotask PUT payloads that require exact values. + """ + for k in keys: + if k in d: + return (True, d[k]) + return (False, None) - missing: List[str] = [] - if _pick(ticket, ["id"]) in (None, ""): - missing.append("id") - if resolved_issue_type in (None, ""): - missing.append("issueType") - if resolved_sub_issue_type in (None, ""): - missing.append("subIssueType") - if resolved_source in (None, ""): - missing.append("source") - if resolved_status in (None, ""): - missing.append("status") + # Required stabilising fields for safe resolution updates (validated via Postman tests). + # Field names are camelCase as per API contract (docs/autotask_rest_api.md section 2.1). + # We must copy the EXACT values from GET response to PUT payload, even if null. 
+ found_id, ticket_id = _pick(ticket, ["id"]) + found_issue_type, resolved_issue_type = _pick(ticket, ["issueType", "issueTypeID", "issueTypeId"]) + found_sub_issue_type, resolved_sub_issue_type = _pick(ticket, ["subIssueType", "subIssueTypeID", "subIssueTypeId"]) + found_source, resolved_source = _pick(ticket, ["source", "sourceID", "sourceId"]) + found_status, resolved_status = _pick(ticket, ["status", "statusID", "statusId"]) - if missing: - raise AutotaskError( - "Cannot safely update ticket resolution because required fields are missing: " + ", ".join(missing) - ) + # Validate that required fields exist in the response + missing: List[str] = [] + if not found_id or ticket_id in (None, ""): + missing.append("id") + if not found_status or resolved_status in (None, ""): + missing.append("status") + if not found_issue_type: + missing.append("issueType") + if not found_sub_issue_type: + missing.append("subIssueType") + if not found_source: + missing.append("source") - payload: Dict[str, Any] = { - "id": int(ticket.get("id")), - "issueType": resolved_issue_type, - "subIssueType": resolved_sub_issue_type, - "source": resolved_source, - # Keep status unchanged - "status": resolved_status, - "resolution": str(resolution_text or ""), - } + if missing: + raise AutotaskError( + "Cannot safely update ticket resolution because required fields are missing: " + ", ".join(missing) + ) - # Copy other stabilising fields when available (helps avoid tenant-specific validation errors) - optional_fields = [ - "companyID", - "queueID", - "title", - "priority", - "dueDateTime", - "ticketCategory", - "organizationalLevelAssociationID", - ] - for f in optional_fields: - if f in ticket: - payload[f] = ticket.get(f) + # Check for time entries as per API contract section 9 + # If no time entries exist, we can set status to 5 (Complete) + # If time entries exist, status remains unchanged + time_entries = self.query_time_entries_by_ticket_id(int(ticket_id)) + has_time_entries = 
len(time_entries) > 0 - return self.update_ticket(payload) + # Determine final status based on time entry check + # Status 5 = Complete (sets completedDate and resolvedDateTime) + final_status = resolved_status if has_time_entries else 5 + + # Build payload with exact values from GET response (including null if that's what we got) + payload: Dict[str, Any] = { + "id": int(ticket_id), + "issueType": resolved_issue_type, + "subIssueType": resolved_sub_issue_type, + "source": resolved_source, + "status": final_status, + "resolution": str(resolution_text or ""), + } + + # Copy other stabilising fields when available (helps avoid tenant-specific validation errors) + optional_fields = [ + "companyID", + "queueID", + "title", + "priority", + "dueDateTime", + "ticketCategory", + "organizationalLevelAssociationID", + ] + for f in optional_fields: + if f in ticket: + payload[f] = ticket[f] + + return self.update_ticket(payload) def create_ticket_note(self, note_payload: Dict[str, Any]) -> Dict[str, Any]: """Create a user-visible note on a Ticket. @@ -723,6 +749,7 @@ def update_ticket_resolution_safe(self, ticket_id: int, resolution_text: str) -> return items[0] return {} + def get_ticket_note(self, note_id: int) -> Dict[str, Any]: """Retrieve a TicketNote by ID via GET /TicketNotes/{id}.""" @@ -941,4 +968,64 @@ def update_ticket_resolution_safe(self, ticket_id: int, resolution_text: str) -> # Respect limit if tenant returns more. if limit and isinstance(limit, int) and limit > 0: return items[: int(limit)] - return items \ No newline at end of file + return items + + def query_tickets_by_number( + self, + ticket_number: str, + *, + exclude_status_ids: Optional[List[int]] = None, + limit: int = 10, + ) -> List[Dict[str, Any]]: + """Query Tickets by ticket number across all companies. + + Uses POST /Tickets/query. + + This is useful for linking overarching issues that span multiple companies. 
+ """ + + tnum = (ticket_number or "").strip() + if not tnum: + return [] + + flt: List[Dict[str, Any]] = [ + {"op": "eq", "field": "ticketNumber", "value": tnum}, + ] + + ex: List[int] = [] + for x in exclude_status_ids or []: + try: + v = int(x) + except Exception: + continue + if v > 0: + ex.append(v) + if ex: + flt.append({"op": "notIn", "field": "status", "value": ex}) + + data = self._request("POST", "Tickets/query", json_body={"filter": flt}) + items = self._as_items_list(data) + + # Respect limit if tenant returns more. + if limit and isinstance(limit, int) and limit > 0: + return items[: int(limit)] + return items + + def query_time_entries_by_ticket_id(self, ticket_id: int) -> List[Dict[str, Any]]: + """Query TimeEntries for a specific ticket. + + Uses POST /TimeEntries/query as per API contract section 6. + + Returns list of time entry items. Empty list if no time entries exist. + """ + + try: + tid = int(ticket_id) + except Exception: + tid = 0 + if tid <= 0: + return [] + + payload = {"filter": [{"op": "eq", "field": "ticketID", "value": tid}]} + data = self._request("POST", "TimeEntries/query", json_body=payload) + return self._as_items_list(data) diff --git a/containers/backupchecks/src/backend/app/main/routes_changelog.py b/containers/backupchecks/src/backend/app/main/routes_changelog.py index 094d997..75ab0c0 100644 --- a/containers/backupchecks/src/backend/app/main/routes_changelog.py +++ b/containers/backupchecks/src/backend/app/main/routes_changelog.py @@ -1,48 +1,13 @@ from .routes_shared import * # noqa: F401,F403 -import markdown - - -GITEA_CHANGELOG_RAW_URL = ( - "https://gitea.oskamp.info/ivooskamp/backupchecks/raw/branch/main/docs/changelog.md" -) +from ..changelog import CHANGELOG @main_bp.route("/changelog") @login_required @roles_required("admin", "operator", "reporter", "viewer") def changelog_page(): - changelog_md = "" - changelog_html = "" - error = None - - try: - resp = requests.get( - GITEA_CHANGELOG_RAW_URL, - timeout=10, - 
headers={"Accept": "text/plain, text/markdown; q=0.9, */*; q=0.1"}, - ) - if resp.status_code != 200: - raise RuntimeError(f"HTTP {resp.status_code}") - changelog_md = resp.text or "" - - changelog_html = markdown.markdown( - changelog_md, - extensions=[ - "fenced_code", - "tables", - "sane_lists", - "toc", - ], - output_format="html5", - ) - except Exception as exc: # pragma: no cover - error = f"Unable to load changelog from Gitea ({GITEA_CHANGELOG_RAW_URL}): {exc}" - return render_template( "main/changelog.html", - changelog_md=changelog_md, - changelog_html=changelog_html, - changelog_error=error, - changelog_source_url=GITEA_CHANGELOG_RAW_URL, + changelog_versions=CHANGELOG, ) diff --git a/containers/backupchecks/src/backend/app/main/routes_daily_jobs.py b/containers/backupchecks/src/backend/app/main/routes_daily_jobs.py index e8d2dc5..890bc29 100644 --- a/containers/backupchecks/src/backend/app/main/routes_daily_jobs.py +++ b/containers/backupchecks/src/backend/app/main/routes_daily_jobs.py @@ -5,212 +5,6 @@ from .routes_shared import _format_datetime, _get_or_create_settings, _apply_ove # A job is only marked Missed after the latest expected time plus this grace. MISSED_GRACE_WINDOW = timedelta(hours=1) -# Job types that should never participate in schedule inference -_SKIP_SCHEDULE_TYPES = { - ("veeam", "license key"), - ("synology", "account protection"), - ("synology", "updates"), - ("qnap", "firmware update"), - ("syncovery", "syncovery"), -} - - -def _batch_infer_schedules(job_ids: list[int], tz) -> dict[int, dict]: - """Batch infer weekly schedules for multiple jobs in a single query. - - Returns dict of job_id -> {weekday: [times]} schedule maps. 
- """ - MIN_OCCURRENCES = 3 - - if not job_ids: - return {} - - # Load all historical runs for schedule inference in one query - try: - runs = ( - JobRun.query - .filter( - JobRun.job_id.in_(job_ids), - JobRun.run_at.isnot(None), - JobRun.missed.is_(False), - JobRun.mail_message_id.isnot(None), - ) - .order_by(JobRun.job_id, JobRun.run_at.desc()) - .limit(len(job_ids) * 500) # ~500 runs per job max - .all() - ) - except Exception: - runs = [] - - # Group runs by job_id - runs_by_job: dict[int, list] = {jid: [] for jid in job_ids} - for r in runs: - if r.job_id in runs_by_job and len(runs_by_job[r.job_id]) < 500: - runs_by_job[r.job_id].append(r) - - # Process each job's runs - result = {} - for job_id in job_ids: - job_runs = runs_by_job.get(job_id, []) - schedule = {i: [] for i in range(7)} - - if not job_runs: - result[job_id] = schedule - continue - - counts = {i: {} for i in range(7)} - for r in job_runs: - if not r.run_at: - continue - dt = r.run_at - if tz is not None: - try: - if dt.tzinfo is None: - dt = dt.replace(tzinfo=datetime_module.timezone.utc).astimezone(tz) - else: - dt = dt.astimezone(tz) - except Exception: - pass - - wd = dt.weekday() - minute_bucket = (dt.minute // 15) * 15 - tstr = f"{dt.hour:02d}:{minute_bucket:02d}" - counts[wd][tstr] = int(counts[wd].get(tstr, 0)) + 1 - - for wd in range(7): - keep = [t for t, c in counts[wd].items() if int(c) >= MIN_OCCURRENCES] - schedule[wd] = sorted(keep) - - result[job_id] = schedule - - return result - - -def _batch_infer_monthly_schedules(job_ids: list[int], tz) -> dict[int, dict | None]: - """Batch infer monthly schedules for multiple jobs. - - Returns dict of job_id -> monthly schedule dict or None. 
- """ - MIN_OCCURRENCES = 3 - - if not job_ids: - return {} - - # Load runs for monthly inference - try: - runs = ( - JobRun.query - .filter( - JobRun.job_id.in_(job_ids), - JobRun.run_at.isnot(None), - JobRun.missed.is_(False), - JobRun.mail_message_id.isnot(None), - ) - .order_by(JobRun.job_id, JobRun.run_at.asc()) - .limit(len(job_ids) * 500) - .all() - ) - except Exception: - runs = [] - - # Group runs by job_id - runs_by_job: dict[int, list] = {jid: [] for jid in job_ids} - for r in runs: - if r.job_id in runs_by_job and len(runs_by_job[r.job_id]) < 500: - runs_by_job[r.job_id].append(r) - - result = {} - for job_id in job_ids: - job_runs = runs_by_job.get(job_id, []) - - if len(job_runs) < MIN_OCCURRENCES: - result[job_id] = None - continue - - # Convert to local time - local_dts = [] - for r in job_runs: - if not r.run_at: - continue - dt = r.run_at - if tz is not None: - try: - if dt.tzinfo is None: - dt = dt.replace(tzinfo=datetime_module.timezone.utc).astimezone(tz) - else: - dt = dt.astimezone(tz) - except Exception: - pass - local_dts.append(dt) - - if len(local_dts) < MIN_OCCURRENCES: - result[job_id] = None - continue - - # Cadence heuristic - local_dts_sorted = sorted(local_dts) - gaps = [] - for i in range(1, len(local_dts_sorted)): - try: - delta_days = (local_dts_sorted[i] - local_dts_sorted[i - 1]).total_seconds() / 86400.0 - if delta_days > 0: - gaps.append(delta_days) - except Exception: - continue - - if gaps: - gaps_sorted = sorted(gaps) - median_gap = gaps_sorted[len(gaps_sorted) // 2] - if median_gap < 20.0: - result[job_id] = None - continue - - # Count day-of-month occurrences - dom_counts = {} - time_counts_by_dom = {} - for dt in local_dts: - dom = int(dt.day) - dom_counts[dom] = int(dom_counts.get(dom, 0)) + 1 - - minute_bucket = (dt.minute // 15) * 15 - tstr = f"{int(dt.hour):02d}:{int(minute_bucket):02d}" - if dom not in time_counts_by_dom: - time_counts_by_dom[dom] = {} - time_counts_by_dom[dom][tstr] = 
int(time_counts_by_dom[dom].get(tstr, 0)) + 1 - - best_dom = None - best_dom_count = 0 - for dom, c in dom_counts.items(): - if int(c) >= MIN_OCCURRENCES and int(c) > best_dom_count: - best_dom = int(dom) - best_dom_count = int(c) - - if best_dom is None: - result[job_id] = None - continue - - time_counts = time_counts_by_dom.get(best_dom) or {} - keep_times = [t for t, c in time_counts.items() if int(c) >= MIN_OCCURRENCES] - if not keep_times: - best_t = None - best_c = 0 - for t, c in time_counts.items(): - if int(c) > best_c: - best_t = t - best_c = int(c) - if best_t: - keep_times = [best_t] - - keep_times = sorted(set(keep_times)) - if not keep_times: - result[job_id] = None - continue - - result[job_id] = {"day_of_month": int(best_dom), "times": keep_times} - - return result - - @main_bp.route("/daily-jobs") @login_required @roles_required("admin", "operator", "viewer") @@ -236,6 +30,8 @@ def daily_jobs(): missed_start_date = getattr(settings, "daily_jobs_start_date", None) # Day window: treat run_at as UTC-naive timestamps stored in UTC (existing behavior) + # Note: if your DB stores local-naive timestamps, this still works because the same logic + # is used consistently in schedule inference and details. 
if tz: local_midnight = datetime( year=target_date.year, @@ -278,7 +74,6 @@ def daily_jobs(): weekday_idx = target_date.weekday() # 0=Mon..6=Sun - # Load all non-archived jobs with customer eagerly loaded jobs = ( Job.query.join(Customer, isouter=True) .filter(Job.archived.is_(False)) @@ -286,112 +81,18 @@ def daily_jobs(): .all() ) - # Filter out job types that should skip schedule inference - eligible_jobs = [] - for job in jobs: - bs = (job.backup_software or '').strip().lower() - bt = (job.backup_type or '').strip().lower() - if (bs, bt) not in _SKIP_SCHEDULE_TYPES: - eligible_jobs.append(job) - - job_ids = [j.id for j in eligible_jobs] - - # Batch load all today's runs for all jobs in one query - all_runs_today = [] - if job_ids: - try: - all_runs_today = ( - JobRun.query - .filter( - JobRun.job_id.in_(job_ids), - JobRun.run_at >= start_of_day, - JobRun.run_at < end_of_day, - ) - .order_by(JobRun.job_id, JobRun.run_at.asc()) - .all() - ) - except Exception: - all_runs_today = [] - - # Group runs by job_id - runs_by_job: dict[int, list] = {jid: [] for jid in job_ids} - for r in all_runs_today: - if r.job_id in runs_by_job: - runs_by_job[r.job_id].append(r) - - # Batch infer weekly schedules - schedule_maps = _batch_infer_schedules(job_ids, tz) - - # For jobs without weekly schedule, batch infer monthly - jobs_needing_monthly = [ - jid for jid in job_ids - if not (schedule_maps.get(jid, {}).get(weekday_idx) or []) - ] - monthly_schedules = _batch_infer_monthly_schedules(jobs_needing_monthly, tz) if jobs_needing_monthly else {} - - # Batch load ticket indicators - job_has_ticket: dict[int, bool] = {jid: False for jid in job_ids} - job_has_remark: dict[int, bool] = {jid: False for jid in job_ids} - - if job_ids: - try: - ticket_job_ids = db.session.execute( - text( - """ - SELECT DISTINCT ts.job_id - FROM tickets t - JOIN ticket_scopes ts ON ts.ticket_id = t.id - WHERE ts.job_id = ANY(:job_ids) - AND t.active_from_date <= :target_date - AND ( - t.resolved_at IS 
NULL - OR ((t.resolved_at AT TIME ZONE 'UTC' AT TIME ZONE 'Europe/Amsterdam')::date) >= :target_date - ) - """ - ), - {"job_ids": job_ids, "target_date": target_date}, - ).scalars().all() - for jid in ticket_job_ids: - job_has_ticket[jid] = True - except Exception: - pass - - try: - remark_job_ids = db.session.execute( - text( - """ - SELECT DISTINCT rs.job_id - FROM remarks r - JOIN remark_scopes rs ON rs.remark_id = r.id - WHERE rs.job_id = ANY(:job_ids) - AND COALESCE( - r.active_from_date, - ((r.start_date AT TIME ZONE 'UTC' AT TIME ZONE 'Europe/Amsterdam')::date) - ) <= :target_date - AND ( - r.resolved_at IS NULL - OR ((r.resolved_at AT TIME ZONE 'UTC' AT TIME ZONE 'Europe/Amsterdam')::date) >= :target_date - ) - """ - ), - {"job_ids": job_ids, "target_date": target_date}, - ).scalars().all() - for jid in remark_job_ids: - job_has_remark[jid] = True - except Exception: - pass - rows = [] - for job in eligible_jobs: - schedule_map = schedule_maps.get(job.id, {}) + for job in jobs: + schedule_map = _infer_schedule_map_from_runs(job.id) expected_times = schedule_map.get(weekday_idx) or [] - # If no weekly schedule, try monthly + # If no weekly schedule is inferred (e.g. monthly jobs), try monthly inference. if not expected_times: - monthly = monthly_schedules.get(job.id) + monthly = _infer_monthly_schedule_from_runs(job.id) if monthly: dom = int(monthly.get("day_of_month") or 0) mtimes = monthly.get("times") or [] + # For months shorter than dom, treat the last day of month as the scheduled day. 
try: import calendar as _calendar last_dom = _calendar.monthrange(target_date.year, target_date.month)[1] @@ -404,14 +105,69 @@ def daily_jobs(): if not expected_times: continue - runs_for_day = runs_by_job.get(job.id, []) + runs_for_day = ( + JobRun.query.filter( + JobRun.job_id == job.id, + JobRun.run_at >= start_of_day, + JobRun.run_at < end_of_day, + ) + .order_by(JobRun.run_at.asc()) + .all() + ) run_count = len(runs_for_day) customer_name = job.customer.name if job.customer else "" - # Use pre-loaded ticket/remark indicators - has_active_ticket = job_has_ticket.get(job.id, False) - has_active_remark = job_has_remark.get(job.id, False) + # Ticket/Remark indicators for this job on this date + # Tickets: active-from date should apply to subsequent runs until resolved. + has_active_ticket = False + has_active_remark = False + try: + t_exists = db.session.execute( + text( + """ + SELECT 1 + FROM tickets t + JOIN ticket_scopes ts ON ts.ticket_id = t.id + WHERE ts.job_id = :job_id + AND t.active_from_date <= :target_date + AND ( + t.resolved_at IS NULL + OR ((t.resolved_at AT TIME ZONE 'UTC' AT TIME ZONE 'Europe/Amsterdam')::date) >= :target_date + ) + LIMIT 1 + """ + ), + {"job_id": job.id, "target_date": target_date}, + ).first() + + has_active_ticket = bool(t_exists) + + r_exists = db.session.execute( + text( + """ + SELECT 1 + FROM remarks r + JOIN remark_scopes rs ON rs.remark_id = r.id + WHERE rs.job_id = :job_id + AND COALESCE( + r.active_from_date, + ((r.start_date AT TIME ZONE 'UTC' AT TIME ZONE 'Europe/Amsterdam')::date) + ) <= :target_date + AND ( + r.resolved_at IS NULL + OR ((r.resolved_at AT TIME ZONE 'UTC' AT TIME ZONE 'Europe/Amsterdam')::date) >= :target_date + ) + LIMIT 1 + """ + ), + {"job_id": job.id, "target_date": target_date}, + ).first() + + has_active_remark = bool(r_exists) + except Exception: + has_active_ticket = False + has_active_remark = False # We show a single row per job for today. 
last_remark_excerpt = "" diff --git a/containers/backupchecks/src/backend/app/main/routes_run_checks.py b/containers/backupchecks/src/backend/app/main/routes_run_checks.py index 00f3e88..f29f85e 100644 --- a/containers/backupchecks/src/backend/app/main/routes_run_checks.py +++ b/containers/backupchecks/src/backend/app/main/routes_run_checks.py @@ -1576,6 +1576,11 @@ def api_run_checks_autotask_existing_tickets(): """List open (non-terminal) Autotask tickets for the selected run's customer. Phase 2.2: used by the Run Checks modal to link an existing PSA ticket. + + Search behaviour: + - Always searches tickets for the customer's company + - If search term looks like a ticket number (starts with T + digits), also searches + across all companies to enable linking overarching issues """ try: @@ -1640,20 +1645,43 @@ def api_run_checks_autotask_existing_tickets(): # Best-effort; list will still work without labels. pass + # First: query tickets for this customer's company tickets = client.query_tickets_for_company( int(customer.autotask_company_id), search=q, exclude_status_ids=sorted(AUTOTASK_TERMINAL_STATUS_IDS), limit=75, ) + + # Second: if search looks like a ticket number, also search across all companies + # This allows linking overarching issues that span multiple companies + cross_company_tickets = [] + if q and q.upper().startswith("T") and any(ch.isdigit() for ch in q): + try: + cross_company_tickets = client.query_tickets_by_number( + q, + exclude_status_ids=sorted(AUTOTASK_TERMINAL_STATUS_IDS), + limit=10, + ) + except Exception: + # Best-effort; main company query already succeeded + pass + except Exception as exc: return jsonify({"status": "error", "message": f"Autotask ticket lookup failed: {exc}"}), 400 + # Combine and deduplicate results + seen_ids = set() items = [] - for t in tickets or []: + + def add_ticket(t): if not isinstance(t, dict): - continue + return tid = t.get("id") + if tid in seen_ids: + return + seen_ids.add(tid) + tnum = 
(t.get("ticketNumber") or t.get("number") or "") title = (t.get("title") or "") st = t.get("status") @@ -1672,6 +1700,14 @@ def api_run_checks_autotask_existing_tickets(): } ) + # Add company tickets first (primary results) + for t in tickets or []: + add_ticket(t) + + # Then add cross-company tickets (secondary results for ticket number search) + for t in cross_company_tickets or []: + add_ticket(t) + # Sort: newest-ish first. Autotask query ordering isn't guaranteed, so we provide a stable sort. items.sort(key=lambda x: (x.get("ticketNumber") or ""), reverse=True) @@ -1814,8 +1850,12 @@ def api_run_checks_autotask_link_existing_ticket(): def api_run_checks_autotask_resolve_note(): """Post a user-visible 'should be resolved' update to an existing Autotask ticket. - This step does NOT close the ticket in Autotask. + Status update behaviour (per API contract section 9): + - If NO time entries exist: ticket is closed (status 5 = Complete) + - If time entries exist: ticket remains open + Primary behaviour: create a Ticket note via POST /Tickets/{id}/Notes so the message is clearly visible. + Then updates the ticket resolution field which triggers the conditional status update. Fallback behaviour: if TicketNote create is not supported (HTTP 404), append the marker text to the Ticket description via PUT /Tickets and verify persistence. 
""" @@ -1847,6 +1887,19 @@ def api_run_checks_autotask_resolve_note(): if ticket_id <= 0: return jsonify({"status": "error", "message": "Run has an invalid Autotask ticket id."}), 400 + try: + client = _build_autotask_client_from_settings() + except Exception as exc: + return jsonify({"status": "error", "message": f"Autotask client setup failed: {exc}"}), 400 + + # Check for time entries to determine ticket closure status + # Per API contract section 9: ticket closes only if no time entries exist + try: + time_entries = client.query_time_entries_by_ticket_id(ticket_id) + has_time_entries = len(time_entries) > 0 + except Exception: + has_time_entries = False # Assume no time entries if query fails + tz_name = _get_ui_timezone_name() tz = _get_ui_timezone() now_utc = datetime.utcnow().replace(tzinfo=timezone.utc) @@ -1855,19 +1908,20 @@ def api_run_checks_autotask_resolve_note(): actor = (getattr(current_user, "email", None) or getattr(current_user, "username", None) or "operator") ticket_number = str(getattr(run, "autotask_ticket_number", "") or "").strip() + # Build dynamic message based on time entry check marker = "[Backupchecks] Marked as resolved in Backupchecks" + if has_time_entries: + status_note = "(ticket remains open in Autotask due to existing time entries)" + else: + status_note = "(ticket will be closed in Autotask)" + body = ( - f"{marker} (ticket remains open in Autotask).\n" + f"{marker} {status_note}.\n" f"Time: {now} ({tz_name})\n" f"By: {actor}\n" + (f"Ticket: {ticket_number}\n" if ticket_number else "") ) - try: - client = _build_autotask_client_from_settings() - except Exception as exc: - return jsonify({"status": "error", "message": f"Autotask client setup failed: {exc}"}), 400 - # 1) Preferred: create an explicit TicketNote (user-visible update) try: note_payload = { diff --git a/containers/backupchecks/src/backend/app/main/routes_settings.py b/containers/backupchecks/src/backend/app/main/routes_settings.py index 189fa7d..af9f86a 100644 --- 
a/containers/backupchecks/src/backend/app/main/routes_settings.py +++ b/containers/backupchecks/src/backend/app/main/routes_settings.py @@ -408,6 +408,7 @@ def settings(): if request.method == "POST": autotask_form_touched = any(str(k).startswith("autotask_") for k in (request.form or {}).keys()) + import_form_touched = any(str(k).startswith("auto_import_") or str(k).startswith("manual_import_") or str(k).startswith("ingest_eml_") for k in (request.form or {}).keys()) # NOTE: The Settings UI has multiple tabs with separate forms. # Only update values that are present in the submitted form, to avoid @@ -505,7 +506,9 @@ def settings(): settings.daily_jobs_start_date = None # Import configuration - if "auto_import_enabled" in request.form: + # Checkbox: only update when any import field is present (form was submitted) + # Unchecked checkboxes are not sent by browsers, so check import_form_touched + if import_form_touched: settings.auto_import_enabled = bool(request.form.get("auto_import_enabled")) if "auto_import_interval_minutes" in request.form: diff --git a/containers/backupchecks/src/backend/app/main/routes_shared.py b/containers/backupchecks/src/backend/app/main/routes_shared.py index 0af7c3a..e53fab2 100644 --- a/containers/backupchecks/src/backend/app/main/routes_shared.py +++ b/containers/backupchecks/src/backend/app/main/routes_shared.py @@ -534,18 +534,13 @@ def _recompute_override_flags_for_runs(job_ids: list[int] | None = None, start_a except Exception: runs = [] - # Batch load all jobs to avoid N+1 queries - job_ids = {run.job_id for run in runs if run.job_id} - jobs_by_id = {} - if job_ids: - try: - jobs_by_id = {j.id: j for j in Job.query.filter(Job.id.in_(job_ids)).all()} - except Exception: - jobs_by_id = {} - updated = 0 for run in runs: - job = jobs_by_id.get(run.job_id) + job = None + try: + job = Job.query.get(run.job_id) + except Exception: + job = None if not job: continue diff --git a/containers/backupchecks/src/backend/app/migrations.py 
b/containers/backupchecks/src/backend/app/migrations.py index f11213d..be30fd6 100644 --- a/containers/backupchecks/src/backend/app/migrations.py +++ b/containers/backupchecks/src/backend/app/migrations.py @@ -172,6 +172,7 @@ def migrate_system_settings_ui_timezone() -> None: except Exception as exc: print(f"[migrations] Failed to migrate system_settings.ui_timezone: {exc}") + def migrate_system_settings_autotask_integration() -> None: """Add Autotask integration columns to system_settings if missing.""" @@ -248,8 +249,138 @@ def migrate_customers_autotask_company_mapping() -> None: print(f"[migrations] Failed to migrate customers autotask company mapping columns: {exc}") +def migrate_tickets_resolved_origin() -> None: + """Add resolved_origin column to tickets if missing. + + This column stores the origin of the resolution (psa | backupchecks). + """ + + table = "tickets" + column = "resolved_origin" + + try: + engine = db.get_engine() + except Exception as exc: + print(f"[migrations] Could not get engine for tickets resolved_origin migration: {exc}") + return + + try: + if _column_exists(table, column): + print("[migrations] tickets.resolved_origin already exists.") + return + + with engine.begin() as conn: + conn.execute(text(f'ALTER TABLE "{table}" ADD COLUMN {column} VARCHAR(32)')) + + print("[migrations] migrate_tickets_resolved_origin completed.") + except Exception as exc: + print(f"[migrations] Failed to migrate tickets.resolved_origin: {exc}") +def migrate_job_runs_autotask_ticket_fields() -> None: + """Add Autotask ticket linkage fields to job_runs if missing. 
+ + Columns: + - job_runs.autotask_ticket_id (INTEGER NULL) + - job_runs.autotask_ticket_number (VARCHAR(64) NULL) + - job_runs.autotask_ticket_created_at (TIMESTAMP NULL) + - job_runs.autotask_ticket_created_by_user_id (INTEGER NULL, FK users.id) + """ + + table = "job_runs" + try: + engine = db.get_engine() + except Exception as exc: + print(f"[migrations] Could not get engine for job_runs Autotask ticket migration: {exc}") + return + + try: + with engine.begin() as conn: + existing = _get_table_columns(conn, table) + + if "autotask_ticket_id" not in existing: + print("[migrations] Adding job_runs.autotask_ticket_id column...") + conn.execute(text(f'ALTER TABLE "{table}" ADD COLUMN autotask_ticket_id INTEGER')) + + if "autotask_ticket_number" not in existing: + print("[migrations] Adding job_runs.autotask_ticket_number column...") + conn.execute(text(f'ALTER TABLE "{table}" ADD COLUMN autotask_ticket_number VARCHAR(64)')) + + if "autotask_ticket_created_at" not in existing: + print("[migrations] Adding job_runs.autotask_ticket_created_at column...") + conn.execute(text(f'ALTER TABLE "{table}" ADD COLUMN autotask_ticket_created_at TIMESTAMP')) + + if "autotask_ticket_created_by_user_id" not in existing: + print("[migrations] Adding job_runs.autotask_ticket_created_by_user_id column...") + conn.execute(text(f'ALTER TABLE "{table}" ADD COLUMN autotask_ticket_created_by_user_id INTEGER')) + + print("[migrations] migrate_job_runs_autotask_ticket_fields completed.") + except Exception as exc: + print(f"[migrations] Failed to migrate job_runs Autotask ticket fields: {exc}") + + +def migrate_job_runs_autotask_ticket_deleted_fields() -> None: + """Add Autotask deleted ticket tracking fields to job_runs if missing. 
+ + Columns: + - job_runs.autotask_ticket_deleted_at (TIMESTAMP NULL) + - job_runs.autotask_ticket_deleted_by_resource_id (INTEGER NULL) + """ + + table = "job_runs" + try: + engine = db.get_engine() + except Exception as exc: + print(f"[migrations] Could not get engine for job_runs Autotask deleted fields migration: {exc}") + return + + try: + with engine.begin() as conn: + existing = _get_table_columns(conn, table) + + if "autotask_ticket_deleted_at" not in existing: + print("[migrations] Adding job_runs.autotask_ticket_deleted_at column...") + conn.execute(text(f'ALTER TABLE "{table}" ADD COLUMN autotask_ticket_deleted_at TIMESTAMP')) + + if "autotask_ticket_deleted_by_resource_id" not in existing: + print("[migrations] Adding job_runs.autotask_ticket_deleted_by_resource_id column...") + conn.execute(text(f'ALTER TABLE "{table}" ADD COLUMN autotask_ticket_deleted_by_resource_id INTEGER')) + + print("[migrations] migrate_job_runs_autotask_ticket_deleted_fields completed.") + except Exception as exc: + print(f"[migrations] Failed to migrate job_runs Autotask deleted fields: {exc}") + + +def migrate_job_runs_autotask_ticket_deleted_by_name_fields() -> None: + """Add Autotask deleted ticket by-name fields to job_runs if missing. 
+ + Columns: + - job_runs.autotask_ticket_deleted_by_first_name (VARCHAR(255) NULL) + - job_runs.autotask_ticket_deleted_by_last_name (VARCHAR(255) NULL) + """ + + table = "job_runs" + try: + engine = db.get_engine() + except Exception as exc: + print(f"[migrations] Could not get engine for job_runs Autotask deleted by-name migration: {exc}") + return + + try: + with engine.begin() as conn: + existing = _get_table_columns(conn, table) + + if "autotask_ticket_deleted_by_first_name" not in existing: + print("[migrations] Adding job_runs.autotask_ticket_deleted_by_first_name column...") + conn.execute(text(f'ALTER TABLE "{table}" ADD COLUMN autotask_ticket_deleted_by_first_name VARCHAR(255)')) + + if "autotask_ticket_deleted_by_last_name" not in existing: + print("[migrations] Adding job_runs.autotask_ticket_deleted_by_last_name column...") + conn.execute(text(f'ALTER TABLE "{table}" ADD COLUMN autotask_ticket_deleted_by_last_name VARCHAR(255)')) + + print("[migrations] migrate_job_runs_autotask_ticket_deleted_by_name_fields completed.") + except Exception as exc: + print(f"[migrations] Failed to migrate job_runs Autotask deleted by-name fields: {exc}") def migrate_mail_messages_columns() -> None: @@ -935,147 +1066,6 @@ def run_migrations() -> None: print("[migrations] All migrations completed.") -def migrate_job_runs_autotask_ticket_fields() -> None: - """Add Autotask ticket linkage fields to job_runs if missing. 
- - Columns: - - job_runs.autotask_ticket_id (INTEGER NULL) - - job_runs.autotask_ticket_number (VARCHAR(64) NULL) - - job_runs.autotask_ticket_created_at (TIMESTAMP NULL) - - job_runs.autotask_ticket_created_by_user_id (INTEGER NULL, FK users.id) - """ - - table = "job_runs" - try: - engine = db.get_engine() - except Exception as exc: - print(f"[migrations] Could not get engine for job_runs Autotask ticket migration: {exc}") - return - - try: - with engine.begin() as conn: - cols = _get_table_columns(conn, table) - if not cols: - print("[migrations] job_runs table not found; skipping migrate_job_runs_autotask_ticket_fields.") - return - - if "autotask_ticket_id" not in cols: - print("[migrations] Adding job_runs.autotask_ticket_id column...") - conn.execute(text('ALTER TABLE "job_runs" ADD COLUMN autotask_ticket_id INTEGER')) - - if "autotask_ticket_number" not in cols: - print("[migrations] Adding job_runs.autotask_ticket_number column...") - conn.execute(text('ALTER TABLE "job_runs" ADD COLUMN autotask_ticket_number VARCHAR(64)')) - - if "autotask_ticket_created_at" not in cols: - print("[migrations] Adding job_runs.autotask_ticket_created_at column...") - conn.execute(text('ALTER TABLE "job_runs" ADD COLUMN autotask_ticket_created_at TIMESTAMP')) - - if "autotask_ticket_created_by_user_id" not in cols: - print("[migrations] Adding job_runs.autotask_ticket_created_by_user_id column...") - conn.execute(text('ALTER TABLE "job_runs" ADD COLUMN autotask_ticket_created_by_user_id INTEGER')) - - try: - conn.execute( - text( - 'ALTER TABLE "job_runs" ' - 'ADD CONSTRAINT job_runs_autotask_ticket_created_by_user_id_fkey ' - 'FOREIGN KEY (autotask_ticket_created_by_user_id) REFERENCES users(id) ' - 'ON DELETE SET NULL' - ) - ) - except Exception as exc: - print( - f"[migrations] Could not add FK job_runs_autotask_ticket_created_by_user_id -> users.id (continuing): {exc}" - ) - - conn.execute(text('CREATE INDEX IF NOT EXISTS idx_job_runs_autotask_ticket_id ON "job_runs" 
(autotask_ticket_id)')) - except Exception as exc: - print(f"[migrations] migrate_job_runs_autotask_ticket_fields failed (continuing): {exc}") - return - - print("[migrations] migrate_job_runs_autotask_ticket_fields completed.") - - -def migrate_job_runs_autotask_ticket_deleted_fields() -> None: - """Add Autotask deleted ticket audit fields to job_runs if missing. - - Columns: - - job_runs.autotask_ticket_deleted_at (TIMESTAMP NULL) - - job_runs.autotask_ticket_deleted_by_resource_id (INTEGER NULL) - """ - - table = "job_runs" - try: - engine = db.get_engine() - except Exception as exc: - print(f"[migrations] Could not get engine for job_runs Autotask ticket deleted fields migration: {exc}") - return - - try: - with engine.begin() as conn: - cols = _get_table_columns(conn, table) - if not cols: - print("[migrations] job_runs table not found; skipping migrate_job_runs_autotask_ticket_deleted_fields.") - return - - if "autotask_ticket_deleted_at" not in cols: - print("[migrations] Adding job_runs.autotask_ticket_deleted_at column...") - conn.execute(text('ALTER TABLE "job_runs" ADD COLUMN autotask_ticket_deleted_at TIMESTAMP')) - - if "autotask_ticket_deleted_by_resource_id" not in cols: - print("[migrations] Adding job_runs.autotask_ticket_deleted_by_resource_id column...") - conn.execute(text('ALTER TABLE "job_runs" ADD COLUMN autotask_ticket_deleted_by_resource_id INTEGER')) - - conn.execute(text('CREATE INDEX IF NOT EXISTS idx_job_runs_autotask_ticket_deleted_by_resource_id ON "job_runs" (autotask_ticket_deleted_by_resource_id)')) - - conn.execute(text('CREATE INDEX IF NOT EXISTS idx_job_runs_autotask_ticket_deleted_at ON "job_runs" (autotask_ticket_deleted_at)')) - except Exception as exc: - print(f"[migrations] migrate_job_runs_autotask_ticket_deleted_fields failed (continuing): {exc}") - return - - print("[migrations] migrate_job_runs_autotask_ticket_deleted_fields completed.") - - -def migrate_job_runs_autotask_ticket_deleted_by_name_fields() -> None: - 
"""Add Autotask deleted-by name audit fields to job_runs if missing. - - Columns: - - job_runs.autotask_ticket_deleted_by_first_name (VARCHAR(255) NULL) - - job_runs.autotask_ticket_deleted_by_last_name (VARCHAR(255) NULL) - """ - - table = "job_runs" - - try: - engine = db.get_engine() - except Exception as exc: - print(f"[migrations] Could not get engine for job_runs Autotask deleted-by name fields migration: {exc}") - return - - try: - with engine.begin() as conn: - cols = _get_table_columns(conn, table) - if not cols: - print("[migrations] job_runs table not found; skipping migrate_job_runs_autotask_ticket_deleted_by_name_fields.") - return - - if "autotask_ticket_deleted_by_first_name" not in cols: - print("[migrations] Adding job_runs.autotask_ticket_deleted_by_first_name column...") - conn.execute(text('ALTER TABLE "job_runs" ADD COLUMN autotask_ticket_deleted_by_first_name VARCHAR(255)')) - - if "autotask_ticket_deleted_by_last_name" not in cols: - print("[migrations] Adding job_runs.autotask_ticket_deleted_by_last_name column...") - conn.execute(text('ALTER TABLE "job_runs" ADD COLUMN autotask_ticket_deleted_by_last_name VARCHAR(255)')) - - conn.execute(text('CREATE INDEX IF NOT EXISTS idx_job_runs_autotask_ticket_deleted_by_first_name ON "job_runs" (autotask_ticket_deleted_by_first_name)')) - conn.execute(text('CREATE INDEX IF NOT EXISTS idx_job_runs_autotask_ticket_deleted_by_last_name ON "job_runs" (autotask_ticket_deleted_by_last_name)')) - except Exception as exc: - print(f"[migrations] migrate_job_runs_autotask_ticket_deleted_by_name_fields failed (continuing): {exc}") - - print("[migrations] migrate_job_runs_autotask_ticket_deleted_by_name_fields completed.") - - def migrate_jobs_archiving() -> None: """Add archiving columns to jobs if missing. @@ -1434,34 +1424,6 @@ def migrate_tickets_active_from_date() -> None: - -def migrate_tickets_resolved_origin() -> None: - """Add tickets.resolved_origin column if missing. 
- - Used to show whether a ticket was resolved by PSA polling or manually inside Backupchecks. - """ - - table = "tickets" - try: - engine = db.get_engine() - except Exception as exc: - print(f"[migrations] Could not get engine for tickets resolved_origin migration: {exc}") - return - - try: - with engine.begin() as conn: - cols = _get_table_columns(conn, table) - if not cols: - print("[migrations] tickets table not found; skipping migrate_tickets_resolved_origin.") - return - if "resolved_origin" not in cols: - print("[migrations] Adding tickets.resolved_origin column...") - conn.execute(text('ALTER TABLE "tickets" ADD COLUMN resolved_origin VARCHAR(32)')) - except Exception as exc: - print(f"[migrations] tickets resolved_origin migration failed (continuing): {exc}") - - print("[migrations] migrate_tickets_resolved_origin completed.") - def migrate_mail_messages_overall_message() -> None: """Add overall_message column to mail_messages if missing.""" table = "mail_messages" diff --git a/containers/backupchecks/src/backend/app/models.py b/containers/backupchecks/src/backend/app/models.py index 79aa0b1..8ecfd83 100644 --- a/containers/backupchecks/src/backend/app/models.py +++ b/containers/backupchecks/src/backend/app/models.py @@ -253,12 +253,6 @@ class Job(db.Model): class JobRun(db.Model): __tablename__ = "job_runs" - __table_args__ = ( - db.Index("idx_job_run_job_id", "job_id"), - db.Index("idx_job_run_job_id_run_at", "job_id", "run_at"), - db.Index("idx_job_run_job_id_reviewed_at", "job_id", "reviewed_at"), - db.Index("idx_job_run_mail_message_id", "mail_message_id"), - ) id = db.Column(db.Integer, primary_key=True) @@ -297,8 +291,6 @@ class JobRun(db.Model): autotask_ticket_deleted_by_first_name = db.Column(db.String(255), nullable=True) autotask_ticket_deleted_by_last_name = db.Column(db.String(255), nullable=True) - - created_at = db.Column(db.DateTime, default=datetime.utcnow, nullable=False) updated_at = db.Column( db.DateTime, default=datetime.utcnow, 
onupdate=datetime.utcnow, nullable=False @@ -310,7 +302,6 @@ class JobRun(db.Model): ) reviewed_by = db.relationship("User", foreign_keys=[reviewed_by_user_id]) - autotask_ticket_created_by = db.relationship("User", foreign_keys=[autotask_ticket_created_by_user_id]) @@ -352,11 +343,6 @@ class JobObject(db.Model): class MailMessage(db.Model): __tablename__ = "mail_messages" - __table_args__ = ( - db.Index("idx_mail_message_job_id", "job_id"), - db.Index("idx_mail_message_location", "location"), - db.Index("idx_mail_message_job_id_location", "job_id", "location"), - ) id = db.Column(db.Integer, primary_key=True) @@ -416,9 +402,6 @@ class MailMessage(db.Model): class MailObject(db.Model): __tablename__ = "mail_objects" - __table_args__ = ( - db.Index("idx_mail_object_mail_message_id", "mail_message_id"), - ) id = db.Column(db.Integer, primary_key=True) mail_message_id = db.Column(db.Integer, db.ForeignKey("mail_messages.id"), nullable=False) @@ -453,10 +436,6 @@ class Ticket(db.Model): class TicketScope(db.Model): __tablename__ = "ticket_scopes" - __table_args__ = ( - db.Index("idx_ticket_scope_ticket_id", "ticket_id"), - db.Index("idx_ticket_scope_job_id", "job_id"), - ) id = db.Column(db.Integer, primary_key=True) ticket_id = db.Column(db.Integer, db.ForeignKey("tickets.id"), nullable=False) scope_type = db.Column(db.String(32), nullable=False) @@ -498,10 +477,6 @@ class Remark(db.Model): class RemarkScope(db.Model): __tablename__ = "remark_scopes" - __table_args__ = ( - db.Index("idx_remark_scope_remark_id", "remark_id"), - db.Index("idx_remark_scope_job_id", "job_id"), - ) id = db.Column(db.Integer, primary_key=True) remark_id = db.Column(db.Integer, db.ForeignKey("remarks.id"), nullable=False) scope_type = db.Column(db.String(32), nullable=False) @@ -724,4 +699,4 @@ class ReportObjectSummary(db.Model): report = db.relationship( "ReportDefinition", backref=db.backref("object_summaries", lazy="dynamic", cascade="all, delete-orphan"), - ) + ) \ No newline at end of 
file diff --git a/containers/backupchecks/src/static/css/changelog.css b/containers/backupchecks/src/static/css/changelog.css new file mode 100644 index 0000000..66de05d --- /dev/null +++ b/containers/backupchecks/src/static/css/changelog.css @@ -0,0 +1,212 @@ +/* Changelog specific styling */ + +/* Navigation sidebar */ +.changelog-nav { + padding: 1rem; + background: var(--bs-body-bg); + border-radius: 0.5rem; + border: 1px solid var(--bs-border-color); +} + +.changelog-nav .changelog-nav-link { + padding: 0.15rem 0.5rem !important; + margin-bottom: 0.15rem !important; + border-radius: 0.25rem; + color: var(--bs-body-color); + text-decoration: none; + transition: all 0.15s ease-in-out; + font-size: 0.85rem !important; + line-height: 1.1 !important; + display: block; +} + +.changelog-nav .changelog-nav-link span { + font-size: 0.7rem !important; + margin-top: 0; + line-height: 1 !important; + display: block; + opacity: 0.7; +} + +.changelog-nav-link:hover { + background: var(--bs-tertiary-bg); + color: var(--bs-primary); +} + +.changelog-nav-link:active, +.changelog-nav-link.active { + background: var(--bs-primary); + color: white; +} + +/* Version cards */ +.changelog-version-card { + border: 1px solid var(--bs-border-color); + box-shadow: 0 2px 4px rgba(0, 0, 0, 0.05); + transition: box-shadow 0.2s ease-in-out; + scroll-margin-top: 80px; +} + +.changelog-version-card:hover { + box-shadow: 0 4px 8px rgba(0, 0, 0, 0.1); +} + +.changelog-version-card .card-header { + padding: 1.25rem 1.5rem; + background: linear-gradient(135deg, var(--bs-primary) 0%, var(--bs-primary-dark, #0056b3) 100%); +} + +.changelog-version-card .card-body { + padding: 1.5rem; +} + +/* Summary section */ +.changelog-summary { + padding: 1rem; + background: var(--bs-light); + border-left: 4px solid var(--bs-primary); + border-radius: 0.375rem; +} + +[data-bs-theme="dark"] .changelog-summary { + background: var(--bs-dark); +} + +.changelog-summary .lead { + margin-bottom: 0; + font-size: 1rem; 
+ line-height: 1.6; +} + +/* Section styling */ +.changelog-section { + border-bottom: 1px solid var(--bs-border-color); + padding-bottom: 1.5rem; +} + +.changelog-section:last-child { + border-bottom: none; + padding-bottom: 0; +} + +/* Type badges */ +.changelog-badge-feature { + background: linear-gradient(135deg, #28a745 0%, #20c997 100%); + color: white; + font-weight: 600; + padding: 0.4rem 0.8rem; + font-size: 0.875rem; +} + +.changelog-badge-improvement { + background: linear-gradient(135deg, #17a2b8 0%, #20c997 100%); + color: white; + font-weight: 600; + padding: 0.4rem 0.8rem; + font-size: 0.875rem; +} + +.changelog-badge-fixed { + background: linear-gradient(135deg, #dc3545 0%, #fd7e14 100%); + color: white; + font-weight: 600; + padding: 0.4rem 0.8rem; + font-size: 0.875rem; +} + +.changelog-badge-added { + background: linear-gradient(135deg, #007bff 0%, #6610f2 100%); + color: white; + font-weight: 600; + padding: 0.4rem 0.8rem; + font-size: 0.875rem; +} + +.changelog-badge-removed { + background: linear-gradient(135deg, #6c757d 0%, #495057 100%); + color: white; + font-weight: 600; + padding: 0.4rem 0.8rem; + font-size: 0.875rem; +} + +.changelog-badge-changed { + background: linear-gradient(135deg, #ffc107 0%, #ff9800 100%); + color: #212529; + font-weight: 600; + padding: 0.4rem 0.8rem; + font-size: 0.875rem; +} + +.changelog-badge-documentation { + background: linear-gradient(135deg, #6f42c1 0%, #e83e8c 100%); + color: white; + font-weight: 600; + padding: 0.4rem 0.8rem; + font-size: 0.875rem; +} + +/* Subsection styling */ +.changelog-subsection { + margin-left: 0.5rem; +} + +.changelog-subsection h4 { + font-weight: 600; + margin-bottom: 0.5rem; +} + +/* List styling */ +.changelog-list { + list-style-type: none; + padding-left: 0; + margin-bottom: 0; +} + +.changelog-list li { + padding: 0.4rem 0 0.4rem 1.75rem; + position: relative; + line-height: 1.6; +} + +.changelog-list li::before { + content: "●"; + position: absolute; + left: 0.5rem; + 
color: var(--bs-primary); + font-weight: bold; +} + +.changelog-list li:hover { + background: var(--bs-tertiary-bg); + border-radius: 0.25rem; +} + +/* Nested lists (indented items) */ +.changelog-list li:has(+ li) { + margin-bottom: 0.25rem; +} + +/* Responsive adjustments */ +@media (max-width: 767.98px) { + .changelog-version-card .card-header { + padding: 1rem; + } + + .changelog-version-card .card-body { + padding: 1rem; + } + + .changelog-summary { + padding: 0.75rem; + } + + .changelog-list li { + font-size: 0.95rem; + } +} + +/* Smooth scrolling */ +html { + scroll-behavior: smooth; +} diff --git a/containers/backupchecks/src/templates/layout/base.html b/containers/backupchecks/src/templates/layout/base.html index e7a4176..24b3449 100644 --- a/containers/backupchecks/src/templates/layout/base.html +++ b/containers/backupchecks/src/templates/layout/base.html @@ -12,6 +12,7 @@ + {% block head %}{% endblock %}