Add local SQLite database and sync with Uptime Kuma

Features:
- SQLite database to track monitors and hosts locally
- Uses Uptime Kuma tags to mark monitors as managed by Kuma Strapper
- Sync on startup, before each scan, and on demand via the API (reconciliation sketched below)
- Shows existing monitors when re-scanning a host
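
Roughly how the tag-based reconciliation works, as a minimal sketch (the db
interface and helper names here are illustrative assumptions, not the actual
services/sync.py API):

MANAGED_TAG = "kuma-strapper"  # tag attached to every monitor this tool creates

def reconcile(kuma_monitors: list[dict], db) -> None:
    """Flag locally tracked monitors that no longer exist in Uptime Kuma."""
    live_ids = {m["id"] for m in kuma_monitors}  # assumes Kuma returns an "id"
    for tracked in db.get_all_monitors():  # hypothetical accessor
        if tracked.kuma_monitor_id not in live_ids:
            db.mark_monitor_status(tracked.id, "orphaned")  # hypothetical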

New files:
- backend/services/database.py - SQLite database service (schema sketch below)
- backend/services/sync.py - Sync service for Uptime Kuma reconciliation
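
The monitor and deployment fields in the diff below suggest the shape of the
schema; a rough sketch (the actual DDL in database.py is an assumption):

import sqlite3

# Column names follow the TrackedMonitor / Deployment dataclass fields used
# further down; types and constraints are guesses.
SCHEMA = """
CREATE TABLE IF NOT EXISTS hosts (
    id INTEGER PRIMARY KEY,
    hostname TEXT UNIQUE NOT NULL
);
CREATE TABLE IF NOT EXISTS monitors (
    id INTEGER PRIMARY KEY,
    kuma_monitor_id INTEGER UNIQUE,
    host_id INTEGER REFERENCES hosts(id),
    name TEXT NOT NULL,
    type TEXT NOT NULL,
    target TEXT,
    port INTEGER,
    interval_seconds INTEGER,
    push_metric TEXT,
    status TEXT DEFAULT 'active'
);
CREATE TABLE IF NOT EXISTS deployments (
    id INTEGER PRIMARY KEY,
    monitor_id INTEGER REFERENCES monitors(id),
    host_id INTEGER REFERENCES hosts(id),
    script_path TEXT,
    scheduling_method TEXT,
    scheduling_info TEXT,
    status TEXT
);
"""

def init_db(path: str = "kuma_strapper.db") -> sqlite3.Connection:
    # Database filename is hypothetical; executescript applies the DDL above.
    conn = sqlite3.connect(path)
    conn.executescript(SCHEMA)
    return conn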

API endpoints (usage sketch after the list):
- POST /api/sync - Full sync with Uptime Kuma
- POST /api/sync/host/<hostname> - Sync specific host
- GET /api/hosts - List tracked hosts
- GET /api/hosts/<hostname>/monitors - Get monitors for host
- GET /api/monitors/tracked - Get all tracked monitors
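
Quick usage sketch for the new endpoints (base URL, hostname, and response
shapes are assumptions; adjust to the actual deployment):

import requests

BASE = "http://localhost:8000"  # hypothetical backend address

# Full reconciliation with Uptime Kuma
requests.post(f"{BASE}/api/sync").raise_for_status()

# Re-sync one host, then inspect what is tracked for it
requests.post(f"{BASE}/api/sync/host/web01.example.com").raise_for_status()
for m in requests.get(f"{BASE}/api/hosts/web01.example.com/monitors").json():
    print(m)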

🤖 Generated with [Claude Code](https://claude.com/claude-code)

Co-Authored-By: Claude Opus 4.5 <noreply@anthropic.com>
Author: Debian
Date: 2026-01-06 09:10:26 +00:00
Parent: a65997a391
Commit: 1fd29e449f
11 changed files with 1044 additions and 47 deletions

@@ -5,6 +5,8 @@ import logging
 from services.kuma_client import get_kuma_client, Monitor
 from services.claude_agent import MonitorSuggestion
 from services.ssh_manager import get_ssh_manager
+from services.database import get_database, TrackedMonitor, Deployment
+from services.sync import get_sync_service
 from services import push_scripts
 
 logger = logging.getLogger(__name__)
@@ -74,6 +76,56 @@ class MonitorService:
     def __init__(self):
         self.created_monitors: list[dict] = []
 
+    def _create_and_track_monitor(
+        self,
+        monitor: Monitor,
+        hostname: str,
+        kuma,
+    ) -> dict:
+        """Create a monitor in Uptime Kuma and track it in the database."""
+        try:
+            result = kuma.create_monitor(monitor)
+            kuma_monitor_id = result.get("monitorID")
+
+            # Tag and track in database
+            if kuma_monitor_id:
+                try:
+                    sync = get_sync_service()
+                    sync.add_tag_to_monitor(kuma_monitor_id, hostname)
+
+                    db = get_database()
+                    host = db.get_or_create_host(hostname)
+                    tracked = TrackedMonitor(
+                        id=None,
+                        kuma_monitor_id=kuma_monitor_id,
+                        host_id=host.id,
+                        name=monitor.name,
+                        type=monitor.type,
+                        target=monitor.url or monitor.hostname or monitor.docker_container,
+                        port=monitor.port,
+                        interval_seconds=monitor.interval,
+                        push_metric=None,
+                        status="active",
+                    )
+                    db.add_monitor(tracked)
+                    logger.info(f"Tracked monitor '{monitor.name}' in database")
+                except Exception as e:
+                    logger.warning(f"Failed to tag/track monitor: {e}")
+
+            return {
+                "monitor": monitor.name,
+                "type": monitor.type,
+                "status": "created",
+                "result": result,
+            }
+        except Exception as e:
+            return {
+                "monitor": monitor.name,
+                "type": monitor.type,
+                "status": "failed",
+                "error": str(e),
+            }
+
     def create_default_monitors(
         self,
         hostname: str,
@@ -92,21 +144,8 @@ class MonitorService:
         # Host health monitors
         health_monitors = create_host_health_monitors(hostname, ssh_port)
         for monitor in health_monitors:
-            try:
-                result = kuma.create_monitor(monitor)
-                created.append({
-                    "monitor": monitor.name,
-                    "type": monitor.type,
-                    "status": "created",
-                    "result": result,
-                })
-            except Exception as e:
-                created.append({
-                    "monitor": monitor.name,
-                    "type": monitor.type,
-                    "status": "failed",
-                    "error": str(e),
-                })
+            result = self._create_and_track_monitor(monitor, hostname, kuma)
+            created.append(result)
 
         # Web server monitors
         if web_ports:
@@ -114,41 +153,15 @@ class MonitorService:
                 https = port == 443 or port == 8443
                 web_monitors = create_web_server_monitors(hostname, port, https)
                 for monitor in web_monitors:
-                    try:
-                        result = kuma.create_monitor(monitor)
-                        created.append({
-                            "monitor": monitor.name,
-                            "type": monitor.type,
-                            "status": "created",
-                            "result": result,
-                        })
-                    except Exception as e:
-                        created.append({
-                            "monitor": monitor.name,
-                            "type": monitor.type,
-                            "status": "failed",
-                            "error": str(e),
-                        })
+                    result = self._create_and_track_monitor(monitor, hostname, kuma)
+                    created.append(result)
 
         # Docker container monitors
         if has_docker and containers:
             docker_monitors = create_docker_container_monitors(hostname, containers)
             for monitor in docker_monitors:
-                try:
-                    result = kuma.create_monitor(monitor)
-                    created.append({
-                        "monitor": monitor.name,
-                        "type": monitor.type,
-                        "status": "created",
-                        "result": result,
-                    })
-                except Exception as e:
-                    created.append({
-                        "monitor": monitor.name,
-                        "type": monitor.type,
-                        "status": "failed",
-                        "error": str(e),
-                    })
+                result = self._create_and_track_monitor(monitor, hostname, kuma)
+                created.append(result)
 
         self.created_monitors.extend(created)
         return created
@@ -199,6 +212,34 @@ class MonitorService:
         try:
             result = kuma.create_monitor(monitor)
             kuma_monitor_id = result.get("monitorID")
+
+            # Add kuma-strapper tag and track in database
+            if kuma_monitor_id:
+                try:
+                    sync = get_sync_service()
+                    sync.add_tag_to_monitor(kuma_monitor_id, hostname)
+
+                    # Track in local database
+                    db = get_database()
+                    host = db.get_or_create_host(hostname)
+
+                    tracked = TrackedMonitor(
+                        id=None,
+                        kuma_monitor_id=kuma_monitor_id,
+                        host_id=host.id,
+                        name=monitor.name,
+                        type=monitor.type,
+                        target=suggestion.target,
+                        port=suggestion.port,
+                        interval_seconds=suggestion.interval,
+                        push_metric=suggestion.push_metric,
+                        status="active",
+                    )
+                    db.add_monitor(tracked)
+                    logger.info(f"Tracked monitor '{monitor.name}' in database")
+                except Exception as e:
+                    logger.warning(f"Failed to tag/track monitor: {e}")
+
             response = {
                 "monitor": monitor.name,
                 "type": monitor.type,
@@ -224,6 +265,26 @@ class MonitorService:
                     port=port,
                 )
                 response["deployment"] = deploy_result
+
+                # Track deployment in database
+                if deploy_result.get("status") == "deployed":
+                    try:
+                        db = get_database()
+                        host = db.get_or_create_host(hostname)
+                        tracked = db.get_monitor_by_kuma_id(monitor_id)
+                        if tracked:
+                            deployment = Deployment(
+                                id=None,
+                                monitor_id=tracked.id,
+                                host_id=host.id,
+                                script_path=deploy_result.get("script_path", ""),
+                                scheduling_method=deploy_result.get("scheduling", {}).get("method"),
+                                scheduling_info=deploy_result.get("scheduling", {}).get("info"),
+                                status="deployed",
+                            )
+                            db.add_deployment(deployment)
+                    except Exception as e:
+                        logger.warning(f"Failed to track deployment: {e}")
             else:
                 response["deployment"] = {
                     "status": "failed",