Add push monitor script deployment via SSH
All checks were successful
Build and Push Container / build (push) Successful in 33s
- Add push_scripts.py with bash templates for heartbeat, disk, memory, cpu, and updates monitoring
- Modify kuma_client.py to return push token from created monitors
- Add deploy_push_script() to monitors.py for SSH-based script deployment
- Add heartbeat push_metric type to Claude agent suggestions
- Add /api/monitors/<id>/deploy-script and /api/monitors/deploy-all-scripts endpoints
- Update frontend to show push monitors with deployment status and retry buttons
Scripts are deployed to /usr/local/bin/kuma-push-{metric}-{id}.sh with cron entries.
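On a deployed host this ends up looking roughly like the following (the monitor ID 123, the disk metric, and the 5-minute interval are illustrative values, not part of this commit):

```bash
# Illustrative only: example script path and crontab entry for a hypothetical disk monitor (ID 123).
ls /usr/local/bin/kuma-push-*.sh
# /usr/local/bin/kuma-push-disk-123.sh
crontab -l | grep kuma-push
# */5 * * * * /usr/local/bin/kuma-push-disk-123.sh >/dev/null 2>&1
```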
🤖 Generated with [Claude Code](https://claude.com/claude-code)
Co-Authored-By: Claude Opus 4.5 <noreply@anthropic.com>
108 backend/app.py
@@ -145,6 +145,7 @@ def analyze_with_claude(result: DiscoveryResult):
                "port": m.port,
                "interval": m.interval,
                "reason": m.reason,
                "push_metric": m.push_metric,
            }
            for m in response.monitors
        ],
@@ -190,6 +191,7 @@ def get_scan(scan_id):
                "port": m.port,
                "interval": m.interval,
                "reason": m.reason,
                "push_metric": m.push_metric,
            }
            for m in suggestions.monitors
        ],
@@ -308,6 +310,7 @@ def approve_request(approval_id):
                "port": m.port,
                "interval": m.interval,
                "reason": m.reason,
                "push_metric": m.push_metric,
            }
            for m in response.monitors
        ],
@@ -406,6 +409,111 @@ def create_suggested_monitors():
    return jsonify({"created": created})


@app.route("/api/monitors/<int:monitor_id>/deploy-script", methods=["POST"])
def deploy_push_script(monitor_id):
    """Deploy a push monitoring script to a remote host for an existing push monitor."""
    data = request.json
    hostname = data.get("hostname")
    push_metric = data.get("push_metric")
    username = data.get("username", "root")
    port = data.get("port", 22)
    interval_minutes = data.get("interval_minutes", 5)

    if not hostname:
        return jsonify({"error": "hostname is required"}), 400
    if not push_metric:
        return jsonify({"error": "push_metric is required"}), 400

    valid_metrics = ["heartbeat", "disk", "memory", "cpu", "updates"]
    if push_metric not in valid_metrics:
        return jsonify({"error": f"push_metric must be one of: {', '.join(valid_metrics)}"}), 400

    try:
        kuma = get_kuma_client()
        push_token = kuma.get_monitor_push_token(monitor_id)

        if not push_token:
            return jsonify({"error": "Could not get push token. Is this a push monitor?"}), 400

        monitor_service = get_monitor_service()
        result = monitor_service.deploy_push_script(
            hostname=hostname,
            push_metric=push_metric,
            push_token=push_token,
            monitor_id=monitor_id,
            interval_minutes=interval_minutes,
            username=username,
            port=port,
        )

        return jsonify(result)

    except Exception as e:
        return jsonify({"error": str(e)}), 500


@app.route("/api/monitors/deploy-all-scripts", methods=["POST"])
def deploy_all_push_scripts():
    """Deploy scripts for multiple push monitors at once."""
    data = request.json
    monitors = data.get("monitors", [])

    if not monitors:
        return jsonify({"error": "monitors array is required"}), 400

    results = []
    kuma = get_kuma_client()
    monitor_service = get_monitor_service()

    for monitor_config in monitors:
        monitor_id = monitor_config.get("monitor_id")
        hostname = monitor_config.get("hostname")
        push_metric = monitor_config.get("push_metric")
        username = monitor_config.get("username", "root")
        port = monitor_config.get("port", 22)
        interval_minutes = monitor_config.get("interval_minutes", 5)

        if not monitor_id or not hostname or not push_metric:
            results.append({
                "monitor_id": monitor_id,
                "status": "failed",
                "error": "monitor_id, hostname, and push_metric are required",
            })
            continue

        try:
            push_token = kuma.get_monitor_push_token(monitor_id)

            if not push_token:
                results.append({
                    "monitor_id": monitor_id,
                    "status": "failed",
                    "error": "Could not get push token",
                })
                continue

            result = monitor_service.deploy_push_script(
                hostname=hostname,
                push_metric=push_metric,
                push_token=push_token,
                monitor_id=monitor_id,
                interval_minutes=interval_minutes,
                username=username,
                port=port,
            )
            result["monitor_id"] = monitor_id
            results.append(result)

        except Exception as e:
            results.append({
                "monitor_id": monitor_id,
                "status": "failed",
                "error": str(e),
            })

    return jsonify({"results": results})


# Uptime Kuma authentication endpoints
@app.route("/api/kuma/auth", methods=["GET"])
def kuma_auth_status():
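A rough sketch of how these two endpoints could be exercised once the backend is running; the base URL http://localhost:5000, monitor ID 42, and host server.example.lan are placeholder assumptions, not values from this commit:

```bash
# Deploy the script for one existing push monitor (placeholder values).
curl -X POST http://localhost:5000/api/monitors/42/deploy-script \
  -H 'Content-Type: application/json' \
  -d '{"hostname": "server.example.lan", "push_metric": "disk",
       "username": "root", "port": 22, "interval_minutes": 5}'

# Deploy scripts for several push monitors in one request.
curl -X POST http://localhost:5000/api/monitors/deploy-all-scripts \
  -H 'Content-Type: application/json' \
  -d '{"monitors": [{"monitor_id": 42, "hostname": "server.example.lan", "push_metric": "memory"}]}'
```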
@@ -49,7 +49,7 @@ Always respond with valid JSON in this structure:
      "port": 80,
      "interval": 60,
      "reason": "Why this should be monitored",
      "push_metric": "disk|memory|cpu|updates (only for push type)"
      "push_metric": "heartbeat|disk|memory|cpu|updates (only for push type)"
    }
  ],
  "additional_commands": [
@@ -72,25 +72,30 @@ Always respond with valid JSON in this structure:
## Push Monitors for System Metrics
Always suggest push monitors for system health metrics. These run as cron jobs on the host and push status to Uptime Kuma. Suggest these based on what you see:

1. **Disk Space** - Alert when any partition exceeds 85% usage
1. **Heartbeat** - Simple "I'm alive" check for hosts that can't be reached directly (behind NAT/firewall)
   - Name: "{hostname} - Heartbeat"
   - push_metric: "heartbeat"
   - Use this INSTEAD of ping for remote hosts that Uptime Kuma cannot reach directly

2. **Disk Space** - Alert when any partition exceeds 90% usage
   - Name: "{hostname} - Disk Space"
   - push_metric: "disk"

2. **Memory Usage** - Alert when RAM exceeds 90% usage
3. **Memory Usage** - Alert when RAM exceeds 90% usage
   - Name: "{hostname} - Memory"
   - push_metric: "memory"

3. **CPU Load** - Alert when load average exceeds CPU count
4. **CPU Load** - Alert when 5-min load average exceeds 95% of CPU cores
   - Name: "{hostname} - CPU Load"
   - push_metric: "cpu"

4. **System Updates** - Alert when security updates are pending (Debian/Ubuntu)
5. **System Updates** - Alert when security updates are pending (Debian/Ubuntu/RHEL)
   - Name: "{hostname} - Updates"
   - push_metric: "updates"

For push monitors, set:
- type: "push"
- target: the metric type (disk, memory, cpu, updates)
- target: the metric type (heartbeat, disk, memory, cpu, updates)
- interval: 300 (5 minutes is typical for system metrics)

Be thorough but not excessive. Quality over quantity - suggest monitors that will actually catch real problems."""
@@ -223,10 +223,21 @@ class UptimeKumaClient:
                if monitor.docker_host:
                    kwargs["docker_host"] = monitor.docker_host
            elif monitor.type == "push":
                # Push monitors are created and get a token back
                # Push monitors don't need additional fields
                # The push token is returned in the result
                pass

            # Add conditions field for Uptime Kuma v2 compatibility
            kwargs["conditions"] = []

            result = api.add_monitor(**kwargs)

            # For push monitors, extract the push token from the result
            if monitor.type == "push" and "monitorID" in result:
                monitor_data = api.get_monitor(result["monitorID"])
                if "pushToken" in monitor_data:
                    result["pushToken"] = monitor_data["pushToken"]

            return result
        except Exception as e:
            self._disconnect()
@@ -272,6 +283,32 @@ class UptimeKumaClient:
            self._disconnect()
            return False

    def get_push_url(self, push_token: str) -> str:
        """Build the full push URL for a push monitor.

        Args:
            push_token: The push token from the monitor

        Returns:
            Full push URL like 'https://kuma.example.com/api/push/abc123'
        """
        return f"{self.base_url}/api/push/{push_token}"

    def get_monitor_push_token(self, monitor_id: int) -> Optional[str]:
        """Get the push token for an existing push monitor.

        Args:
            monitor_id: The Uptime Kuma monitor ID

        Returns:
            The push token, or None if not a push monitor or not found
        """
        try:
            monitor_data = self.get_monitor(monitor_id)
            return monitor_data.get("pushToken")
        except Exception:
            return None


# Global client instance
_kuma_client: Optional[UptimeKumaClient] = None
@@ -1,8 +1,13 @@
from dataclasses import dataclass
from typing import Optional
import logging

from services.kuma_client import get_kuma_client, Monitor
from services.claude_agent import MonitorSuggestion
from services.ssh_manager import get_ssh_manager
from services import push_scripts

logger = logging.getLogger(__name__)


@dataclass
@@ -176,16 +181,42 @@ class MonitorService:
        elif suggestion.type == "docker":
            monitor.docker_container = suggestion.target
            monitor.docker_host = hostname
        elif suggestion.type == "push":
            # Push monitors need the push_metric field
            pass

        try:
            result = kuma.create_monitor(monitor)
            return {
            response = {
                "monitor": monitor.name,
                "type": monitor.type,
                "status": "created",
                "result": result,
                "reason": suggestion.reason,
                "push_metric": suggestion.push_metric,
            }

            # For push monitors, deploy the script to the remote host
            if suggestion.type == "push" and suggestion.push_metric:
                push_token = result.get("pushToken")
                monitor_id = result.get("monitorID")

                if push_token and monitor_id:
                    deploy_result = self.deploy_push_script(
                        hostname=hostname,
                        push_metric=suggestion.push_metric,
                        push_token=push_token,
                        monitor_id=monitor_id,
                        interval_minutes=max(1, suggestion.interval // 60),
                    )
                    response["deployment"] = deploy_result
                else:
                    response["deployment"] = {
                        "status": "failed",
                        "error": "No push token returned from Uptime Kuma",
                    }

            return response
        except Exception as e:
            return {
                "monitor": monitor.name,
@@ -195,6 +226,107 @@ class MonitorService:
                "reason": suggestion.reason,
            }

    def deploy_push_script(
        self,
        hostname: str,
        push_metric: str,
        push_token: str,
        monitor_id: int,
        interval_minutes: int = 5,
        username: str = "root",
        port: int = 22,
    ) -> dict:
        """
        Deploy a push monitoring script to a remote host via SSH.

        Args:
            hostname: The remote host to deploy to
            push_metric: The metric type (heartbeat, disk, memory, cpu, updates)
            push_token: The Uptime Kuma push token
            monitor_id: The Uptime Kuma monitor ID
            interval_minutes: Cronjob interval in minutes
            username: SSH username
            port: SSH port

        Returns:
            Dict with status and any error messages
        """
        kuma = get_kuma_client()
        ssh = get_ssh_manager()

        # Build the push URL and script
        push_url = kuma.get_push_url(push_token)
        script_content = push_scripts.generate_script(push_metric, push_url)

        if not script_content:
            return {
                "status": "failed",
                "error": f"Unknown push metric type: {push_metric}",
            }

        script_path = push_scripts.get_script_path(push_metric, monitor_id)
        script_filename = push_scripts.get_script_filename(push_metric, monitor_id)
        cronjob_entry = push_scripts.get_cronjob_entry(push_metric, monitor_id, interval_minutes)

        try:
            # Ensure SSH connection
            if not ssh.is_connected(hostname, username, port):
                connected = ssh.connect(hostname, username, port)
                if not connected:
                    return {
                        "status": "failed",
                        "error": f"Could not connect to {hostname}",
                    }

            # Write the script to the remote host using heredoc
            # Escape any single quotes in the script content
            escaped_content = script_content.replace("'", "'\"'\"'")
            write_cmd = f"cat > {script_path} << 'KUMA_SCRIPT_EOF'\n{script_content}KUMA_SCRIPT_EOF"
            result = ssh.execute(hostname, write_cmd, username, port)
            if not result.success:
                return {
                    "status": "failed",
                    "error": f"Failed to write script: {result.stderr}",
                }

            # Make the script executable
            chmod_result = ssh.execute(hostname, f"chmod +x {script_path}", username, port)
            if not chmod_result.success:
                return {
                    "status": "failed",
                    "error": f"Failed to make script executable: {chmod_result.stderr}",
                }

            # Add cronjob entry (remove existing entry first to avoid duplicates)
            cron_cmd = f"(crontab -l 2>/dev/null | grep -v '{script_filename}'; echo '{cronjob_entry}') | crontab -"
            cron_result = ssh.execute(hostname, cron_cmd, username, port)
            if not cron_result.success:
                return {
                    "status": "failed",
                    "error": f"Failed to add cronjob: {cron_result.stderr}",
                }

            # Run the script once immediately to verify it works
            run_result = ssh.execute(hostname, script_path, username, port, timeout=30)

            return {
                "status": "deployed",
                "script_path": script_path,
                "cronjob": cronjob_entry,
                "initial_run": {
                    "success": run_result.success,
                    "stdout": run_result.stdout,
                    "stderr": run_result.stderr,
                },
            }

        except Exception as e:
            logger.exception(f"Failed to deploy push script to {hostname}")
            return {
                "status": "failed",
                "error": str(e),
            }

    def get_existing_monitors(self) -> list[dict]:
        """Get all existing monitors from Uptime Kuma."""
        kuma = get_kuma_client()
165 backend/services/push_scripts.py Normal file
@@ -0,0 +1,165 @@
"""Bash script templates for push monitor cronjobs."""

from typing import Optional


def get_push_url(kuma_url: str, push_token: str) -> str:
    """Build the full push URL for Uptime Kuma."""
    base = kuma_url.rstrip("/")
    return f"{base}/api/push/{push_token}"


SCRIPT_TEMPLATES = {
    "heartbeat": '''#!/bin/bash
# Kuma Push Monitor - Heartbeat
# Simple "I'm alive" ping for remote/unreachable hosts
PUSH_URL="{push_url}"
HOSTNAME=$(hostname)

curl -fsS -m 10 -o /dev/null "$PUSH_URL?status=up&msg=${{HOSTNAME}}%20alive"
''',

    "disk": '''#!/bin/bash
# Kuma Push Monitor - Disk Usage
# Fails if any mount exceeds threshold
PUSH_URL="{push_url}"
THRESHOLD=90

# Find worst disk usage (excluding tmpfs, devtmpfs, etc.)
WORST=$(df -h -x tmpfs -x devtmpfs -x squashfs 2>/dev/null | awk 'NR>1 && $5+0 > 0 {{gsub(/%/,"",$5); print $5, $6}}' | sort -rn | head -1)
USAGE=$(echo "$WORST" | awk '{{print $1}}')
MOUNT=$(echo "$WORST" | awk '{{print $2}}')

if [ -z "$USAGE" ]; then
    curl -fsS -m 10 -o /dev/null "$PUSH_URL?status=up&msg=No%20disks%20found"
elif [ "$USAGE" -gt "$THRESHOLD" ]; then
    curl -fsS -m 10 -o /dev/null "$PUSH_URL?status=down&msg=Disk%20$MOUNT%20at%20${{USAGE}}%25"
else
    curl -fsS -m 10 -o /dev/null "$PUSH_URL?status=up&msg=Disk%20OK%20(worst:%20${{USAGE}}%25%20on%20$MOUNT)"
fi
''',

    "memory": '''#!/bin/bash
# Kuma Push Monitor - Memory Usage
# Fails if memory usage exceeds threshold
PUSH_URL="{push_url}"
THRESHOLD=90

USAGE=$(free | awk '/Mem:/ {{printf "%.0f", $3/$2 * 100}}')

if [ "$USAGE" -gt "$THRESHOLD" ]; then
    curl -fsS -m 10 -o /dev/null "$PUSH_URL?status=down&msg=Memory%20at%20${{USAGE}}%25"
else
    curl -fsS -m 10 -o /dev/null "$PUSH_URL?status=up&msg=Memory%20OK%20(${{USAGE}}%25)"
fi
''',

    "cpu": '''#!/bin/bash
# Kuma Push Monitor - CPU Load
# Fails if 5-minute load average exceeds threshold (as % of cores)
PUSH_URL="{push_url}"
THRESHOLD=95

CORES=$(nproc)
LOAD=$(awk '{{print $2}}' /proc/loadavg) # 5-min average
PERCENT=$(awk "BEGIN {{printf \\"%.0f\\", ($LOAD / $CORES) * 100}}")

if [ "$PERCENT" -gt "$THRESHOLD" ]; then
    curl -fsS -m 10 -o /dev/null "$PUSH_URL?status=down&msg=CPU%20load%20${{PERCENT}}%25%20(${{LOAD}}/${{CORES}})"
else
    curl -fsS -m 10 -o /dev/null "$PUSH_URL?status=up&msg=CPU%20OK%20(${{PERCENT}}%25)"
fi
''',

    "updates": '''#!/bin/bash
# Kuma Push Monitor - Security Updates
# Fails if security updates are pending
PUSH_URL="{push_url}"

# Try apt (Debian/Ubuntu)
if command -v apt-get &>/dev/null; then
    apt-get update -qq 2>/dev/null
    SECURITY=$(apt-get -s upgrade 2>/dev/null | grep -c "^Inst.*security" || echo "0")
    if [ "$SECURITY" -gt 0 ]; then
        curl -fsS -m 10 -o /dev/null "$PUSH_URL?status=down&msg=${{SECURITY}}%20security%20updates%20pending"
    else
        curl -fsS -m 10 -o /dev/null "$PUSH_URL?status=up&msg=No%20security%20updates"
    fi
# Try dnf (RHEL/Fedora)
elif command -v dnf &>/dev/null; then
    SECURITY=$(dnf check-update --security 2>/dev/null | grep -c "^[a-zA-Z]" || echo "0")
    if [ "$SECURITY" -gt 0 ]; then
        curl -fsS -m 10 -o /dev/null "$PUSH_URL?status=down&msg=${{SECURITY}}%20security%20updates%20pending"
    else
        curl -fsS -m 10 -o /dev/null "$PUSH_URL?status=up&msg=No%20security%20updates"
    fi
# Try yum (older RHEL/CentOS)
elif command -v yum &>/dev/null; then
    SECURITY=$(yum check-update --security 2>/dev/null | grep -c "^[a-zA-Z]" || echo "0")
    if [ "$SECURITY" -gt 0 ]; then
        curl -fsS -m 10 -o /dev/null "$PUSH_URL?status=down&msg=${{SECURITY}}%20security%20updates%20pending"
    else
        curl -fsS -m 10 -o /dev/null "$PUSH_URL?status=up&msg=No%20security%20updates"
    fi
else
    curl -fsS -m 10 -o /dev/null "$PUSH_URL?status=up&msg=Unknown%20package%20manager"
fi
''',
}


def generate_script(push_metric: str, push_url: str) -> Optional[str]:
    """Generate a push monitoring script for the given metric type.

    Args:
        push_metric: One of 'heartbeat', 'disk', 'memory', 'cpu', 'updates'
        push_url: The full Uptime Kuma push URL including token

    Returns:
        The bash script content, or None if invalid metric type
    """
    template = SCRIPT_TEMPLATES.get(push_metric)
    if not template:
        return None
    return template.format(push_url=push_url)


def get_script_filename(push_metric: str, monitor_id: int) -> str:
    """Generate the script filename for a push monitor.

    Args:
        push_metric: The metric type
        monitor_id: The Uptime Kuma monitor ID

    Returns:
        Filename like 'kuma-push-disk-123.sh'
    """
    return f"kuma-push-{push_metric}-{monitor_id}.sh"


def get_script_path(push_metric: str, monitor_id: int) -> str:
    """Generate the full path for a push monitor script.

    Args:
        push_metric: The metric type
        monitor_id: The Uptime Kuma monitor ID

    Returns:
        Full path like '/usr/local/bin/kuma-push-disk-123.sh'
    """
    return f"/usr/local/bin/{get_script_filename(push_metric, monitor_id)}"


def get_cronjob_entry(push_metric: str, monitor_id: int, interval_minutes: int = 5) -> str:
    """Generate the crontab entry for a push monitor script.

    Args:
        push_metric: The metric type
        monitor_id: The Uptime Kuma monitor ID
        interval_minutes: How often to run (default 5 minutes)

    Returns:
        Crontab entry string
    """
    script_path = get_script_path(push_metric, monitor_id)
    return f"*/{interval_minutes} * * * * {script_path} >/dev/null 2>&1"
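For illustration, with an assumed Kuma base URL of https://kuma.example.com, push token abc123, monitor ID 7, and the default 5-minute interval, the helpers above would produce values along these lines:

```bash
# Assumed example values, not taken from this commit:
# get_push_url()      -> https://kuma.example.com/api/push/abc123
# get_script_path()   -> /usr/local/bin/kuma-push-memory-7.sh
# get_cronjob_entry() -> */5 * * * * /usr/local/bin/kuma-push-memory-7.sh >/dev/null 2>&1

# The generated script then reports status the same way the templates above do, e.g.:
curl -fsS -m 10 -o /dev/null "https://kuma.example.com/api/push/abc123?status=up&msg=Memory%20OK%20(42%25)"
```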
@@ -58,6 +58,20 @@ export const api = {
    method: 'POST',
    body: JSON.stringify({ scan_id: scanId, monitors: monitorIndices }),
  }),
  deployPushScript: (monitorId, hostname, pushMetric, options = {}) => fetchApi(`/monitors/${monitorId}/deploy-script`, {
    method: 'POST',
    body: JSON.stringify({
      hostname,
      push_metric: pushMetric,
      username: options.username || 'root',
      port: options.port || 22,
      interval_minutes: options.intervalMinutes || 5,
    }),
  }),
  deployAllPushScripts: (monitors) => fetchApi('/monitors/deploy-all-scripts', {
    method: 'POST',
    body: JSON.stringify({ monitors }),
  }),

  // Uptime Kuma
  testKumaConnection: () => fetchApi('/kuma/test'),
@@ -8,6 +8,7 @@ export default function DiscoveryResults({ scanId, scan, analysis, devMode, onCo
  const [createResults, setCreateResults] = useState(null);
  const [runningCommands, setRunningCommands] = useState({});
  const [questionAnswers, setQuestionAnswers] = useState({});
  const [deployingScripts, setDeployingScripts] = useState({});

  const handleRunCommand = async (command, index) => {
    setRunningCommands(prev => ({ ...prev, [index]: true }));
@@ -65,6 +66,38 @@ export default function DiscoveryResults({ scanId, scan, analysis, devMode, onCo
    );
  };

  const handleRetryDeploy = async (resultIndex, result) => {
    const monitorId = result.result?.monitorID;
    const pushMetric = result.push_metric;

    if (!monitorId || !pushMetric) {
      console.error('Missing monitor ID or push metric for deployment');
      return;
    }

    setDeployingScripts(prev => ({ ...prev, [resultIndex]: true }));
    try {
      const deployResult = await api.deployPushScript(
        monitorId,
        scan.hostname,
        pushMetric,
        { port: scan.port || 22 }
      );

      // Update the result with new deployment status
      setCreateResults(prev => prev.map((r, i) =>
        i === resultIndex ? { ...r, deployment: deployResult } : r
      ));
    } catch (err) {
      console.error('Failed to deploy script:', err);
      setCreateResults(prev => prev.map((r, i) =>
        i === resultIndex ? { ...r, deployment: { status: 'failed', error: err.message } } : r
      ));
    } finally {
      setDeployingScripts(prev => ({ ...prev, [resultIndex]: false }));
    }
  };

  if (!scan.connected) {
    return (
      <div className="bg-red-900/30 border border-red-600 rounded-lg p-6">
@@ -190,9 +223,14 @@ export default function DiscoveryResults({ scanId, scan, analysis, devMode, onCo
            monitor.type === 'http' ? 'bg-blue-900 text-blue-200' :
            monitor.type === 'tcp' ? 'bg-green-900 text-green-200' :
            monitor.type === 'docker' ? 'bg-cyan-900 text-cyan-200' :
            monitor.type === 'push' ? 'bg-purple-900 text-purple-200' :
            monitor.type === 'ping' ? 'bg-emerald-900 text-emerald-200' :
            'bg-slate-600 text-slate-200'
          }`}>
            {monitor.type.toUpperCase()}
            {monitor.type === 'push' && monitor.push_metric && (
              <span className="ml-1 opacity-75">({monitor.push_metric})</span>
            )}
          </span>
          <span className="font-medium">{monitor.name}</span>
        </div>
@@ -300,25 +338,82 @@ export default function DiscoveryResults({ scanId, scan, analysis, devMode, onCo
          {createResults.map((result, index) => (
            <div
              key={index}
              className={`p-3 rounded flex items-center gap-2 ${
              className={`p-3 rounded ${
                result.status === 'created'
                  ? 'bg-green-900/30 text-green-300'
                  : 'bg-red-900/30 text-red-300'
              }`}
            >
              {result.status === 'created' ? (
                <svg className="w-5 h-5" fill="none" stroke="currentColor" viewBox="0 0 24 24">
                  <path strokeLinecap="round" strokeLinejoin="round" strokeWidth={2} d="M5 13l4 4L19 7" />
                </svg>
              ) : (
                <svg className="w-5 h-5" fill="none" stroke="currentColor" viewBox="0 0 24 24">
                  <path strokeLinecap="round" strokeLinejoin="round" strokeWidth={2} d="M6 18L18 6M6 6l12 12" />
                </svg>
              <div className="flex items-center gap-2">
                {result.status === 'created' ? (
                  <svg className="w-5 h-5 flex-shrink-0" fill="none" stroke="currentColor" viewBox="0 0 24 24">
                    <path strokeLinecap="round" strokeLinejoin="round" strokeWidth={2} d="M5 13l4 4L19 7" />
                  </svg>
                ) : (
                  <svg className="w-5 h-5 flex-shrink-0" fill="none" stroke="currentColor" viewBox="0 0 24 24">
                    <path strokeLinecap="round" strokeLinejoin="round" strokeWidth={2} d="M6 18L18 6M6 6l12 12" />
                  </svg>
                )}
                <span className="font-medium">{result.monitor}</span>
                <span className="text-sm opacity-75">({result.type})</span>
                {result.error && (
                  <span className="text-sm ml-auto">{result.error}</span>
                )}
              </div>

              {/* Push monitor deployment status */}
              {result.type === 'push' && result.deployment && (
                <div className={`mt-2 ml-7 text-sm ${
                  result.deployment.status === 'deployed' ? 'text-green-400' :
                  result.deployment.status === 'failed' ? 'text-red-400' : 'text-slate-400'
                }`}>
                  <div className="flex items-center gap-2">
                    {result.deployment.status === 'deployed' ? (
                      <>
                        <svg className="w-4 h-4" fill="none" stroke="currentColor" viewBox="0 0 24 24">
                          <path strokeLinecap="round" strokeLinejoin="round" strokeWidth={2} d="M9 12l2 2 4-4m6 2a9 9 0 11-18 0 9 9 0 0118 0z" />
                        </svg>
                        <span>Script deployed to {result.deployment.script_path}</span>
                      </>
                    ) : (
                      <>
                        <svg className="w-4 h-4" fill="none" stroke="currentColor" viewBox="0 0 24 24">
                          <path strokeLinecap="round" strokeLinejoin="round" strokeWidth={2} d="M12 8v4m0 4h.01M21 12a9 9 0 11-18 0 9 9 0 0118 0z" />
                        </svg>
                        <span>Deployment failed: {result.deployment.error}</span>
                        <button
                          onClick={() => handleRetryDeploy(index, result)}
                          disabled={deployingScripts[index]}
                          className="ml-2 px-2 py-0.5 text-xs bg-purple-600 hover:bg-purple-500 disabled:bg-slate-600 text-white rounded transition-colors"
                        >
                          {deployingScripts[index] ? 'Deploying...' : 'Retry Deploy'}
                        </button>
                      </>
                    )}
                  </div>
                  {result.deployment.cronjob && (
                    <div className="mt-1 text-xs text-slate-500 font-mono">
                      Cronjob: {result.deployment.cronjob}
                    </div>
                  )}
                </div>
              )}
              <span className="font-medium">{result.monitor}</span>
              <span className="text-sm opacity-75">({result.type})</span>
              {result.error && (
                <span className="text-sm ml-auto">{result.error}</span>

              {/* Push monitor without deployment info (needs manual deploy) */}
              {result.type === 'push' && result.status === 'created' && !result.deployment && (
                <div className="mt-2 ml-7 text-sm text-amber-400 flex items-center gap-2">
                  <svg className="w-4 h-4" fill="none" stroke="currentColor" viewBox="0 0 24 24">
                    <path strokeLinecap="round" strokeLinejoin="round" strokeWidth={2} d="M12 9v2m0 4h.01m-6.938 4h13.856c1.54 0 2.502-1.667 1.732-3L13.732 4c-.77-1.333-2.694-1.333-3.464 0L3.34 16c-.77 1.333.192 3 1.732 3z" />
                  </svg>
                  <span>Script not deployed</span>
                  <button
                    onClick={() => handleRetryDeploy(index, result)}
                    disabled={deployingScripts[index]}
                    className="ml-2 px-2 py-0.5 text-xs bg-purple-600 hover:bg-purple-500 disabled:bg-slate-600 text-white rounded transition-colors"
                  >
                    {deployingScripts[index] ? 'Deploying...' : 'Deploy Script'}
                  </button>
                </div>
              )}
            </div>
          ))}