diff --git a/deploy/microshift-bootc/Containerfile b/deploy/microshift-bootc/Containerfile new file mode 100644 index 0000000..7226c78 --- /dev/null +++ b/deploy/microshift-bootc/Containerfile @@ -0,0 +1,29 @@ +FROM ghcr.io/microshift-io/microshift:4.21.0_gbc8e20c07_4.21.0_okd_scos.ec.14 +# Install dependencies for config-svc +RUN dnf install -y epel-release && \ + dnf install -y python3 iproute python3-flask python3-pip && \ + pip3 install python-pam && \ + dnf clean all + +# Install MicroShift manifests +RUN mkdir -p /etc/microshift/manifests.d/002-jumpstarter +COPY deploy/microshift-bootc/kustomization.yaml /etc/microshift/manifests.d/002-jumpstarter/kustomization.yaml +COPY deploy/operator/dist/install.yaml /etc/microshift/manifests.d/002-jumpstarter/install-operator.yaml + +# Configure firewalld to open required ports +# Use firewall-offline-cmd since firewalld is not running during build +RUN firewall-offline-cmd --add-service=http && \ + firewall-offline-cmd --add-service=https && \ + firewall-offline-cmd --add-port=8880/tcp + +# Set root password +RUN echo "root:jumpstarter" | chpasswd + +# Install config-svc systemd service +COPY deploy/microshift-bootc/config-svc/app.py /usr/local/bin/config-svc +RUN chmod +x /usr/local/bin/config-svc +COPY deploy/microshift-bootc/config-svc/update-banner.sh /usr/local/bin/update-banner.sh +RUN chmod +x /usr/local/bin/update-banner.sh +COPY deploy/microshift-bootc/config-svc/config-svc.service /etc/systemd/system/config-svc.service +COPY deploy/microshift-bootc/config-svc/update-banner.service /etc/systemd/system/update-banner.service +RUN systemctl enable config-svc.service update-banner.service \ No newline at end of file diff --git a/deploy/microshift-bootc/Makefile b/deploy/microshift-bootc/Makefile new file mode 100644 index 0000000..4db7157 --- /dev/null +++ b/deploy/microshift-bootc/Makefile @@ -0,0 +1,154 @@ +.PHONY: help build bootc-build bootc-build-multi push bootc-push bootc-push-multi bootc-run bootc-stop 
bootc-sh bootc-rm build-image build-iso build-all build-all-multi push-all push-all-multi + +# Default image tags +BOOTC_IMG ?= quay.io/jumpstarter-dev/microshift/bootc:latest + + +help: ## Display this help + @awk 'BEGIN {FS = ":.*##"; printf "\nUsage:\n make \033[36m\033[0m\n"} /^[a-zA-Z_0-9-]+:.*?##/ { printf " \033[36m%-20s\033[0m %s\n", $$1, $$2 } /^##@/ { printf "\n\033[1m%s\033[0m\n", substr($$0, 5) } ' $(MAKEFILE_LIST) + +##@ Build + +build: bootc-build ## Build bootc image (default target) + +bootc-build: ## Build the bootc image with MicroShift + @echo "Building bootc image: $(BOOTC_IMG): building as root to be on the container storage from root" + sudo podman build -t $(BOOTC_IMG) -f Containerfile ../.. + +bootc-build-multi: ## Build the bootc image for multiple architectures (amd64, arm64) + @echo "Building multiarch bootc image: $(BOOTC_IMG)" + @echo "This will build for linux/amd64 and linux/arm64" + @# Remove existing manifest if it exists + -podman manifest rm $(BOOTC_IMG) 2>/dev/null || true + @# Create a new manifest + podman manifest create $(BOOTC_IMG) + @# Build for amd64 + @echo "Building for linux/amd64..." + podman build --platform linux/amd64 -t $(BOOTC_IMG)-amd64 -f Containerfile ../.. + @# Build for arm64 + @echo "Building for linux/arm64..." + podman build --platform linux/arm64 -t $(BOOTC_IMG)-arm64 -f Containerfile ../.. + @# Add both images to the manifest + podman manifest add $(BOOTC_IMG) $(BOOTC_IMG)-amd64 + podman manifest add $(BOOTC_IMG) $(BOOTC_IMG)-arm64 + @echo "Multiarch manifest created successfully!" + @echo "To inspect: podman manifest inspect $(BOOTC_IMG)" + @echo "To push: make bootc-push-multi" + +output/qcow2/disk.qcow2: ## Build a bootable QCOW2 image from the bootc image + @echo "Building QCOW2 image from: $(BOOTC_IMG)" + @echo "Running bootc-image-builder..." 
+ @mkdir -p output + sudo podman run \ + --rm \ + -it \ + --privileged \ + --pull=newer \ + --security-opt label=type:unconfined_t \ + -v ./config.toml:/config.toml:ro \ + -v ./output:/output \ + -v /var/lib/containers/storage:/var/lib/containers/storage \ + quay.io/centos-bootc/bootc-image-builder:latest \ + --type qcow2 \ + -v \ + $(BOOTC_IMG) + @echo "QCOW2 image built successfully in ./output/" + +output/iso/disk.iso: ## Build a bootable ISO image from the bootc image + @echo "Building ISO image from: $(BOOTC_IMG)" + @echo "Running bootc-image-builder..." + @mkdir -p output + sudo podman run \ + --rm \ + -it \ + --privileged \ + --pull=newer \ + --security-opt label=type:unconfined_t \ + -v ./config.toml:/config.toml:ro \ + -v ./output:/output \ + -v /var/lib/containers/storage:/var/lib/containers/storage \ + quay.io/centos-bootc/bootc-image-builder:latest \ + --type iso \ + -v \ + $(BOOTC_IMG) + @echo "ISO image built successfully in ./output/" + +build-image: bootc-build ## Build the bootc based qcow2 image + @echo "Building image: output/qcow2/disk.qcow2" + @echo "Cleaning up any existing LVM resources to avoid conflicts..." + -sudo vgs --noheadings -o vg_name,vg_uuid | grep myvg1 | while read vg uuid; do sudo vgremove -f --select vg_uuid=$$uuid 2>/dev/null || true; done + -sudo losetup -D 2>/dev/null || true + sudo rm -f output/qcow2/disk.qcow2 + make output/qcow2/disk.qcow2 + @echo "Image built successfully in ./output/" + +build-iso: bootc-build ## Build the bootc based ISO image + @echo "Building ISO image: output/iso/disk.iso" + @echo "Cleaning up any existing LVM resources to avoid conflicts..." 
+ -sudo vgs --noheadings -o vg_name,vg_uuid | grep myvg1 | while read vg uuid; do sudo vgremove -f --select vg_uuid=$$uuid 2>/dev/null || true; done + -sudo losetup -D 2>/dev/null || true + sudo rm -f output/iso/disk.iso + make output/iso/disk.iso + @echo "ISO image built successfully in ./output/" + +##@ Push + +push: bootc-push ## Push bootc image to registry + +bootc-push: ## Push the bootc image to registry + @echo "Pushing bootc image: $(BOOTC_IMG)" + sudo podman push $(BOOTC_IMG) + +bootc-push-multi: ## Push the multiarch manifest to registry + @echo "Pushing multiarch manifest: $(BOOTC_IMG)" + @echo "This will push the manifest list with amd64 and arm64 images" + podman manifest push $(BOOTC_IMG) $(BOOTC_IMG) + @echo "Multiarch manifest pushed successfully!" + @echo "Images available for linux/amd64 and linux/arm64" + +##@ Development + +build-all: bootc-build ## Build bootc image + +build-all-multi: bootc-build-multi ## Build multiarch bootc image + +push-all: bootc-push ## Push bootc image to registry + +push-all-multi: bootc-push-multi ## Push multiarch bootc image to registry + +bootc-run: ## Run MicroShift in a bootc container + @echo "Running MicroShift container with image: $(BOOTC_IMG)" + @BOOTC_IMG=$(BOOTC_IMG) sudo -E ./run-microshift.sh + +bootc-stop: ## Stop the running MicroShift container + @echo "Stopping MicroShift container..." + -sudo podman stop jumpstarter-microshift-okd + +bootc-rm: bootc-stop ## Remove the MicroShift container + @echo "Removing MicroShift container..." + -sudo podman rm -f jumpstarter-microshift-okd + @echo "Cleaning up LVM resources..." + -sudo vgremove -f myvg1 2>/dev/null || true + -sudo losetup -d $$(sudo losetup -j /var/lib/microshift-okd/lvmdisk.image | cut -d: -f1) 2>/dev/null || true + @echo "LVM cleanup complete" + +bootc-sh: ## Open a shell in the running MicroShift container + @echo "Opening shell in MicroShift container..." 
+ sudo podman exec -it jumpstarter-microshift-okd /bin/bash -l + +bootc-reload-app: ## Reload the config service app without rebuilding (dev mode) + @echo "Reloading config-svc app..." + sudo podman cp config-svc/app.py jumpstarter-microshift-okd:/usr/local/bin/config-svc + sudo podman exec jumpstarter-microshift-okd systemctl restart config-svc + @echo "Config service reloaded successfully!" + +clean: ## Clean up local images and build artifacts + @echo "Removing local images..." + -sudo podman rmi $(BOOTC_IMG) + @echo "Removing QCOW2 output..." + -sudo rm -rf output/qcow2/disk.qcow2 + @echo "Removing ISO output..." + -sudo rm -rf output/iso/disk.iso + @echo "Removing LVM disk image..." + -sudo rm -f /var/lib/microshift-okd/lvmdisk.image + diff --git a/deploy/microshift-bootc/README.md b/deploy/microshift-bootc/README.md new file mode 100644 index 0000000..b35d923 --- /dev/null +++ b/deploy/microshift-bootc/README.md @@ -0,0 +1,420 @@ +# MicroShift Bootc Deployment + +This directory contains the configuration and scripts to build a bootable container (bootc) image with MicroShift and the Jumpstarter operator pre-installed. + +> **⚠️ Community Edition Disclaimer** +> +> This MicroShift-based deployment is a **community-supported edition** intended for development, testing, and evaluation scenarios. It is **not officially supported** for production use, although it can be OK for small labs. +> +> **For production deployments**, we strongly recommend using the official Jumpstarter Controller deployment on Kubernetes or OpenShift clusters with proper high availability, security, and support. See the [official installation documentation](https://jumpstarter.dev/main/getting-started/installation/service/index.html) for production deployment guides. 
+ +## Overview + +This community edition deployment provides a lightweight, all-in-one solution ideal for: +- **Edge devices** with limited resources +- **Development and testing** environments +- **Proof-of-concept** deployments +- **Local experimentation** with Jumpstarter + +**Features:** +- **MicroShift 4.21 (OKD)** - Lightweight Kubernetes distribution +- **Jumpstarter Operator** - Pre-installed and ready to use +- **TopoLVM CSI** - Dynamic storage provisioning using LVM +- **Configuration Web UI** - Easy setup and management at port 8880 +- **Pod Monitoring** - Real-time pod status dashboard + +## Prerequisites + +- **Fedora/RHEL-based system** (tested on Fedora 42) +- **Podman** installed and configured +- **Root/sudo access** required for privileged operations +- **At least 4GB RAM** and 20GB disk space recommended + +## Quick Start + +### 1. Build the Bootc Image + +```bash +make bootc-build +``` + +This builds a container image with MicroShift and all dependencies. + +### 2. Run as Container (Development/Testing) + +```bash +make bootc-run +``` + +This will: +- Create a 1GB LVM disk image at `/var/lib/microshift-okd/lvmdisk.image` +- Start MicroShift in a privileged container +- Set up LVM volume groups inside the container for TopoLVM +- Wait for MicroShift to be ready + +**Output example:** +``` +MicroShift is running in a bootc container +Hostname: jumpstarter.10.0.2.2.nip.io +Container: jumpstarter-microshift-okd +LVM disk: /var/lib/microshift-okd/lvmdisk.image +VG name: myvg1 +Ports: HTTP:80, HTTPS:443, Config Service:8880 +``` + +### 3. 
Access the Services + +#### Configuration Web UI +- URL: `http://localhost:8880` +- Login: `root` / `jumpstarter` (default - you'll be required to change it) +- Features: + - Configure hostname and base domain + - Set controller image version + - Change root password (required on first use) + - Download kubeconfig + - Monitor pod status + +#### MicroShift API +- URL: `https://jumpstarter.<ip-address>.nip.io:6443` +- Download kubeconfig from the web UI or extract from container + +#### Pod Monitoring Dashboard +- URL: `http://localhost:8880/pods` +- Auto-refreshes every 5 seconds +- Shows all pods across all namespaces + +## Container Management + +### View Running Pods + +```bash +sudo podman exec -it jumpstarter-microshift-okd oc get pods -A +``` + +### Open Shell in Container + +```bash +make bootc-sh +``` + +### Stop Container + +```bash +make bootc-stop +``` + +### Remove Container + +```bash +make bootc-rm +``` + +This will: +- Stop the container +- Remove the container +- Clean up LVM volume groups (myvg1) +- Detach loop devices + +**Note:** The LVM disk image (`/var/lib/microshift-okd/lvmdisk.image`) is preserved. To remove it completely, use `make clean`. + +### Complete Rebuild + +```bash +make bootc-rm bootc-build bootc-run +``` + +This stops, removes, rebuilds, and restarts the container with the latest changes. + +## Creating a Bootable QCOW2 Image + +For production deployments, you can create a bootable QCOW2 disk image that can be: +- Installed on bare metal +- Used in virtual machines (KVM/QEMU, OpenStack, etc.) +- Deployed to edge devices + +### Build QCOW2 Image + +```bash +make build-image +``` + +This will: +1. Clean up any existing LVM resources to avoid conflicts +2. Build the bootc container image (if not already built) +3. Use `bootc-image-builder` to create a bootable QCOW2 image +4. Output the image to `./output/qcow2/disk.qcow2` + +**Note:** This process takes several minutes and requires significant disk space (20GB+). 
+ +**Important:** If you're running the container (`make bootc-run`) and want to build the image, stop the container first with `make bootc-rm` to avoid LVM conflicts. + +### Configuration + +The QCOW2 image is configured via `config.toml`: +- **LVM partitioning:** Creates `myvg1` volume group with 20GB minimum +- **Root filesystem:** XFS on LVM (10GB minimum) +- **Default password:** `root:jumpstarter` (change via web UI on first boot) + +### Using the QCOW2 Image + +#### In a Virtual Machine (KVM/QEMU) + +```bash +qemu-system-x86_64 \ + -m 4096 \ + -smp 2 \ + -drive file=output/qcow2/disk.qcow2,format=qcow2 \ + -net nic -net user,hostfwd=tcp::8880-:8880,hostfwd=tcp::443-:443 +``` + +#### Convert to Other Formats + +```bash +# Convert to raw disk image +qemu-img convert -f qcow2 -O raw output/qcow2/disk.qcow2 output/disk.raw + +# Convert to VirtualBox VDI +qemu-img convert -f qcow2 -O vdi output/qcow2/disk.qcow2 output/disk.vdi +``` + +## Architecture + +### Components + +``` +┌─────────────────────────────────────────────┐ +│ Bootc Container / Image │ +├─────────────────────────────────────────────┤ +│ • CentOS Stream CoreOS │ +│ • MicroShift 4.21 (OKD) │ +│ • Jumpstarter Operator │ +│ • TopoLVM CSI (storage) │ +│ • Configuration Service (Python/Flask) │ +│ • Firewalld (ports 22, 80, 443, 8880) │ +└─────────────────────────────────────────────┘ +``` + +### Storage Setup + +When running as a container: +1. Script creates `/var/lib/microshift-okd/lvmdisk.image` (1GB) +2. Image is copied into the container +3. Loop device is created inside container +4. LVM volume group `myvg1` is created +5. TopoLVM uses `myvg1` for dynamic PV provisioning + +When deployed from QCOW2: +1. Bootc image builder creates proper disk partitioning +2. LVM volume group `myvg1` is set up on disk +3. Root filesystem uses part of the VG +4. 
Remaining space available for TopoLVM + +## Customization + +### Change Default Image + +```bash +BOOTC_IMG=quay.io/your-org/microshift-bootc:v1.0 make bootc-build +``` + +### Modify Manifests + +Add Kubernetes manifests to `/etc/microshift/manifests.d/002-jumpstarter/` by editing: +- `kustomization.yaml` - Kustomize configuration +- Additional YAML files will be automatically applied + +### Update Configuration Service + +Edit `config-svc/app.py` and rebuild: + +```bash +make bootc-build +``` + +For live testing without rebuild: + +```bash +make bootc-reload-app +``` + +## Troubleshooting + +### LVM/TopoLVM Issues + +Check if volume group exists in container: + +```bash +sudo podman exec jumpstarter-microshift-okd vgs +sudo podman exec jumpstarter-microshift-okd pvs +``` + +If TopoLVM pods are crashing, recreate the LVM setup: + +```bash +make bootc-rm # Automatically cleans up VG and loop devices +make clean # Remove the disk image for a fresh start +make bootc-run +``` + +### MicroShift Not Starting + +Check logs: + +```bash +sudo podman logs jumpstarter-microshift-okd +sudo podman exec jumpstarter-microshift-okd journalctl -u microshift -f +``` + +### Configuration Service Issues + +Check service status: + +```bash +sudo podman exec jumpstarter-microshift-okd systemctl status config-svc +sudo podman exec jumpstarter-microshift-okd journalctl -u config-svc -f +``` + +### Port Conflicts + +If ports 80, 443, or 8880 are in use, modify `run-microshift.sh`: + +```bash +HTTP_PORT=8080 +HTTPS_PORT=8443 +CONFIG_SVC_PORT=9880 +``` + +### Bootc Image Builder Fails + +Ensure sufficient disk space and clean up: + +```bash +sudo podman system prune -a +sudo rm -rf output/ +``` + +## Makefile Targets + +| Target | Description | +|--------|-------------| +| `make help` | Display all available targets | +| `make bootc-build` | Build the bootc container image | +| `make bootc-run` | Run MicroShift in a container | +| `make bootc-stop` | Stop the running container | +| `make 
bootc-rm` | Remove container and clean up LVM resources | +| `make bootc-sh` | Open shell in container | +| `make bootc-reload-app` | Reload config service without rebuild (dev mode) | +| `make build-image` | Create bootable QCOW2 image | +| `make bootc-push` | Push image to registry | +| `make clean` | Clean up images, artifacts, and LVM disk | + +## Files + +| File | Description | +|------|-------------| +| `Containerfile` | Container build definition | +| `config.toml` | Bootc image builder configuration | +| `run-microshift.sh` | Container startup script | +| `kustomization.yaml` | Kubernetes manifests configuration | +| `config-svc/app.py` | Configuration web UI service | +| `config-svc/config-svc.service` | Systemd service definition | + +## Network Configuration + +### Hostname Resolution + +The system uses `nip.io` for automatic DNS resolution: +- Default: `jumpstarter.<ip-address>.nip.io` +- Example: `jumpstarter.10.0.2.2.nip.io` resolves to `10.0.2.2` + +### Firewall Ports + +| Port | Service | Description | +|------|---------|-------------| +| 80 | HTTP | MicroShift ingress | +| 443 | HTTPS | MicroShift API and ingress | +| 8880 | Config UI | Web configuration interface | +| 6443 | API Server | Kubernetes API (internal) | + +## Security Notes + +⚠️ **Important Security Considerations:** + +1. **Default Password:** The system ships with `root:jumpstarter` as the default password + - **Console login:** You will be forced to change the password on first SSH/console login + - **Web UI:** You must change the password before accessing the configuration interface +2. **TLS Certificates:** MicroShift uses self-signed certs by default +3. **Privileged Container:** Required for systemd, LVM, and networking +4. **Authentication:** Web UI uses PAM authentication with root credentials +5. **Production Use:** Consider additional hardening for production deployments + +## Development Workflow + +Typical development cycle: + +```bash +# 1. 
Make changes to code/configuration +vim config-svc/app.py + +# 2. Quick reload (no rebuild needed) +make bootc-reload-app + +# 3. Access and test +curl http://localhost:8880 + +# 4. Check logs if issues +make bootc-sh +journalctl -u config-svc -f + +# 5. For major changes, do full rebuild +make bootc-rm bootc-build bootc-run +``` + +## Production Deployment + +1. **Build QCOW2 image:** + ```bash + make build-image + ``` + +2. **Copy image to target system:** + ```bash + scp output/qcow2/disk.qcow2 target-host:/var/lib/libvirt/images/ + ``` + +3. **Create VM or write to disk:** + ```bash + # For VM + virt-install --name jumpstarter \ + --memory 4096 \ + --vcpus 2 \ + --disk path=/var/lib/libvirt/images/disk.qcow2 \ + --import \ + --os-variant fedora39 + + # For bare metal + dd if=output/qcow2/disk.qcow2 of=/dev/sdX bs=4M status=progress + ``` + +4. **First boot:** + - Console login will require password change from default `jumpstarter` + - Access web UI at `http://:8880` and set new password + +## Resources + +### Jumpstarter Documentation +- [Official Installation Guide](https://jumpstarter.dev/main/getting-started/installation/service/index.html) - **Recommended for production** +- [Jumpstarter Project](https://github.com/jumpstarter-dev/jumpstarter) + +### Technology Stack +- [MicroShift Documentation](https://microshift.io/) +- [Bootc Documentation](https://containers.github.io/bootc/) +- [TopoLVM Documentation](https://github.com/topolvm/topolvm) + +## Support + +For issues and questions: +- File issues on the Jumpstarter GitHub repository +- Check container logs: `sudo podman logs jumpstarter-microshift-okd` +- Review systemd journals: `make bootc-sh` then `journalctl -xe` + diff --git a/deploy/microshift-bootc/config-svc/app.py b/deploy/microshift-bootc/config-svc/app.py new file mode 100644 index 0000000..7ad2515 --- /dev/null +++ b/deploy/microshift-bootc/config-svc/app.py @@ -0,0 +1,3110 @@ +#!/usr/bin/env python3 +""" +Jumpstarter Configuration Web UI + 
+A simple web service for configuring Jumpstarter deployment settings: +- Hostname configuration with smart defaults +- Jumpstarter CR management (baseDomain + image version) +- MicroShift kubeconfig download +""" + +import json +import os +import re +import socket +import subprocess +import sys +import tempfile +from functools import wraps +from io import BytesIO +from pathlib import Path + +from flask import Flask, request, send_file, render_template_string, Response, jsonify + +app = Flask(__name__) + +# MicroShift kubeconfig path +KUBECONFIG_PATH = '/var/lib/microshift/resources/kubeadmin/kubeconfig' + + +def validate_hostname(hostname): + """ + Validate hostname according to RFC 1123 standards. + + Rules: + - Total length <= 253 characters + - Each label 1-63 characters + - Labels match /^[a-z0-9]([a-z0-9-]*[a-z0-9])?$/i (case-insensitive) + - No leading/trailing hyphen in labels + - Reject empty or illegal characters + - Optionally reject trailing dot + + Returns: (is_valid: bool, error_message: str) + """ + if not hostname: + return False, "Hostname cannot be empty" + + # Remove trailing dot if present (optional rejection) + if hostname.endswith('.'): + hostname = hostname.rstrip('.') + + # Check total length + if len(hostname) > 253: + return False, f"Hostname too long: {len(hostname)} characters (maximum 253)" + + # Split into labels + labels = hostname.split('.') + + # Check each label + label_pattern = re.compile(r'^[a-z0-9]([a-z0-9-]*[a-z0-9])?$', re.IGNORECASE) + + for i, label in enumerate(labels): + if not label: + return False, f"Empty label at position {i+1} (consecutive dots not allowed)" + + if len(label) > 63: + return False, f"Label '{label}' too long: {len(label)} characters (maximum 63)" + + if not label_pattern.match(label): + return False, f"Label '{label}' contains invalid characters. 
Labels must start and end with alphanumeric characters and can contain hyphens in between" + + # Additional check: no leading/trailing hyphen (pattern should catch this, but be explicit) + if label.startswith('-') or label.endswith('-'): + return False, f"Label '{label}' cannot start or end with a hyphen" + + return True, "" + + +def validate_password(password): + """ + Validate password to prevent chpasswd injection and enforce security. + + Rules: + - Reject newline characters ('\n') + - Reject colon characters (':') + - Minimum length: 8 characters + - Maximum length: 128 characters (reasonable limit) + + Returns: (is_valid: bool, error_message: str) + """ + if not password: + return False, "Password cannot be empty" + + # Check for forbidden characters + if '\n' in password: + return False, "Password cannot contain newline characters" + + if ':' in password: + return False, "Password cannot contain colon characters" + + # Check length + if len(password) < 8: + return False, f"Password too short: {len(password)} characters (minimum 8)" + + if len(password) > 128: + return False, f"Password too long: {len(password)} characters (maximum 128)" + + return True, "" + + +def check_auth(username, password): + """Check if a username/password combination is valid using PAM.""" + if username != 'root': + return False + + try: + # Try using PAM authentication first + import pam + p = pam.pam() + return p.authenticate(username, password) + except ImportError: + # Fallback: use subprocess to authenticate via su + try: + result = subprocess.run( + ['su', username, '-c', 'true'], + input=password.encode(), + capture_output=True, + timeout=5 + ) + return result.returncode == 0 + except Exception as e: + print(f"Authentication error: {e}", file=sys.stderr) + return False + + +def is_default_password(): + """Check if the root password is still the default 'jumpstarter'.""" + return check_auth('root', 'jumpstarter') + + +def authenticate(): + """Send a 401 response that enables 
basic auth.""" + return Response( + 'Authentication required. Please login with root credentials.', + 401, + {'WWW-Authenticate': 'Basic realm="Jumpstarter Configuration"'} + ) + + +def requires_auth(f): + """Decorator to require HTTP Basic Authentication.""" + @wraps(f) + def decorated(*args, **kwargs): + auth = request.authorization + if not auth or not check_auth(auth.username, auth.password): + return authenticate() + return f(*args, **kwargs) + return decorated + + +# HTML template for forced password change +PASSWORD_REQUIRED_TEMPLATE = """ + + + + + Password Change Required - Jumpstarter + + + + +
+ + +
+

Security Setup Required

+ + {% for msg in messages %} +
{{ msg.text }}
+ {% endfor %} + +
+

⚠️ Default Password Detected

+

You are using the default password. For security reasons, you must change the root password before accessing the configuration interface.

+
+ +
+
+
+ + +
Minimum 8 characters (required to change from default password)
+
+
+ + +
Re-enter your new password
+
+
+ + +
One SSH public key per line. Leave empty to clear existing keys.
+
+ +
+ +
+
+ +""" + +# HTML template for the main page +HTML_TEMPLATE = """ + + + + + Jumpstarter Configuration + + + + +
+ + + + +
+ {% for msg in messages %} +
{{ msg.text }}
+ {% endfor %} + +
+

Jumpstarter Deployment Configuration

+
+
+
+ + +
The base domain for Jumpstarter routes
+
+
+ + +
The Jumpstarter controller container image to use
+
+
+ + +
When to pull the container image
+
+ +
+ +

Hostname Configuration

+
+
+ + +
Set the system hostname
+
+ +
+
+ +
+

Change Root Password

+
+
+
+ + +
Leave empty to only update SSH keys. Minimum 8 characters if provided.
+
+
+ + +
Re-enter your new password (required if password is provided)
+
+
+ + +
One SSH public key per line. Leave empty to clear existing keys.
+
+ +
+
+ +
+

BootC Operations

+
+
+ + +
Container image reference to switch to (e.g., quay.io/jumpstarter-dev/microshift/bootc:latest)
+
+
+ + + +
+ +

System Information

+
+
Loading system statistics...
+
+ +

BootC Status

+
+
Loading BootC status...
+
+ +

Kernel Log

+
+
Loading kernel log...
+
+
+ +
+
+

Kubeconfig

+

+ Download the MicroShift kubeconfig file to access the Kubernetes cluster from your local machine. +

+ Download Kubeconfig +
+ +
+

Routes

+
+ +
+ + + + + + + + + + + + + + + + + + +
NamespaceNameHostServicePortTLSAdmittedAge
Loading routes...
+
+
+ +
+

Pod Status

+
+ +
+ + + + + + + + + + + + + + + + + + +
NamespaceNameReadyStatusRestartsAgeNodeActions
Loading pods...
+
+
+
+
+



"""


@app.route('/static/styles.css')
def serve_css():
    """Serve the consolidated CSS stylesheet used by every page of the config UI.

    Served from memory (no file I/O); browsers fetch it at /static/styles.css.
    NOTE(review): the source was whitespace-mangled in extraction; the CSS
    content below is preserved, indentation reconstructed (insignificant to
    how the stylesheet renders).
    """
    css = """
    * {
        margin: 0;
        padding: 0;
        box-sizing: border-box;
    }
    html {
        scroll-behavior: smooth;
    }
    body {
        font-family: -apple-system, BlinkMacSystemFont, "Segoe UI", Roboto, "Helvetica Neue", Arial, sans-serif;
        background: linear-gradient(135deg, #4c4c4c 0%, #1a1a1a 100%);
        min-height: 100vh;
        display: flex;
        justify-content: center;
        align-items: center;
        padding: 20px;
    }
    .container {
        background: white;
        border-radius: 12px;
        box-shadow: 0 10px 60px rgba(0,0,0,0.5), 0 0 0 1px rgba(255, 193, 7, 0.1);
        max-width: 1000px;
        width: 100%;
        padding: 40px;
    }
    .banner {
        margin: -40px -40px 30px -40px;
        padding: 25px 40px;
        background: linear-gradient(135deg, #757575 0%, #616161 100%);
        border-radius: 12px 12px 0 0;
        text-align: center;
    }
    .banner-text {
        color: white;
        font-size: 14px;
        margin-bottom: 20px;
        font-weight: 500;
    }
    .logos {
        display: flex;
        justify-content: center;
        align-items: center;
        gap: 40px;
        flex-wrap: wrap;
    }
    .logo-link {
        display: inline-block;
        transition: opacity 0.3s;
    }
    .logo-link:hover {
        opacity: 0.9;
    }
    .logo-link img {
        height: 45px;
        width: auto;
    }
    .microshift-logo {
        height: 40px !important;
        filter: brightness(0) invert(1);
    }
    .jumpstarter-logo {
        height: 40px !important;
    }
    .nav-bar {
        display: flex;
        gap: 0;
        margin: 0 -40px 30px -40px;
        border-bottom: 1px solid #e0e0e0;
        background: #fafafa;
    }
    .nav-link {
        flex: 1;
        text-align: center;
        padding: 15px 20px;
        text-decoration: none;
        color: #666;
        font-size: 14px;
        font-weight: 500;
        transition: all 0.3s;
        border-bottom: 3px solid transparent;
    }
    .nav-link:hover {
        background: #f5f5f5;
        color: #333;
        border-bottom-color: #ffc107;
    }
    .nav-link.active {
        color: #000;
        border-bottom-color: #ffc107;
        background: white;
    }
    .content-area {
        padding: 0 40px 40px 40px;
        margin: 0 -40px -40px -40px;
    }
    h2 {
        color: #333;
        font-size: 20px;
        margin-bottom: 15px;
    }
    .section {
        display: none;
        padding: 20px 0;
        animation: fadeIn 0.3s ease-in;
    }
    @keyframes fadeIn {
        from {
            opacity: 0;
            transform: translateY(10px);
        }
        to {
            opacity: 1;
            transform: translateY(0);
        }
    }
    .info {
        background: #f8f9fa;
        padding: 12px 16px;
        border-radius: 6px;
        margin-bottom: 15px;
        font-size: 14px;
        color: #555;
    }
    .info strong {
        color: #333;
    }
    .warning-box {
        background: #fff3cd;
        border: 1px solid #ffc107;
        border-radius: 6px;
        padding: 16px;
        margin-bottom: 30px;
    }
    .warning-box h2 {
        color: #856404;
        font-size: 18px;
        margin-bottom: 10px;
    }
    .warning-box p {
        color: #856404;
        font-size: 14px;
        line-height: 1.5;
    }
    .form-group {
        margin-bottom: 15px;
    }
    label {
        display: block;
        margin-bottom: 6px;
        color: #555;
        font-size: 14px;
        font-weight: 500;
    }
    input[type="text"],
    input[type="password"],
    textarea {
        width: 100%;
        padding: 10px 12px;
        border: 1px solid #ddd;
        border-radius: 6px;
        font-size: 14px;
        transition: border-color 0.3s, opacity 0.3s;
        font-family: inherit;
    }
    textarea {
        font-family: monospace;
        resize: vertical;
    }
    input[type="text"]:focus,
    input[type="password"]:focus,
    textarea:focus {
        outline: none;
        border-color: #ffc107;
        box-shadow: 0 0 0 2px rgba(255, 193, 7, 0.2);
    }
    input[type="text"]:disabled,
    input[type="password"]:disabled,
    textarea:disabled {
        background-color: #f5f5f5;
        cursor: not-allowed;
        opacity: 0.6;
    }
    select {
        width: 100%;
        padding: 10px 12px;
        border: 1px solid #ddd;
        border-radius: 6px;
        font-size: 14px;
        background-color: white;
        cursor: pointer;
        transition: border-color 0.3s;
    }
    select:focus {
        outline: none;
        border-color: #ffc107;
        box-shadow: 0 0 0 2px rgba(255, 193, 7, 0.2);
    }
    .hint {
        font-size: 12px;
        color: #888;
        margin-top: 4px;
    }
    button {
        background: #ffc107;
        color: #000;
        border: none;
        padding: 12px 24px;
        border-radius: 6px;
        font-size: 14px;
        font-weight: 600;
        cursor: pointer;
        transition: background 0.3s, opacity 0.3s;
    }
    button:hover {
        background: #ffb300;
    }
    button:disabled {
        background: #666;
        color: #999;
        cursor: not-allowed;
        opacity: 0.6;
    }
    button:disabled:hover {
        background: #666;
    }
    button[type="submit"] {
        width: 100%;
    }
    .download-btn {
        background: #ffc107;
        display: inline-block;
        text-decoration: none;
        color: #000;
        padding: 12px 24px;
        border-radius: 6px;
        font-size: 14px;
        font-weight: 600;
        transition: background 0.3s;
    }
    .download-btn:hover {
        background: #ffb300;
    }
    .message {
        padding: 12px 16px;
        border-radius: 6px;
        margin-bottom: 20px;
        font-size: 14px;
    }
    .message.success {
        background: #d4edda;
        color: #155724;
        border: 1px solid #c3e6cb;
    }
    .message.error {
        background: #f8d7da;
        color: #721c24;
        border: 1px solid #f5c6cb;
    }
    .message.info {
        background: #d1ecf1;
        color: #0c5460;
        border: 1px solid #bee5eb;
    }
    /* MicroShift page specific styles */
    .status-badge {
        display: inline-block;
        padding: 4px 8px;
        border-radius: 4px;
        font-size: 11px;
        font-weight: 600;
        text-transform: uppercase;
    }
    .status-running {
        background: #d4edda;
        color: #155724;
    }
    .status-pending {
        background: #fff3cd;
        color: #856404;
    }
    .status-failed {
        background: #f8d7da;
        color: #721c24;
    }
    .status-succeeded {
        background: #d1ecf1;
        color: #0c5460;
    }
    .status-crashloopbackoff {
        background: #f8d7da;
        color: #721c24;
    }
    .status-terminating {
        background: #ffeaa7;
        color: #856404;
    }
    .status-unknown {
        background: #e2e3e5;
        color: #383d41;
    }
    table {
        width: 100%;
        border-collapse: collapse;
        margin-top: 20px;
        font-size: 13px;
    }
    th {
        background: #f8f9fa;
        padding: 12px 8px;
        text-align: left;
        font-weight: 600;
        color: #333;
        border-bottom: 2px solid #dee2e6;
        position: sticky;
        top: 0;
        z-index: 10;
    }
    td {
        padding: 10px 8px;
        border-bottom: 1px solid #eee;
        color: #555;
    }
    tr:hover {
        background: #f8f9fa;
    }
    .table-wrapper {
        overflow-x: auto;
        max-height: 70vh;
        overflow-y: auto;
    }
    .loading {
        text-align: center;
        padding: 40px;
        color: #666;
    }
    .error {
        background: #f8d7da;
        color: #721c24;
        padding: 12px 16px;
        border-radius: 6px;
        margin-bottom: 20px;
    }
    .pod-count {
        color: #666;
        font-size: 14px;
        margin-bottom: 10px;
    }
    .microshift-section {
        margin-bottom: 30px;
        padding-bottom: 30px;
        border-bottom: 1px solid #eee;
    }
    .microshift-section:last-child {
        border-bottom: none;
    }
    .action-icon {
        text-decoration: none;
        font-size: 18px;
        padding: 4px 6px;
        margin: 0 2px;
        border-radius: 4px;
        transition: all 0.3s;
        display: inline-block;
        cursor: pointer;
    }
    .action-icon:hover {
        background: #fff3e0;
        transform: scale(1.2);
    }
    """
    return Response(css, mimetype='text/css')


@app.route('/logout')
def logout():
    """Logout endpoint that forces re-authentication.

    Returning 401 with a WWW-Authenticate header makes the browser drop its
    cached HTTP Basic-Auth credentials and prompt again on the next request.
    """
    return Response(
        'Logged out. Please close this dialog to log in again.',
        401,
        {'WWW-Authenticate': 'Basic realm="Jumpstarter Configuration"'}
    )
@app.route('/')
@requires_auth
def index():
    """Serve the main configuration page.

    If the root account still uses the shipped default password, the user is
    forced onto the password-change template before anything else.
    """
    current_hostname = get_current_hostname()
    jumpstarter_config = get_jumpstarter_config()
    password_required = is_default_password()
    ssh_keys = get_ssh_authorized_keys()

    # Force password change if still using default
    if password_required:
        return render_template_string(
            PASSWORD_REQUIRED_TEMPLATE,
            messages=[],
            current_hostname=current_hostname,
            ssh_keys=ssh_keys
        )

    return render_template_string(
        HTML_TEMPLATE,
        messages=[],
        current_hostname=current_hostname,
        jumpstarter_config=jumpstarter_config,
        password_required=password_required,
        ssh_keys=ssh_keys
    )


@app.route('/api/change-password', methods=['POST'])
@requires_auth
def api_change_password():
    """API endpoint to handle password change request (returns JSON).

    Accepts either a JSON body or form fields (JSON takes precedence).
    Handles two independent updates in one call: the root password and the
    SSH authorized_keys content. Success is reported only when at least one
    of the two actually changed and no error message was queued.
    """
    data = request.get_json() if request.is_json else {}
    new_password = data.get('newPassword', request.form.get('newPassword', '')).strip()
    confirm_password = data.get('confirmPassword', request.form.get('confirmPassword', '')).strip()
    ssh_keys_value = data.get('sshKeys', request.form.get('sshKeys', '')).strip()

    # Snapshot state before mutating, so messages reflect the transition.
    was_default = is_default_password()
    existing_ssh_keys = get_ssh_authorized_keys()

    messages = []
    password_updated = False
    ssh_updated = False
    requires_redirect = False

    # If password is provided, validate and set it
    if new_password:
        # Validate password format and security
        password_valid, password_error = validate_password(new_password)
        if not password_valid:
            messages.append({'type': 'error', 'text': password_error})
        elif new_password != confirm_password:
            messages.append({'type': 'error', 'text': 'Passwords do not match'})
        else:
            password_success, password_message = set_root_password(new_password)
            if not password_success:
                messages.append({'type': 'error', 'text': f'Failed to set password: {password_message}'})
            else:
                password_updated = True
                messages.append({'type': 'success', 'text': 'Password changed successfully!'})
                if was_default:
                    # Update login banner on first password change
                    update_login_banner()
                    requires_redirect = True
    elif was_default:
        # If we're on the default password screen and no password provided, require it
        messages.append({'type': 'error', 'text': 'Password is required to change from default password'})

    # Process SSH keys (always process if form was submitted)
    ssh_success, ssh_message = set_ssh_authorized_keys(ssh_keys_value)
    if ssh_success:
        ssh_updated = True
        if ssh_keys_value:
            messages.append({'type': 'success', 'text': ssh_message})
        else:
            # Only show message if keys were cleared and there were keys before
            if existing_ssh_keys:
                messages.append({'type': 'success', 'text': ssh_message})
    else:
        messages.append({'type': 'error', 'text': f'Failed to set SSH keys: {ssh_message}'})

    has_errors = any(msg.get('type') == 'error' for msg in messages)
    success = not has_errors and (password_updated or ssh_updated)

    return jsonify({
        'success': success,
        'messages': messages,
        'password_updated': password_updated,
        'ssh_updated': ssh_updated,
        'requires_redirect': requires_redirect,
        # Re-read keys only if we changed them; otherwise echo the snapshot.
        'ssh_keys': get_ssh_authorized_keys() if ssh_updated else existing_ssh_keys
    })


@app.route('/configure-hostname', methods=['POST'])
@requires_auth
def configure_hostname():
    """Handle hostname configuration request (HTML form submission).

    Validates and applies the hostname, then re-renders the main page with
    the collected status messages.
    NOTE(review): unlike index(), this render does not pass ssh_keys to the
    template — confirm HTML_TEMPLATE tolerates that.
    """
    hostname = request.form.get('hostname', '').strip()

    current_hostname = get_current_hostname()
    jumpstarter_config = get_jumpstarter_config()
    password_required = is_default_password()

    messages = []

    if not hostname:
        messages.append({'type': 'error', 'text': 'Hostname is required'})
    else:
        # Validate hostname format
        hostname_valid, hostname_error = validate_hostname(hostname)
        if not hostname_valid:
            messages.append({'type': 'error', 'text': f'Invalid hostname: {hostname_error}'})
        else:
            hostname_success, hostname_message = set_hostname(hostname)
            if not hostname_success:
                messages.append({'type': 'error', 'text': f'Failed to update hostname: {hostname_message}'})
            else:
                current_hostname = hostname
                messages.append({'type': 'success', 'text': f'Hostname updated successfully to: {hostname}'})

                # Update login banner with the new hostname; failure is only logged.
                banner_success, banner_message = update_login_banner()
                if not banner_success:
                    print(f"Warning: Failed to update login banner: {banner_message}", file=sys.stderr)

    return render_template_string(
        HTML_TEMPLATE,
        messages=messages,
        current_hostname=current_hostname,
        jumpstarter_config=jumpstarter_config,
        password_required=password_required
    )
@app.route('/api/configure-jumpstarter', methods=['POST'])
@requires_auth
def api_configure_jumpstarter():
    """API endpoint to handle Jumpstarter CR configuration request (returns JSON).

    Accepts JSON or form fields; validates the base domain (reusing hostname
    validation) and the controller image, then applies the Jumpstarter CR.
    """
    data = request.get_json() if request.is_json else {}
    base_domain = data.get('baseDomain', request.form.get('baseDomain', '')).strip()
    image = data.get('image', request.form.get('image', '')).strip()
    image_pull_policy = data.get('imagePullPolicy', request.form.get('imagePullPolicy', 'IfNotPresent')).strip()

    messages = []
    success = False

    if not base_domain:
        messages.append({'type': 'error', 'text': 'Base domain is required'})
    else:
        # Validate base domain format (same as hostname validation)
        domain_valid, domain_error = validate_hostname(base_domain)
        if not domain_valid:
            messages.append({'type': 'error', 'text': f'Invalid base domain: {domain_error}'})
        elif not image:
            messages.append({'type': 'error', 'text': 'Controller image is required'})
        else:
            # Apply the Jumpstarter CR
            cr_success, cr_message = apply_jumpstarter_cr(base_domain, image, image_pull_policy)

            if cr_success:
                msg = f'Jumpstarter configuration applied successfully! Base Domain: {base_domain}, Image: {image}'
                messages.append({'type': 'success', 'text': msg})
                success = True
            else:
                messages.append({'type': 'error', 'text': f'Failed to apply Jumpstarter CR: {cr_message}'})

    return jsonify({
        'success': success,
        'messages': messages,
        'config': {
            'base_domain': base_domain,
            'image': image,
            'image_pull_policy': image_pull_policy
        } if success else None
    })


@app.route('/configure-jumpstarter', methods=['POST'])
@requires_auth
def configure_jumpstarter():
    """Handle Jumpstarter CR configuration request (legacy HTML form submission).

    Same validation/apply flow as api_configure_jumpstarter(), but re-renders
    the full HTML page instead of returning JSON.
    """
    base_domain = request.form.get('baseDomain', '').strip()
    image = request.form.get('image', '').strip()
    image_pull_policy = request.form.get('imagePullPolicy', 'IfNotPresent').strip()

    current_hostname = get_current_hostname()
    jumpstarter_config = get_jumpstarter_config()
    password_required = is_default_password()

    messages = []

    if not base_domain:
        messages.append({'type': 'error', 'text': 'Base domain is required'})
    else:
        # Validate base domain format (same as hostname validation)
        domain_valid, domain_error = validate_hostname(base_domain)
        if not domain_valid:
            messages.append({'type': 'error', 'text': f'Invalid base domain: {domain_error}'})
        elif not image:
            messages.append({'type': 'error', 'text': 'Controller image is required'})
        else:
            # Apply the Jumpstarter CR
            cr_success, cr_message = apply_jumpstarter_cr(base_domain, image, image_pull_policy)

            if cr_success:
                msg = f'Jumpstarter configuration applied successfully! Base Domain: {base_domain}, Image: {image}'
                messages.append({'type': 'success', 'text': msg})
                # Update config to show what was just applied
                jumpstarter_config = {
                    'base_domain': base_domain,
                    'image': image,
                    'image_pull_policy': image_pull_policy
                }
            else:
                messages.append({'type': 'error', 'text': f'Failed to apply Jumpstarter CR: {cr_message}'})

    return render_template_string(
        HTML_TEMPLATE,
        messages=messages,
        current_hostname=current_hostname,
        jumpstarter_config=jumpstarter_config,
        password_required=password_required
    )
Base Domain: {base_domain}, Image: {image}' + messages.append({'type': 'success', 'text': msg}) + # Update config to show what was just applied + jumpstarter_config = { + 'base_domain': base_domain, + 'image': image, + 'image_pull_policy': image_pull_policy + } + else: + messages.append({'type': 'error', 'text': f'Failed to apply Jumpstarter CR: {cr_message}'}) + + return render_template_string( + HTML_TEMPLATE, + messages=messages, + current_hostname=current_hostname, + jumpstarter_config=jumpstarter_config, + password_required=password_required + ) + + +def get_lvm_pv_info(): + """ + Parse pvscan output to get LVM physical volume information. + Returns dict with PV info or None if not available. + """ + try: + result = subprocess.run(['pvscan'], capture_output=True, text=True, timeout=5) + if result.returncode != 0: + return None + + # Parse output like: "PV /dev/sda3 VG myvg1 lvm2 [62.41 GiB / 52.41 GiB free]" + # or: "Total: 1 [62.41 GiB] / in use: 1 [62.41 GiB] / in no VG: 0 [0 ]" + output = result.stdout.strip() + if not output: + return None + + lines = output.split('\n') + + # Look for PV line + pv_device = None + vg_name = None + total_size = None + free_size = None + + for line in lines: + line = line.strip() + # Match: "PV /dev/sda3 VG myvg1 lvm2 [62.41 GiB / 52.41 GiB free]" + if line.startswith('PV '): + parts = line.split() + if len(parts) >= 2: + pv_device = parts[1] + # Find VG name + for i, part in enumerate(parts): + if part == 'VG' and i + 1 < len(parts): + vg_name = parts[i + 1] + break + # Find size info in brackets + bracket_match = re.search(r'\[([^\]]+)\]', line) + if bracket_match: + size_info = bracket_match.group(1) + # Parse "62.41 GiB / 52.41 GiB free" + size_parts = size_info.split('/') + if len(size_parts) >= 1: + total_size = size_parts[0].strip() + if len(size_parts) >= 2: + free_match = re.search(r'([\d.]+)\s*([KMGT]i?B)', size_parts[1]) + if free_match: + free_size = free_match.group(1) + ' ' + free_match.group(2) + + if not 
pv_device or not total_size: + return None + + # Calculate used space and percentage + # Parse sizes to calculate percentage + def parse_size(size_str): + """Parse size string like '62.41 GiB' to bytes.""" + match = re.match(r'([\d.]+)\s*([KMGT]i?)B?', size_str, re.IGNORECASE) + if not match: + return 0 + value = float(match.group(1)) + unit = match.group(2).upper() + multipliers = {'K': 1024, 'M': 1024**2, 'G': 1024**3, 'T': 1024**4} + return int(value * multipliers.get(unit, 1)) + + total_bytes = parse_size(total_size) + free_bytes = parse_size(free_size) if free_size else 0 + used_bytes = total_bytes - free_bytes + percent = int((used_bytes / total_bytes * 100)) if total_bytes > 0 else 0 + + # Format used size + def format_size(bytes_val): + """Format bytes to human-readable size.""" + for unit, multiplier in [('TiB', 1024**4), ('GiB', 1024**3), ('MiB', 1024**2), ('KiB', 1024)]: + if bytes_val >= multiplier: + return f"{bytes_val / multiplier:.2f} {unit}" + return f"{bytes_val} B" + + used_size = format_size(used_bytes) + + return { + 'pv_device': pv_device, + 'vg_name': vg_name or 'N/A', + 'total': total_size, + 'free': free_size or '0 B', + 'used': used_size, + 'percent': percent + } + except Exception as e: + print(f"Error parsing LVM PV info: {e}", file=sys.stderr) + return None + + +def get_root_filesystem(): + """ + Detect the real root filesystem mount point. + On bootc systems, /sysroot is the real root filesystem. + Otherwise, find the largest real block device filesystem. 
+ """ + # Check if /sysroot exists and is a mount point (bootc systems) + try: + result = subprocess.run(['findmnt', '-n', '-o', 'TARGET', '/sysroot'], + capture_output=True, text=True, timeout=5) + if result.returncode == 0 and result.stdout.strip(): + return '/sysroot' + except Exception: + pass + + # Fallback: parse df output to find the real root filesystem + try: + df_result = subprocess.run(['df', '-h'], capture_output=True, text=True, timeout=5) + if df_result.returncode != 0: + return '/' # Fallback to root + + lines = df_result.stdout.strip().split('\n') + if len(lines) < 2: + return '/' # Fallback to root + + # Virtual filesystem types to skip + virtual_fs = ('tmpfs', 'overlay', 'composefs', 'devtmpfs', 'proc', 'sysfs', + 'devpts', 'cgroup', 'pstore', 'bpf', 'tracefs', 'debugfs', + 'configfs', 'fusectl', 'mqueue', 'hugetlbfs', 'efivarfs', 'ramfs', + 'nsfs', 'shm', 'vfat') + + # Boot partitions to skip + boot_paths = ('/boot', '/boot/efi') + + best_fs = None + best_size = 0 + + for line in lines[1:]: # Skip header + parts = line.split() + if len(parts) < 6: + continue + + filesystem = parts[0] + mount_point = parts[5] + size_str = parts[1] + + # Skip virtual filesystems + fs_type = filesystem.split('/')[-1] if '/' in filesystem else filesystem + if any(vfs in fs_type.lower() for vfs in virtual_fs): + continue + + # Skip boot partitions + if mount_point in boot_paths: + continue + + # Skip if not a block device (doesn't start with /dev) + if not filesystem.startswith('/dev'): + continue + + # Prefer LVM root volumes + if '/mapper/' in filesystem and 'root' in filesystem.lower(): + return mount_point + + # Calculate size for comparison (convert to bytes for comparison) + try: + # Parse size like "10G", "500M", etc. 
@app.route('/api/system-stats')
@requires_auth
def get_system_stats():
    """API endpoint to get system statistics.

    Aggregates disk, memory, CPU, kernel/uptime/load, network-interface and
    LVM information by shelling out to standard tools (df, free, nproc, top,
    uname, uptime, ip, pvscan) and scraping their text output.
    NOTE(review): the scraping assumes procps/iproute2-style output formats —
    verify against the image's tool versions.
    """
    try:
        stats = {}

        # Disk usage - use detected root filesystem
        root_fs = get_root_filesystem()
        disk_result = subprocess.run(['df', '-h', root_fs], capture_output=True, text=True)
        disk_lines = disk_result.stdout.strip().split('\n')
        if len(disk_lines) > 1:
            disk_parts = disk_lines[1].split()
            stats['disk'] = {
                'total': disk_parts[1],
                'used': disk_parts[2],
                'available': disk_parts[3],
                'percent': int(disk_parts[4].rstrip('%'))
            }
        else:
            stats['disk'] = {'total': 'N/A', 'used': 'N/A', 'available': 'N/A', 'percent': 0}

        # Memory usage
        mem_result = subprocess.run(['free', '-h'], capture_output=True, text=True)
        mem_lines = mem_result.stdout.strip().split('\n')
        if len(mem_lines) > 1:
            mem_parts = mem_lines[1].split()
            # Run `free` again without -h to compute the percentage from raw KiB.
            mem_total_result = subprocess.run(['free'], capture_output=True, text=True)
            mem_total_lines = mem_total_result.stdout.strip().split('\n')[1].split()
            mem_percent = int((int(mem_total_lines[2]) / int(mem_total_lines[1])) * 100)

            stats['memory'] = {
                'total': mem_parts[1],
                'used': mem_parts[2],
                # Column 6 is 'available' in modern `free`; fall back to 'free'.
                'available': mem_parts[6] if len(mem_parts) > 6 else mem_parts[3],
                'percent': mem_percent
            }
        else:
            stats['memory'] = {'total': 'N/A', 'used': 'N/A', 'available': 'N/A', 'percent': 0}

        # CPU info
        cpu_count_result = subprocess.run(['nproc'], capture_output=True, text=True)
        cpu_cores = int(cpu_count_result.stdout.strip()) if cpu_count_result.returncode == 0 else 0

        # CPU usage - get from top
        top_result = subprocess.run(['top', '-bn1'], capture_output=True, text=True)
        cpu_usage = 0
        for line in top_result.stdout.split('\n'):
            if 'Cpu(s)' in line or '%Cpu' in line:
                # Parse line like "%Cpu(s): 2.0 us, 1.0 sy, 0.0 ni, 97.0 id,..."
                parts = line.split(',')
                for part in parts:
                    if 'id' in part:
                        # Usage = 100 - idle percentage.
                        idle = float(part.split()[0])
                        cpu_usage = round(100 - idle, 1)
                        break
                break

        stats['cpu'] = {
            'cores': cpu_cores,
            'usage': cpu_usage
        }

        # System info
        kernel_result = subprocess.run(['uname', '-r'], capture_output=True, text=True)
        kernel = kernel_result.stdout.strip()

        hostname = get_current_hostname()

        # Uptime
        uptime_result = subprocess.run(['uptime', '-p'], capture_output=True, text=True)
        uptime = uptime_result.stdout.strip().replace('up ', '')

        # Load average
        loadavg_result = subprocess.run(['cat', '/proc/loadavg'], capture_output=True, text=True)
        loadavg_parts = loadavg_result.stdout.strip().split()

        stats['system'] = {
            'kernel': kernel,
            'hostname': hostname,
            'uptime': uptime,
            'load_1': loadavg_parts[0] if len(loadavg_parts) > 0 else '0',
            'load_5': loadavg_parts[1] if len(loadavg_parts) > 1 else '0',
            'load_15': loadavg_parts[2] if len(loadavg_parts) > 2 else '0'
        }

        # Network interfaces
        ip_result = subprocess.run(['ip', '-4', 'addr', 'show'], capture_output=True, text=True)
        interfaces = []
        current_iface = None
        # Prefixes to skip (container/virtual interfaces)
        skip_prefixes = ('veth', 'docker', 'br-', 'cni', 'flannel', 'cali')

        for line in ip_result.stdout.split('\n'):
            line = line.strip()
            if line and line[0].isdigit() and ':' in line:
                # Interface line, e.g. "2: eth0: <BROADCAST,...>"
                parts = line.split(':')
                if len(parts) >= 2:
                    iface_name = parts[1].strip().split('@')[0]
                    # Skip virtual/container interfaces
                    if not iface_name.startswith(skip_prefixes):
                        current_iface = iface_name
                    else:
                        current_iface = None
            elif 'inet ' in line and current_iface:
                # IP line; only the first address per interface is kept.
                ip_addr = line.split()[1].split('/')[0]
                if ip_addr != '127.0.0.1':  # Skip localhost
                    interfaces.append({
                        'name': current_iface,
                        'ip': ip_addr
                    })
                current_iface = None

        stats['network'] = {
            'interfaces': interfaces
        }

        # LVM Physical Volume information
        lvm_info = get_lvm_pv_info()
        if lvm_info:
            stats['lvm'] = lvm_info

        return jsonify(stats)

    except Exception as e:
        return jsonify({'error': f'Error gathering system statistics: {str(e)}'}), 500


@app.route('/api/bootc-status')
@requires_auth
def get_bootc_status():
    """API endpoint to get BootC status and upgrade check information.

    Runs `bootc status` and `bootc upgrade --check`, reporting each command's
    output (or a per-command error string) — failures never raise to the client.
    """
    try:
        status_output = ''
        upgrade_check_output = ''

        # Get bootc status
        try:
            status_result = subprocess.run(
                ['bootc', 'status'],
                capture_output=True,
                text=True,
                timeout=10
            )
            if status_result.returncode == 0:
                status_output = status_result.stdout.strip()
            else:
                status_output = f"Error: {status_result.stderr.strip()}"
        except FileNotFoundError:
            status_output = "bootc command not found"
        except subprocess.TimeoutExpired:
            status_output = "Command timed out"
        except Exception as e:
            status_output = f"Error: {str(e)}"

        # Get upgrade check (may contact the registry, hence the longer timeout)
        try:
            upgrade_result = subprocess.run(
                ['bootc', 'upgrade', '--check'],
                capture_output=True,
                text=True,
                timeout=30
            )
            if upgrade_result.returncode == 0:
                upgrade_check_output = upgrade_result.stdout.strip()
            else:
                upgrade_check_output = f"Error: {upgrade_result.stderr.strip()}"
        except FileNotFoundError:
            upgrade_check_output = "bootc command not found"
        except subprocess.TimeoutExpired:
            upgrade_check_output = "Command timed out"
        except Exception as e:
            upgrade_check_output = f"Error: {str(e)}"

        return jsonify({
            'status': status_output,
            'upgrade_check': upgrade_check_output
        })

    except Exception as e:
        return jsonify({'error': f'Error getting BootC status: {str(e)}'}), 500
@app.route('/api/bootc-upgrade-check', methods=['POST'])
@requires_auth
def bootc_upgrade_check():
    """API endpoint to check for BootC upgrades (no changes applied)."""
    try:
        result = subprocess.run(
            ['bootc', 'upgrade', '--check'],
            capture_output=True,
            text=True,
            timeout=30
        )

        if result.returncode == 0:
            return jsonify({
                'success': True,
                'output': result.stdout.strip(),
                'message': 'Upgrade check completed'
            })
        else:
            return jsonify({
                'success': False,
                'error': result.stderr.strip() or 'Upgrade check failed'
            }), 400

    except FileNotFoundError:
        return jsonify({'success': False, 'error': 'bootc command not found'}), 404
    except subprocess.TimeoutExpired:
        return jsonify({'success': False, 'error': 'Command timed out'}), 500
    except Exception as e:
        return jsonify({'success': False, 'error': f'Error: {str(e)}'}), 500


@app.route('/api/bootc-upgrade', methods=['POST'])
@requires_auth
def bootc_upgrade():
    """API endpoint to apply BootC upgrade.

    Blocks the request for up to 10 minutes while the image is pulled and
    staged; a timeout does not abort the underlying upgrade process.
    """
    try:
        # Run bootc upgrade (this may take a while)
        result = subprocess.run(
            ['bootc', 'upgrade'],
            capture_output=True,
            text=True,
            timeout=600  # 10 minutes timeout for upgrade
        )

        if result.returncode == 0:
            return jsonify({
                'success': True,
                'output': result.stdout.strip(),
                'message': 'Upgrade completed successfully. Reboot may be required.'
            })
        else:
            return jsonify({
                'success': False,
                'error': result.stderr.strip() or 'Upgrade failed'
            }), 400

    except FileNotFoundError:
        return jsonify({'success': False, 'error': 'bootc command not found'}), 404
    except subprocess.TimeoutExpired:
        return jsonify({'success': False, 'error': 'Command timed out (upgrade may still be in progress)'}), 500
    except Exception as e:
        return jsonify({'success': False, 'error': f'Error: {str(e)}'}), 500


@app.route('/api/bootc-switch', methods=['POST'])
@requires_auth
def bootc_switch():
    """API endpoint to switch BootC to a different image.

    NOTE(review): the image-format check below accepts any reference that
    contains ':' or '/', so it rejects almost nothing — consider tightening
    if stricter validation is wanted.
    """
    try:
        data = request.get_json() if request.is_json else {}
        image = data.get('image', '').strip()

        if not image:
            return jsonify({'success': False, 'error': 'Image reference is required'}), 400

        # Validate image format (basic check)
        if not (image.startswith('quay.io/') or image.startswith('docker.io/') or
                ':' in image or '/' in image):
            return jsonify({'success': False, 'error': 'Invalid image reference format'}), 400

        # Run bootc switch (this may take a while)
        result = subprocess.run(
            ['bootc', 'switch', image],
            capture_output=True,
            text=True,
            timeout=600  # 10 minutes timeout for switch
        )

        if result.returncode == 0:
            return jsonify({
                'success': True,
                'output': result.stdout.strip(),
                'message': f'Switched to {image} successfully. Reboot may be required.'
            })
        else:
            return jsonify({
                'success': False,
                'error': result.stderr.strip() or 'Switch failed'
            }), 400

    except FileNotFoundError:
        return jsonify({'success': False, 'error': 'bootc command not found'}), 404
    except subprocess.TimeoutExpired:
        return jsonify({'success': False, 'error': 'Command timed out (switch may still be in progress)'}), 500
    except Exception as e:
        return jsonify({'success': False, 'error': f'Error: {str(e)}'}), 500
@app.route('/api/dmesg')
@requires_auth
def get_dmesg():
    """API endpoint to get kernel log (dmesg)."""
    try:
        # Run dmesg command to get kernel log
        result = subprocess.run(
            ['dmesg'],
            capture_output=True,
            text=True,
            timeout=10
        )

        if result.returncode != 0:
            return jsonify({'error': f'Failed to get dmesg: {result.stderr.strip()}'}), 500

        # Return the log (limit to last 10000 lines to avoid huge responses)
        log_lines = result.stdout.strip().split('\n')
        if len(log_lines) > 10000:
            log_lines = log_lines[-10000:]

        return jsonify({
            'log': '\n'.join(log_lines),
            'line_count': len(log_lines)
        })

    except subprocess.TimeoutExpired:
        return jsonify({'error': 'Command timed out'}), 500
    except Exception as e:
        return jsonify({'error': f'Error getting dmesg: {str(e)}'}), 500


@app.route('/api/operator-status')
@requires_auth
def get_operator_status():
    """API endpoint to check if the Jumpstarter operator is ready.

    Always returns HTTP 200 with a ready flag/message so the UI can poll it;
    every failure mode is reported as "not ready yet" rather than an error.
    """
    try:
        # Path to MicroShift kubeconfig
        kubeconfig_path = KUBECONFIG_PATH

        # Check if kubeconfig exists
        if not os.path.exists(kubeconfig_path):
            return jsonify({'ready': False, 'message': 'MicroShift kubeconfig not found. Waiting for MicroShift to start...'}), 200

        # Check if jumpstarter-operator pod is running and ready
        result = subprocess.run(
            ['oc', '--kubeconfig', kubeconfig_path, 'get', 'pods', '-n', 'jumpstarter-operator-system', '-o', 'json'],
            capture_output=True,
            text=True,
            check=True,
            timeout=10
        )

        pods_data = json.loads(result.stdout)

        # Look for the operator controller manager pod
        for pod in pods_data.get('items', []):
            pod_name = pod.get('metadata', {}).get('name', '')
            if 'jumpstarter-operator-controller-manager' in pod_name:
                # Check if pod is running and ready
                status = pod.get('status', {})
                phase = status.get('phase', '')
                container_statuses = status.get('containerStatuses', [])

                if phase == 'Running' and container_statuses:
                    # Ready only when every container reports ready.
                    all_ready = all(c.get('ready', False) for c in container_statuses)
                    if all_ready:
                        return jsonify({'ready': True, 'message': 'Jumpstarter operator is ready'}), 200
                    else:
                        return jsonify({'ready': False, 'message': 'Jumpstarter operator is starting...'}), 200
                else:
                    return jsonify({'ready': False, 'message': f'Jumpstarter operator status: {phase}'}), 200

        # Operator pod not found
        return jsonify({'ready': False, 'message': 'Waiting for Jumpstarter operator to deploy...'}), 200

    except subprocess.CalledProcessError as e:
        # Namespace might not exist yet
        return jsonify({'ready': False, 'message': 'Waiting for Jumpstarter operator to deploy...'}), 200
    except subprocess.TimeoutExpired:
        return jsonify({'ready': False, 'message': 'Timeout checking operator status'}), 200
    except Exception as e:
        return jsonify({'ready': False, 'message': 'Checking operator status...'}), 200


@app.route('/api/pods')
@requires_auth
def get_pods():
    """API endpoint to get pod status as JSON.

    Shells out to `oc get pods -A -o json` against the MicroShift kubeconfig
    and condenses each pod into the columns the UI table renders.
    """
    try:
        # Path to MicroShift kubeconfig
        kubeconfig_path = KUBECONFIG_PATH

        # Check if kubeconfig exists
        if not os.path.exists(kubeconfig_path):
            return jsonify({'error': 'MicroShift kubeconfig not found. Is MicroShift running?'}), 503

        # Run oc get pods -A -o json with explicit kubeconfig
        result = subprocess.run(
            ['oc', '--kubeconfig', kubeconfig_path, 'get', 'pods', '-A', '-o', 'json'],
            capture_output=True,
            text=True,
            check=True,
            timeout=10
        )

        pods_data = json.loads(result.stdout)
        pods_list = []

        for pod in pods_data.get('items', []):
            metadata = pod.get('metadata', {})
            spec = pod.get('spec', {})
            status = pod.get('status', {})

            # Calculate ready containers
            container_statuses = status.get('containerStatuses', [])
            ready_count = sum(1 for c in container_statuses if c.get('ready', False))
            total_count = len(container_statuses)

            # Calculate total restarts
            restarts = sum(c.get('restartCount', 0) for c in container_statuses)

            # Check if pod is terminating (has deletionTimestamp)
            if metadata.get('deletionTimestamp'):
                phase = 'Terminating'
            else:
                # Determine pod phase/status
                phase = status.get('phase', 'Unknown')

                # Check for more specific status from container states
                # (e.g. CrashLoopBackOff / ImagePullBackOff waiting reasons).
                for container in container_statuses:
                    state = container.get('state', {})
                    if 'waiting' in state:
                        reason = state['waiting'].get('reason', '')
                        if reason:
                            phase = reason
                            break

            # Calculate age
            creation_time = metadata.get('creationTimestamp', '')
            age = calculate_age(creation_time)

            pods_list.append({
                'namespace': metadata.get('namespace', 'default'),
                'name': metadata.get('name', 'unknown'),
                'ready': f"{ready_count}/{total_count}",
                'status': phase,
                'restarts': restarts,
                'age': age,
                'node': spec.get('nodeName', 'N/A')
            })

        return jsonify({'pods': pods_list})

    except subprocess.CalledProcessError as e:
        error_msg = e.stderr.strip() if e.stderr else str(e)
        return jsonify({'error': f'Failed to get pods: {error_msg}'}), 500
    except subprocess.TimeoutExpired:
        return jsonify({'error': 'Command timed out'}), 500
    except Exception as e:
        return jsonify({'error': f'Error: {str(e)}'}), 500

+@app.route('/api/routes') +@requires_auth +def get_routes(): + """API endpoint to get OpenShift routes as JSON.""" + try: + # Path to MicroShift kubeconfig + kubeconfig_path = KUBECONFIG_PATH + + # Check if kubeconfig exists + if not os.path.exists(kubeconfig_path): + return jsonify({'error': 'MicroShift kubeconfig not found. Is MicroShift running?'}), 503 + + # Run oc get routes -A -o json with explicit kubeconfig + result = subprocess.run( + ['oc', '--kubeconfig', kubeconfig_path, 'get', 'routes', '-A', '-o', 'json'], + capture_output=True, + text=True, + check=True, + timeout=10 + ) + + routes_data = json.loads(result.stdout) + routes_list = [] + + for route in routes_data.get('items', []): + metadata = route.get('metadata', {}) + spec = route.get('spec', {}) + status = route.get('status', {}) + + # Get route host + host = spec.get('host', 'N/A') + + # Get target service and port + to = spec.get('to', {}) + service_name = to.get('name', 'N/A') + + port = spec.get('port', {}) + target_port = port.get('targetPort', 'N/A') if port else 'N/A' + + # Get TLS configuration + tls = spec.get('tls', {}) + tls_termination = tls.get('termination', 'None') if tls else 'None' + + # Get ingress status + ingresses = status.get('ingress', []) + admitted = 'False' + if ingresses: + for ingress in ingresses: + conditions = ingress.get('conditions', []) + for condition in conditions: + if condition.get('type') == 'Admitted': + admitted = 'True' if condition.get('status') == 'True' else 'False' + break + + # Calculate age + creation_time = metadata.get('creationTimestamp', '') + age = calculate_age(creation_time) + + routes_list.append({ + 'namespace': metadata.get('namespace', 'default'), + 'name': metadata.get('name', 'unknown'), + 'host': host, + 'service': service_name, + 'port': str(target_port), + 'tls': tls_termination, + 'admitted': admitted, + 'age': age + }) + + return jsonify({'routes': routes_list}) + + except subprocess.CalledProcessError as e: + error_msg = 
e.stderr.strip() if e.stderr else str(e) + return jsonify({'error': f'Failed to get routes: {error_msg}'}), 500 + except subprocess.TimeoutExpired: + return jsonify({'error': 'Command timed out'}), 500 + except Exception as e: + return jsonify({'error': f'Error: {str(e)}'}), 500 + + +@app.route('/api/pods/<namespace>/<pod_name>', methods=['DELETE']) +@requires_auth +def delete_pod(namespace, pod_name): + """API endpoint to delete a pod (causing it to restart).""" + try: + # Path to MicroShift kubeconfig + kubeconfig_path = KUBECONFIG_PATH + + # Check if kubeconfig exists + if not os.path.exists(kubeconfig_path): + return jsonify({'success': False, 'error': 'MicroShift kubeconfig not found. Is MicroShift running?'}), 503 + + # Run oc delete pod with explicit kubeconfig + subprocess.run( + ['oc', '--kubeconfig', kubeconfig_path, 'delete', 'pod', pod_name, '-n', namespace], + capture_output=True, + text=True, + check=True, + timeout=10 + ) + + return jsonify({'success': True, 'message': f'Pod {pod_name} deleted successfully'}) + + except subprocess.CalledProcessError as e: + error_msg = e.stderr.strip() if e.stderr else str(e) + return jsonify({'success': False, 'error': f'Failed to delete pod: {error_msg}'}), 500 + except subprocess.TimeoutExpired: + return jsonify({'success': False, 'error': 'Command timed out'}), 500 + except Exception as e: + return jsonify({'success': False, 'error': f'Error: {str(e)}'}), 500 + + +@app.route('/logs/<namespace>/<pod_name>') +@requires_auth +def stream_logs(namespace, pod_name): + """Stream pod logs in real-time.""" + kubeconfig_path = KUBECONFIG_PATH + + # Check if kubeconfig exists + if not os.path.exists(kubeconfig_path): + return "MicroShift kubeconfig not found. 
Is MicroShift running?", 503 + + def generate(): + """Generator function to stream logs.""" + process = None + try: + # Start oc logs -f process + process = subprocess.Popen( + ['oc', '--kubeconfig', kubeconfig_path, 'logs', '-f', '-n', namespace, pod_name], + stdout=subprocess.PIPE, + stderr=subprocess.STDOUT, + text=True, + bufsize=1 + ) + + # Stream output line by line + for line in iter(process.stdout.readline, ''): + if not line: + break + yield f"{line}" + + except Exception as e: + yield f"Error streaming logs: {str(e)}\n" + finally: + # Clean up process when connection closes + if process: + try: + process.terminate() + process.wait(timeout=5) + except Exception: + process.kill() + + # Return streaming response with HTML wrapper + html_header = f""" + + + Logs: {namespace}/{pod_name} + + + +
+

📋 Pod Logs

+
Namespace: {namespace} | Pod: {pod_name}
+
+
""" + + html_footer = """
+ + +""" + + def generate_with_html(): + yield html_header + for line in generate(): + yield line.replace('<', '&lt;').replace('>', '&gt;') + yield html_footer + + return Response(generate_with_html(), mimetype='text/html') + + +@app.route('/kubeconfig') +@requires_auth +def download_kubeconfig(): + """Serve the kubeconfig file for download with nip.io hostname and insecure TLS.""" + kubeconfig_path = Path(KUBECONFIG_PATH) + + if not kubeconfig_path.exists(): + return "Kubeconfig file not found", 404 + + try: + # Read the original kubeconfig + with open(kubeconfig_path, 'r') as f: + kubeconfig_content = f.read() + + # Always use nip.io format based on default route IP + default_ip = get_default_route_ip() + if default_ip: + nip_hostname = f"jumpstarter.{default_ip}.nip.io" + else: + # Fallback to current hostname if IP detection fails + nip_hostname = get_current_hostname() + + # Extract the original server hostname (likely localhost) before replacing + # This is needed for tls-server-name to match the certificate + original_server_match = re.search(r'server:\s+https://([^:]+):(\d+)', kubeconfig_content) + original_hostname = 'localhost' # Default fallback + if original_server_match: + original_hostname = original_server_match.group(1) + + # Replace localhost with the nip.io hostname + kubeconfig_content = re.sub( + r'server:\s+https://localhost:(\d+)', + f'server: https://{nip_hostname}:\\1', + kubeconfig_content + ) + + # Keep the CA certificate fields (certificate-authority-data or certificate-authority) + # They are needed for certificate chain verification + + # Remove insecure-skip-tls-verify if it exists (we'll replace it with tls-server-name) + kubeconfig_content = re.sub( + r'^\s+insecure-skip-tls-verify:\s+.*\n', + '', + kubeconfig_content, + flags=re.MULTILINE + ) + + # Add tls-server-name to verify the CA but allow hostname mismatch + # This tells the client to verify the certificate as if it were issued for the original hostname + # (e.g., localhost), even 
though we're connecting via nip.io hostname + kubeconfig_content = re.sub( + r'(server:\s+https://[^\n]+\n)', + f'\\1 tls-server-name: {original_hostname}\n', + kubeconfig_content + ) + + # Create a BytesIO object to send as file + kubeconfig_bytes = BytesIO(kubeconfig_content.encode('utf-8')) + kubeconfig_bytes.seek(0) + + return send_file( + kubeconfig_bytes, + as_attachment=True, + download_name='kubeconfig', + mimetype='application/octet-stream' + ) + except Exception as e: + return f"Error reading kubeconfig: {str(e)}", 500 + + +def calculate_age(creation_timestamp): + """Calculate age from Kubernetes timestamp.""" + if not creation_timestamp: + return 'N/A' + + try: + from datetime import datetime, timezone + + # Parse ISO 8601 timestamp + created = datetime.fromisoformat(creation_timestamp.replace('Z', '+00:00')) + now = datetime.now(timezone.utc) + delta = now - created + + # Format age + seconds = int(delta.total_seconds()) + if seconds < 60: + return f'{seconds}s' + elif seconds < 3600: + return f'{seconds // 60}m' + elif seconds < 86400: + hours = seconds // 3600 + minutes = (seconds % 3600) // 60 + return f'{hours}h{minutes}m' if minutes > 0 else f'{hours}h' + else: + days = seconds // 86400 + hours = (seconds % 86400) // 3600 + return f'{days}d{hours}h' if hours > 0 else f'{days}d' + except Exception as e: + print(f"Error calculating age: {e}", file=sys.stderr) + return 'N/A' + + +def get_default_route_ip(): + """Get the IP address of the default route interface.""" + try: + # Get default route + result = subprocess.run( + ['ip', 'route', 'show', 'default'], + capture_output=True, + text=True, + check=True + ) + + # Parse output: "default via X.X.X.X dev ethX ..." 
+ lines = result.stdout.strip().split('\n') + if not lines: + return None + + parts = lines[0].split() + if len(parts) < 5: + return None + + # Find the device name + dev_idx = parts.index('dev') if 'dev' in parts else None + if dev_idx is None or dev_idx + 1 >= len(parts): + return None + + dev_name = parts[dev_idx + 1] + + # Get IP address for this device + result = subprocess.run( + ['ip', '-4', 'addr', 'show', dev_name], + capture_output=True, + text=True, + check=True + ) + + # Parse: " inet 192.168.1.10/24 ..." + for line in result.stdout.split('\n'): + line = line.strip() + if line.startswith('inet '): + ip_with_mask = line.split()[1] + ip = ip_with_mask.split('/')[0] + return ip.replace('.', '-') # Format for nip.io + + return None + except Exception as e: + print(f"Error getting default route IP: {e}", file=sys.stderr) + return None + + +def get_current_hostname(): + """Get the current system hostname.""" + try: + return socket.gethostname() + except Exception as e: + print(f"Error getting hostname: {e}", file=sys.stderr) + return "unknown" + + +def get_jumpstarter_config(): + """Get the current Jumpstarter CR configuration from the cluster.""" + default_ip = get_default_route_ip() + default_base_domain = f"jumpstarter.{default_ip}.nip.io" if default_ip else "jumpstarter.local" + + defaults = { + 'base_domain': default_base_domain, + 'image': 'quay.io/jumpstarter-dev/jumpstarter-controller:latest', + 'image_pull_policy': 'IfNotPresent' + } + + try: + # Path to MicroShift kubeconfig + kubeconfig_path = KUBECONFIG_PATH + + # Check if kubeconfig exists + if not os.path.exists(kubeconfig_path): + return defaults + + # Try to get existing Jumpstarter CR + result = subprocess.run( + ['oc', '--kubeconfig', kubeconfig_path, 'get', 'jumpstarter', 'jumpstarter', '-n', 'default', '-o', 'json'], + capture_output=True, + text=True, + timeout=5 + ) + + if result.returncode == 0: + cr_data = json.loads(result.stdout) + spec = cr_data.get('spec', {}) + controller = 
spec.get('controller', {}) + + return { + 'base_domain': spec.get('baseDomain', defaults['base_domain']), + 'image': controller.get('image', defaults['image']), + 'image_pull_policy': controller.get('imagePullPolicy', defaults['image_pull_policy']) + } + else: + # CR doesn't exist yet, return defaults + return defaults + + except Exception as e: + print(f"Error getting Jumpstarter config: {e}", file=sys.stderr) + return defaults + + +def set_hostname(hostname): + """Set the system hostname using hostnamectl.""" + try: + subprocess.run( + ['hostnamectl', 'set-hostname', hostname], + capture_output=True, + text=True, + check=True + ) + return True, "Success" + except subprocess.CalledProcessError as e: + error_msg = e.stderr.strip() if e.stderr else str(e) + print(f"Error setting hostname: {error_msg}", file=sys.stderr) + return False, error_msg + except Exception as e: + print(f"Error setting hostname: {e}", file=sys.stderr) + return False, str(e) + + +def set_root_password(password): + """Set the root user password using chpasswd.""" + try: + # Use chpasswd to set password (more reliable than passwd for scripting) + process = subprocess.Popen( + ['chpasswd'], + stdin=subprocess.PIPE, + stdout=subprocess.PIPE, + stderr=subprocess.PIPE, + text=True + ) + stdout, stderr = process.communicate(input=f'root:{password}\n') + + if process.returncode != 0: + error_msg = stderr.strip() if stderr else "Unknown error" + print(f"Error setting root password: {error_msg}", file=sys.stderr) + return False, error_msg + + return True, "Success" + except Exception as e: + print(f"Error setting root password: {e}", file=sys.stderr) + return False, str(e) + + +def get_ssh_authorized_keys(): + """Read existing SSH authorized keys from /root/.ssh/authorized_keys.""" + ssh_dir = Path('/root/.ssh') + authorized_keys_path = ssh_dir / 'authorized_keys' + + if authorized_keys_path.exists(): + try: + with open(authorized_keys_path, 'r') as f: + return f.read().strip() + except Exception as e: 
+ print(f"Error reading authorized_keys: {e}", file=sys.stderr) + return "" + return "" + + +def set_ssh_authorized_keys(keys_content): + """Set SSH authorized keys in /root/.ssh/authorized_keys with proper permissions.""" + ssh_dir = Path('/root/.ssh') + authorized_keys_path = ssh_dir / 'authorized_keys' + + try: + # Create .ssh directory if it doesn't exist + ssh_dir.mkdir(mode=0o700, parents=True, exist_ok=True) + + # Write authorized_keys file + keys_content = keys_content.strip() + if keys_content: + with open(authorized_keys_path, 'w') as f: + f.write(keys_content) + if not keys_content.endswith('\n'): + f.write('\n') + + # Set proper permissions: .ssh directory = 700, authorized_keys = 600 + os.chmod(ssh_dir, 0o700) + os.chmod(authorized_keys_path, 0o600) + + return True, "SSH authorized keys updated successfully" + else: + # If empty, remove the file if it exists + if authorized_keys_path.exists(): + authorized_keys_path.unlink() + # Ensure .ssh directory still has correct permissions + os.chmod(ssh_dir, 0o700) + return True, "SSH authorized keys cleared" + except Exception as e: + print(f"Error setting SSH authorized keys: {e}", file=sys.stderr) + return False, str(e) + + +def update_login_banner(): + """Update the login banner with the web UI URL.""" + try: + default_ip = get_default_route_ip() + if default_ip: + hostname = f"jumpstarter.{default_ip}.nip.io" + port = 8880 + url = f"http://{hostname}:{port}" + + # Format URL line to fit properly in the box (62 chars content width) + url_line = f" → {url}" + + banner = f""" +╔══════════════════════════════════════════════════════════════════╗ +║ ║ +║ Jumpstarter Controller Community Edition ║ +║ Powered by MicroShift ║ +║ ║ +║ Web Configuration UI: ║ +║ {url_line:<64}║ +║ ║ +║ Login with: root / ║ +║ ║ +╚══════════════════════════════════════════════════════════════════╝ + +""" + + # Write to /etc/issue for pre-login banner + with open('/etc/issue', 'w') as f: + f.write(banner) + + return True, "Success" + 
else: + return False, "Could not determine IP address" + except Exception as e: + print(f"Error updating login banner: {e}", file=sys.stderr) + return False, str(e) + + +def apply_jumpstarter_cr(base_domain, image, image_pull_policy='IfNotPresent'): + """Apply Jumpstarter Custom Resource using oc.""" + try: + # Path to MicroShift kubeconfig + kubeconfig_path = KUBECONFIG_PATH + + # Check if kubeconfig exists + if not os.path.exists(kubeconfig_path): + return False, 'MicroShift kubeconfig not found. Is MicroShift running?' + + # Build the CR YAML + cr = { + 'apiVersion': 'operator.jumpstarter.dev/v1alpha1', + 'kind': 'Jumpstarter', + 'metadata': { + 'name': 'jumpstarter', + 'namespace': 'default' + }, + 'spec': { + 'baseDomain': base_domain, + 'controller': { + 'grpc': { + 'endpoints': [ + { + 'address': f'grpc.{base_domain}', + 'route': { + 'enabled': True + } + } + ] + }, + 'image': image, + 'imagePullPolicy': image_pull_policy, + 'replicas': 1 + }, + 'routers': { + 'grpc': { + 'endpoints': [ + { + 'address': f'router.{base_domain}', + 'route': { + 'enabled': True + } + } + ] + }, + 'image': image, + 'imagePullPolicy': image_pull_policy, + 'replicas': 1 + }, + 'useCertManager': True + } + } + + # Write CR to temporary file + with tempfile.NamedTemporaryFile(mode='w', suffix='.yaml', delete=False) as f: + yaml_content = json_to_yaml(cr) + f.write(yaml_content) + temp_file = f.name + + try: + # Apply using oc with explicit kubeconfig + result = subprocess.run( + ['oc', '--kubeconfig', kubeconfig_path, 'apply', '-f', temp_file], + capture_output=True, + text=True, + check=True + ) + return True, result.stdout.strip() + finally: + # Clean up temp file + try: + os.unlink(temp_file) + except Exception: + pass + + except subprocess.CalledProcessError as e: + error_msg = e.stderr.strip() if e.stderr else str(e) + print(f"Error applying Jumpstarter CR: {error_msg}", file=sys.stderr) + return False, error_msg + except Exception as e: + print(f"Error applying Jumpstarter CR: 
{e}", file=sys.stderr) + return False, str(e) + + +def json_to_yaml(obj, indent=0): + """Convert a JSON object to YAML format (simple implementation).""" + lines = [] + indent_str = ' ' * indent + + if isinstance(obj, dict): + for key, value in obj.items(): + if isinstance(value, (dict, list)): + lines.append(f"{indent_str}{key}:") + lines.append(json_to_yaml(value, indent + 1)) + else: + lines.append(f"{indent_str}{key}: {yaml_value(value)}") + elif isinstance(obj, list): + for item in obj: + if isinstance(item, (dict, list)): + lines.append(f"{indent_str}-") + lines.append(json_to_yaml(item, indent + 1)) + else: + lines.append(f"{indent_str}- {yaml_value(item)}") + + return '\n'.join(lines) + + +def yaml_value(value): + """Format a value for YAML output.""" + if value is None: + return 'null' + elif isinstance(value, bool): + return 'true' if value else 'false' + elif isinstance(value, str): + # Quote strings that contain special characters + if ':' in value or '#' in value or value.startswith('-'): + return f'"{value}"' + return value + else: + return str(value) + + +def main(): + """Main entry point.""" + port = int(os.environ.get('PORT', 8080)) + + print(f"Starting Jumpstarter Configuration UI on port {port}...", file=sys.stderr) + print(f"Access the UI at http://localhost:{port}/", file=sys.stderr) + + # Update login banner on startup + banner_success, banner_message = update_login_banner() + if banner_success: + print("Login banner updated with web UI URL", file=sys.stderr) + else: + print(f"Warning: Could not update login banner: {banner_message}", file=sys.stderr) + + app.run(host='0.0.0.0', port=port, debug=False) + + +if __name__ == '__main__': + main() + diff --git a/deploy/microshift-bootc/config-svc/config-svc.service b/deploy/microshift-bootc/config-svc/config-svc.service new file mode 100644 index 0000000..027fef8 --- /dev/null +++ b/deploy/microshift-bootc/config-svc/config-svc.service @@ -0,0 +1,21 @@ +[Unit] +Description=Jumpstarter Configuration 
Web UI +Documentation=https://github.com/jumpstarter-dev/jumpstarter-controller +After=network-online.target +Wants=network-online.target +Before=getty@.service systemd-user-sessions.service + +[Service] +Type=simple +ExecStart=/usr/bin/python3 /usr/local/bin/config-svc +Restart=on-failure +RestartSec=5 +Environment="PORT=8880" + +# Security and resource limits +StandardOutput=journal +StandardError=journal +SyslogIdentifier=config-svc + +[Install] +WantedBy=multi-user.target diff --git a/deploy/microshift-bootc/config-svc/update-banner.service b/deploy/microshift-bootc/config-svc/update-banner.service new file mode 100644 index 0000000..03d75f8 --- /dev/null +++ b/deploy/microshift-bootc/config-svc/update-banner.service @@ -0,0 +1,19 @@ +[Unit] +Description=Update Jumpstarter Login Banner +Documentation=https://github.com/jumpstarter-dev/jumpstarter-controller +After=network-online.target +Wants=network-online.target +Before=getty@.service getty.target systemd-user-sessions.service +DefaultDependencies=no + +[Service] +Type=oneshot +ExecStart=/usr/local/bin/update-banner.sh +RemainAfterExit=yes +StandardOutput=journal +StandardError=journal +SyslogIdentifier=update-banner + +[Install] +WantedBy=multi-user.target + diff --git a/deploy/microshift-bootc/config-svc/update-banner.sh b/deploy/microshift-bootc/config-svc/update-banner.sh new file mode 100644 index 0000000..57cba77 --- /dev/null +++ b/deploy/microshift-bootc/config-svc/update-banner.sh @@ -0,0 +1,38 @@ +#!/bin/bash +# Update login banner with Jumpstarter web UI URL + +python3 << 'EOF' +import sys +import os + +# Import and call the update function +import importlib.util +import importlib.machinery + +config_svc_path = '/usr/local/bin/config-svc' +if not os.path.exists(config_svc_path): + print(f"Error: {config_svc_path} does not exist", file=sys.stderr) + sys.exit(1) + +# Try to create spec with explicit loader for files without .py extension +try: + # Use SourceFileLoader explicitly for files without .py 
extension + loader = importlib.machinery.SourceFileLoader('config_svc', config_svc_path) + spec = importlib.util.spec_from_loader('config_svc', loader) + + if spec is None: + print(f"Error: Failed to create spec for {config_svc_path}", file=sys.stderr) + sys.exit(1) + + if spec.loader is None: + print(f"Error: Failed to get loader for {config_svc_path}", file=sys.stderr) + sys.exit(1) + + module = importlib.util.module_from_spec(spec) + spec.loader.exec_module(module) + module.update_login_banner() +except Exception as e: + print(f"Error loading or executing {config_svc_path}: {e}", file=sys.stderr) + sys.exit(1) +EOF + diff --git a/deploy/microshift-bootc/config.toml b/deploy/microshift-bootc/config.toml new file mode 100644 index 0000000..eafe8be --- /dev/null +++ b/deploy/microshift-bootc/config.toml @@ -0,0 +1,38 @@ +# Locale and keyboard settings +[customizations.locale] +languages = ["en_US.UTF-8"] +keyboard = "us" + +# SSH key for root user +#[[customizations.sshkey]] +#user = "root" +#key = "ssh-rsa AAAAB3NzaC1yc2EAAAADAQABAAABAQC... your-ssh-key-here" + +# Note: Network configuration is not supported in bootc-image-builder config +# Network will be configured automatically (DHCP by default) + +# +# Disk customizations (advanced partitioning) +# Note: boot/EFI partitions are handled by the image type/boot method automatically. +# This defines ONE LVM VG and leaves remaining extents unallocated (because we only +# define the root LV and nothing else). 
+[[customizations.disk.partitions]] +type = "lvm" +name = "myvg1" +minsize = "20 GiB" # VG minimum; image can be bigger, leaving free space in VG + +[[customizations.disk.partitions.logical_volumes]] +name = "root" +mountpoint = "/" +label = "root" +fs_type = "xfs" +minsize = "10 GiB" + + +# If you need to add the OpenShift pull secret file, uncomment and configure: +# [[customizations.files]] +# path = "/etc/crio/openshift-pull-secret" +# mode = "0600" +# user = "root" +# group = "root" +# data = "YOUR_PULL_SECRET_CONTENT_HERE" diff --git a/deploy/microshift-bootc/kustomization.yaml b/deploy/microshift-bootc/kustomization.yaml new file mode 100644 index 0000000..be0feca --- /dev/null +++ b/deploy/microshift-bootc/kustomization.yaml @@ -0,0 +1,11 @@ +apiVersion: kustomize.config.k8s.io/v1beta1 +kind: Kustomization + +resources: + - install-operator.yaml + +# Common labels applied to all resources +commonLabels: + app.kubernetes.io/part-of: jumpstarter-controller + app.kubernetes.io/managed-by: microshift + diff --git a/deploy/microshift-bootc/output/.gitkeep b/deploy/microshift-bootc/output/.gitkeep new file mode 100644 index 0000000..e69de29 diff --git a/deploy/microshift-bootc/run-microshift.sh b/deploy/microshift-bootc/run-microshift.sh new file mode 100755 index 0000000..a4532ea --- /dev/null +++ b/deploy/microshift-bootc/run-microshift.sh @@ -0,0 +1,179 @@ +#!/bin/bash +set -euo pipefail + +# Use the image from environment or default +IMAGE=${BOOTC_IMG:-"quay.io/jumpstarter-dev/microshift/bootc:latest"} +CONTAINER_NAME=${CONTAINER_NAME:-"jumpstarter-microshift-okd"} + +LVM_DISK="/var/lib/microshift-okd/lvmdisk.image" +VG_NAME="myvg1" +HTTP_PORT=80 +HTTPS_PORT=443 +CONFIG_SVC_PORT=8880 + + +function pull_bootc_image() { + local -r image_ref="$1" + + # Skip pulling the local container images + if [[ "${image_ref}" == localhost/* ]]; then + echo "Skipping pull of local container image: ${image_ref}" + return 0 + fi + + # Check if the image already exists locally 
+ if podman image exists "${image_ref}"; then + echo "Image '${image_ref}' already exists locally, skipping pull" + return 0 + fi + + echo "Pulling '${image_ref}'" + podman pull "${image_ref}" +} + +function prepare_lvm_disk() { + local -r lvm_disk="$1" + local -r vg_name="$2" + + mkdir -p "$(dirname "${lvm_disk}")" + + if [ ! -f "${lvm_disk}" ]; then + echo "Creating LVM disk image: ${lvm_disk}" + truncate --size=1G "${lvm_disk}" + else + echo "INFO: '${lvm_disk}' already exists, reusing it." + fi +} + +function setup_lvm_in_container() { + local -r container_name="$1" + local -r lvm_disk="$2" + local -r vg_name="$3" + + echo "Setting up LVM inside container..." + + # Check if VG already exists in container + if podman exec "${container_name}" vgs "${vg_name}" &>/dev/null; then + echo "Volume group '${vg_name}' already exists in container" + return 0 + fi + + # Copy the LVM disk into the container + local container_lvm_disk="/var/lib/lvmdisk.image" + podman cp "${lvm_disk}" "${container_name}:${container_lvm_disk}" + + # Set up loop device and create VG inside the container + podman exec "${container_name}" bash -c " + set -e + # Find available loop device + LOOP_DEV=\$(losetup --find --show --nooverlap '${container_lvm_disk}') + echo \"Created loop device: \${LOOP_DEV}\" + + # Create volume group + vgcreate -f -y '${vg_name}' \"\${LOOP_DEV}\" + echo \"Created volume group: ${vg_name}\" + + # Verify + vgs '${vg_name}' + " +} + +function run_bootc_image() { + local -r image_ref="$1" + local -r container_name="$2" + + # Get the default route IP address + local -r hostname="jumpstarter.127-0-0-1.nip.io" + + # Prerequisites for running the MicroShift container: + # - If the OVN-K CNI driver is used, the `openvswitch` module must be loaded on the host. + # - If the TopoLVM CSI driver is used, the /dev/dm-* device must be shared with the container. 
+ echo "Running '${image_ref}' as container '${container_name}'" + echo "Hostname: ${hostname}" + modprobe openvswitch || true + + # Share the /dev directory with the container to enable TopoLVM CSI driver. + # Mask the devices that may conflict with the host by sharing them on a + # temporary file system. Note that a pseudo-TTY is also allocated to + # prevent the container from using host consoles. + local vol_opts="--tty --volume /dev:/dev" + for device in input snd dri; do + [ -d "/dev/${device}" ] && vol_opts="${vol_opts} --tmpfs /dev/${device}" + done + set -x + # shellcheck disable=SC2086 + podman run --privileged -d \ + --replace \ + ${vol_opts} \ + -p ${CONFIG_SVC_PORT}:8880 \ + -p ${HTTP_PORT}:80 \ + -p ${HTTPS_PORT}:443 \ + --name "${container_name}" \ + --hostname "${hostname}" \ + "${image_ref}" + set +x + + echo "Waiting for MicroShift to start" + local -r kubeconfig="/var/lib/microshift/resources/kubeadmin/kubeconfig" + local -r max_wait=${MICROSHIFT_KUBECONFIG_TIMEOUT:-300} + local start_time + start_time=$(date +%s) + + while true ; do + if podman exec "${container_name}" /bin/test -f "${kubeconfig}" &>/dev/null ; then + break + fi + + local current_time + current_time=$(date +%s) + local elapsed=$((current_time - start_time)) + + if [ "${elapsed}" -ge "${max_wait}" ]; then + echo "ERROR: Timeout waiting for MicroShift kubeconfig after ${elapsed} seconds" >&2 + echo "ERROR: Container: ${container_name}, Kubeconfig path: ${kubeconfig}" >&2 + return 1 + fi + + sleep 1 + done +} + +# Check if the script is running as root +if [ "$(id -u)" -ne 0 ]; then + echo "ERROR: This script must be run as root (use sudo)" + exit 1 +fi + +# Run the procedures +pull_bootc_image "${IMAGE}" +prepare_lvm_disk "${LVM_DISK}" "${VG_NAME}" +run_bootc_image "${IMAGE}" "${CONTAINER_NAME}" +setup_lvm_in_container "${CONTAINER_NAME}" "${LVM_DISK}" "${VG_NAME}" + +# Get the hostname for display +HOSTNAME="jumpstarter.127-0-0-1.nip.io" + +# Follow-up instructions +echo +echo 
"MicroShift is running in a bootc container" +echo "Hostname: ${HOSTNAME}" +echo "Container: ${CONTAINER_NAME}" +echo "LVM disk: ${LVM_DISK}" +echo "VG name: ${VG_NAME}" +echo "Ports: HTTP:${HTTP_PORT}, HTTPS:${HTTPS_PORT}, Config Service:${CONFIG_SVC_PORT}" +echo +echo "To access the container, run the following command:" +echo " - make bootc-sh" +echo +echo "To verify that MicroShift pods are up and running, run the following command:" +echo " - sudo podman exec -it ${CONTAINER_NAME} oc get pods -A" +echo +echo "To access the web interfaces, visit:" +echo " - Config Service: http://${HOSTNAME%%.*}:${CONFIG_SVC_PORT} or http://localhost:${CONFIG_SVC_PORT}" +echo " - MicroShift: https://${HOSTNAME}" +echo +echo "To stop MicroShift, run the following command:" +echo " - make bootc-stop" +echo "To remove the container, run the following command:" +echo " - make bootc-rm" +